Linux Audio

Check our new training course

Loading...
v6.8
   1/* Broadcom NetXtreme-C/E network driver.
   2 *
   3 * Copyright (c) 2014-2016 Broadcom Corporation
   4 * Copyright (c) 2016-2017 Broadcom Limited
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 */
  10
  11#include <linux/bitops.h>
  12#include <linux/ctype.h>
  13#include <linux/stringify.h>
  14#include <linux/ethtool.h>
  15#include <linux/ethtool_netlink.h>
  16#include <linux/linkmode.h>
  17#include <linux/interrupt.h>
  18#include <linux/pci.h>
  19#include <linux/etherdevice.h>
  20#include <linux/crc32.h>
  21#include <linux/firmware.h>
  22#include <linux/utsname.h>
  23#include <linux/time.h>
  24#include <linux/ptp_clock_kernel.h>
  25#include <linux/net_tstamp.h>
  26#include <linux/timecounter.h>
  27#include <net/netlink.h>
  28#include "bnxt_hsi.h"
  29#include "bnxt.h"
  30#include "bnxt_hwrm.h"
  31#include "bnxt_ulp.h"
  32#include "bnxt_xdp.h"
  33#include "bnxt_ptp.h"
  34#include "bnxt_ethtool.h"
  35#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
  36#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
  37#include "bnxt_coredump.h"
  38
/* Report an NVM error to the netlink extack (when the caller supplied one)
 * and always to the kernel log as well.  The do/while (0) wrapper makes the
 * macro behave as a single statement at any call site.
 */
#define BNXT_NVM_ERR_MSG(dev, extack, msg)			\
	do {							\
		if (extack)					\
			NL_SET_ERR_MSG_MOD(extack, msg);	\
		netdev_err(dev, "%s\n", msg);			\
	} while (0)
  45
  46static u32 bnxt_get_msglevel(struct net_device *dev)
  47{
  48	struct bnxt *bp = netdev_priv(dev);
  49
  50	return bp->msg_enable;
  51}
  52
  53static void bnxt_set_msglevel(struct net_device *dev, u32 value)
  54{
  55	struct bnxt *bp = netdev_priv(dev);
  56
  57	bp->msg_enable = value;
  58}
  59
  60static int bnxt_get_coalesce(struct net_device *dev,
  61			     struct ethtool_coalesce *coal,
  62			     struct kernel_ethtool_coalesce *kernel_coal,
  63			     struct netlink_ext_ack *extack)
  64{
  65	struct bnxt *bp = netdev_priv(dev);
  66	struct bnxt_coal *hw_coal;
  67	u16 mult;
  68
  69	memset(coal, 0, sizeof(*coal));
  70
  71	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
  72
  73	hw_coal = &bp->rx_coal;
  74	mult = hw_coal->bufs_per_record;
  75	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
  76	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
  77	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
  78	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
  79	if (hw_coal->flags &
  80	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
  81		kernel_coal->use_cqe_mode_rx = true;
  82
  83	hw_coal = &bp->tx_coal;
  84	mult = hw_coal->bufs_per_record;
  85	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
  86	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
  87	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
  88	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
  89	if (hw_coal->flags &
  90	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
  91		kernel_coal->use_cqe_mode_tx = true;
  92
  93	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
  94
  95	return 0;
  96}
  97
/* ethtool -C: stage new interrupt coalescing parameters and push them to
 * the hardware.  A change to the statistics block interval requires a
 * full close/open cycle; everything else is applied via HWRM.
 */
static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			/* Turning adaptive (DIM) mode off: skip staging of
			 * new parameters and re-program the hardware with
			 * the values already stored in bp->rx_coal/tx_coal.
			 */
			bp->flags &= ~(BNXT_FLAG_DIM);
			goto reset_coalesce;
		}
	}

	/* CQE (timer reset) mode needs the corresponding firmware
	 * capability.
	 */
	if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
	    !(bp->coal_cap.cmpl_params &
	      RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
		return -EOPNOTSUPP;

	/* Stage RX settings; frame counts are scaled by bufs_per_record. */
	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_rx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	/* Stage TX settings the same way. */
	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_tx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		/* Keep the periodic timer in step with the stats interval;
		 * fall back to the default period when stats are disabled.
		 */
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		update_stats = true;
	}

reset_coalesce:
	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
		if (update_stats) {
			/* A new stats interval only takes effect across a
			 * close/open cycle.
			 */
			bnxt_close_nic(bp, true, false);
			rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}
 177
/* Names of the per-ring hardware RX counters.  The order must match the
 * layout of the RX section of each ring's stats block, which
 * bnxt_get_ethtool_stats() copies out sequentially.
 */
static const char * const bnxt_ring_rx_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_errors",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
};

/* Names of the per-ring hardware TX counters, in stats-block order. */
static const char * const bnxt_ring_tx_stats_str[] = {
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_errors",
	"tx_discards",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

/* TPA (aggregation) counter names for chips without max_tpa_v2. */
static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

/* TPA counter names for max_tpa_v2 chips (see bnxt_get_strings()). */
static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
	"rx_tpa_events",
};

/* Driver-maintained (software) per-RX-ring counters; order matches the
 * rx substruct of cpr->sw_stats read in bnxt_get_ethtool_stats().
 */
static const char * const bnxt_rx_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_resets",
	"rx_buf_errors",
};

/* Driver-maintained counters common to every completion ring. */
static const char * const bnxt_cmn_sw_stats_str[] = {
	"missed_irqs",
};
 225
/* Each entry pairs a counter's offset within the firmware stats block
 * with its ethtool name (stringified from the field name).
 */
#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

/* Same idea for the extended RX/TX port statistics blocks. */
#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

/* Expanders for the 8 PFC priorities (duration + transitions each). */
#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

/* Expanders for the 8 CoS queues (bytes + packets each). */
#define BNXT_RX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),		\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),		\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_TX_STATS_EXT_COS_ENTRIES				\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

/* Per-priority entries record the cos0 base offset; the actual CoS
 * queue offset for each priority is added at read time from
 * bp->pri2cos_idx[] (see bnxt_get_ethtool_stats()).
 */
#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)
 335
/* Discard-counter indices.  NOTE(review): these do not index
 * bnxt_ring_err_stats_arr[] below (different length/order); confirm
 * their users elsewhere in the driver.
 */
enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
	RX_NETPOLL_DISCARDS,
};

/* Names for the aggregated ring error totals.  bnxt_get_ethtool_stats()
 * reads the values by walking struct bnxt_total_ring_err_stats field by
 * field starting at rx_total_l4_csum_errors, so this order must match
 * that structure's layout.
 */
static const char *const bnxt_ring_err_stats_arr[] = {
	"rx_total_l4_csum_errors",
	"rx_total_resets",
	"rx_total_buf_errors",
	"rx_total_oom_discards",
	"rx_total_netpoll_discards",
	"rx_total_ring_discards",
	"tx_total_resets",
	"tx_total_ring_discards",
	"total_missed_irqs",
};

/* Element counts of the per-ring counter name tables above. */
#define NUM_RING_RX_SW_STATS		ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS		ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS		ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS		ARRAY_SIZE(bnxt_ring_tx_stats_str)
 358
/* Port-level MAC statistics: offset into bp->port_stats.sw_stats plus
 * the ethtool string for each counter.  Reported only when
 * BNXT_FLAG_PORT_STATS is set.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};
 444
/* Extended RX port statistics (bp->rx_port_stats_ext.sw_stats); only the
 * first fw_rx_stats_ext_size entries are reported.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};

/* Extended TX port statistics (bp->tx_port_stats_ext.sw_stats); capped
 * at fw_tx_stats_ext_size entries.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

/* Per-priority tables: base_off is the cos0 offset; the per-priority
 * CoS offset from bp->pri2cos_idx[] is added when the value is read.
 */
static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_RING_ERR_STATS	ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
 509
 510static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
 511{
 512	if (BNXT_SUPPORTS_TPA(bp)) {
 513		if (bp->max_tpa_v2) {
 514			if (BNXT_CHIP_P5(bp))
 515				return BNXT_NUM_TPA_RING_STATS_P5;
 516			return BNXT_NUM_TPA_RING_STATS_P7;
 517		}
 518		return BNXT_NUM_TPA_RING_STATS;
 519	}
 520	return 0;
 521}
 522
 523static int bnxt_get_num_ring_stats(struct bnxt *bp)
 524{
 525	int rx, tx, cmn;
 526
 527	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
 528	     bnxt_get_num_tpa_ring_stats(bp);
 529	tx = NUM_RING_TX_HW_STATS;
 530	cmn = NUM_RING_CMN_SW_STATS;
 531	return rx * bp->rx_nr_rings +
 532	       tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
 533	       cmn * bp->cp_nr_rings;
 534}
 535
 536static int bnxt_get_num_stats(struct bnxt *bp)
 537{
 538	int num_stats = bnxt_get_num_ring_stats(bp);
 539	int len;
 540
 541	num_stats += BNXT_NUM_RING_ERR_STATS;
 542
 543	if (bp->flags & BNXT_FLAG_PORT_STATS)
 544		num_stats += BNXT_NUM_PORT_STATS;
 545
 546	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
 547		len = min_t(int, bp->fw_rx_stats_ext_size,
 548			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
 549		num_stats += len;
 550		len = min_t(int, bp->fw_tx_stats_ext_size,
 551			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
 552		num_stats += len;
 553		if (bp->pri2cos_valid)
 554			num_stats += BNXT_NUM_STATS_PRI;
 555	}
 556
 557	return num_stats;
 558}
 559
 560static int bnxt_get_sset_count(struct net_device *dev, int sset)
 561{
 562	struct bnxt *bp = netdev_priv(dev);
 563
 564	switch (sset) {
 565	case ETH_SS_STATS:
 566		return bnxt_get_num_stats(bp);
 567	case ETH_SS_TEST:
 568		if (!bp->num_tests)
 569			return -EOPNOTSUPP;
 570		return bp->num_tests;
 571	default:
 572		return -EOPNOTSUPP;
 573	}
 574}
 575
 576static bool is_rx_ring(struct bnxt *bp, int ring_num)
 577{
 578	return ring_num < bp->rx_nr_rings;
 579}
 580
 581static bool is_tx_ring(struct bnxt *bp, int ring_num)
 582{
 583	int tx_base = 0;
 584
 585	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
 586		tx_base = bp->rx_nr_rings;
 587
 588	if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
 589		return true;
 590	return false;
 591}
 592
/* ethtool -S values: fill @buf with every counter, in exactly the order
 * that bnxt_get_strings() emits the corresponding names.  Per-ring HW
 * and SW counters come first, then the ring error totals, then the
 * optional port and extended port statistics.
 */
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct bnxt_total_ring_err_stats ring_err_stats = {0};
	struct bnxt *bp = netdev_priv(dev);
	u64 *curr, *prev;
	u32 tpa_stats;
	u32 i, j = 0;

	/* No rings allocated (device down): skip the per-ring section but
	 * keep the output index @j in step with the string table.
	 */
	if (!bp->bnapi) {
		j += bnxt_get_num_ring_stats(bp);
		goto skip_ring_stats;
	}

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		u64 *sw;
		int k;

		/* The ring's stats block is laid out RX, then TX, then TPA
		 * counters; @k indexes into it, @j into the output buffer.
		 */
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		}
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
			       j++, k++)
				buf[j] = sw_stats[k];
		}
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;

		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			   tpa_stats; j++, k++)
			buf[j] = sw_stats[k];

skip_tpa_ring_stats:
		/* Driver-maintained software counters. */
		sw = (u64 *)&cpr->sw_stats.rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
				buf[j] = sw[k];
		}

		sw = (u64 *)&cpr->sw_stats.cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];
	}

	bnxt_get_ring_err_stats(bp, &ring_err_stats);

skip_ring_stats:
	/* Report current error totals plus the counts saved in
	 * ring_err_stats_prev, walking both structs field by field.
	 */
	curr = &ring_err_stats.rx_total_l4_csum_errors;
	prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
	for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
		buf[j] = *curr + *prev;

	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
		u32 len;

		/* Only report the extended counters the firmware provides. */
		len = min_t(u32, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		}
		len = min_t(u32, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		}
		if (bp->pri2cos_valid) {
			/* Per-priority counters live in per-CoS slots;
			 * pri2cos_idx[] gives each priority's CoS offset
			 * from the cos0 base.
			 */
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
		}
	}
}
 704
/* ethtool string tables: emit counter names (ETH_GSTRING_LEN bytes per
 * entry) in exactly the order bnxt_get_ethtool_stats() writes the
 * values, or the self-test names for ETH_SS_TEST.
 */
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	static const char * const *str;
	u32 i, j, num_str;

	switch (stringset) {
	case ETH_SS_STATS:
		/* Per-ring counter names, prefixed with the ring number. */
		for (i = 0; i < bp->cp_nr_rings; i++) {
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_rx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			if (is_tx_ring(bp, i)) {
				num_str = NUM_RING_TX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_tx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = bnxt_get_num_tpa_ring_stats(bp);
			if (!num_str || !is_rx_ring(bp, i))
				goto skip_tpa_stats;

			/* max_tpa_v2 chips use a different TPA name set. */
			if (bp->max_tpa_v2)
				str = bnxt_ring_tpa2_stats_str;
			else
				str = bnxt_ring_tpa_stats_str;

			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i, str[j]);
				buf += ETH_GSTRING_LEN;
			}
skip_tpa_stats:
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_SW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_rx_sw_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = NUM_RING_CMN_SW_STATS;
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_cmn_sw_stats_str[j]);
				buf += ETH_GSTRING_LEN;
			}
		}
		/* Aggregated ring error counter names. */
		for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
			strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
			buf += ETH_GSTRING_LEN;
		}

		if (bp->flags & BNXT_FLAG_PORT_STATS) {
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				strcpy(buf, bnxt_port_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			u32 len;

			/* Only names for counters the firmware reports. */
			len = min_t(u32, bp->fw_rx_stats_ext_size,
				    ARRAY_SIZE(bnxt_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			len = min_t(u32, bp->fw_tx_stats_ext_size,
				    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				strcpy(buf,
				       bnxt_tx_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			memcpy(buf, bp->test_info->string,
			       bp->num_tests * ETH_GSTRING_LEN);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}
 821
 822static void bnxt_get_ringparam(struct net_device *dev,
 823			       struct ethtool_ringparam *ering,
 824			       struct kernel_ethtool_ringparam *kernel_ering,
 825			       struct netlink_ext_ack *extack)
 826{
 827	struct bnxt *bp = netdev_priv(dev);
 828
 829	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
 830		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
 831		ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
 832		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
 833	} else {
 834		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
 835		ering->rx_jumbo_max_pending = 0;
 836		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
 837	}
 838	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
 839
 840	ering->rx_pending = bp->rx_ring_size;
 841	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
 842	ering->tx_pending = bp->tx_ring_size;
 843}
 844
 845static int bnxt_set_ringparam(struct net_device *dev,
 846			      struct ethtool_ringparam *ering,
 847			      struct kernel_ethtool_ringparam *kernel_ering,
 848			      struct netlink_ext_ack *extack)
 849{
 850	struct bnxt *bp = netdev_priv(dev);
 851
 852	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
 853	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
 854	    (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
 855		return -EINVAL;
 856
 857	if (netif_running(dev))
 858		bnxt_close_nic(bp, false, false);
 859
 860	bp->rx_ring_size = ering->rx_pending;
 861	bp->tx_ring_size = ering->tx_pending;
 862	bnxt_set_ring_params(bp);
 863
 864	if (netif_running(dev))
 865		return bnxt_open_nic(bp, false, false);
 866
 867	return 0;
 868}
 869
 870static void bnxt_get_channels(struct net_device *dev,
 871			      struct ethtool_channels *channel)
 872{
 873	struct bnxt *bp = netdev_priv(dev);
 874	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 875	int max_rx_rings, max_tx_rings, tcs;
 876	int max_tx_sch_inputs, tx_grps;
 877
 878	/* Get the most up-to-date max_tx_sch_inputs. */
 879	if (netif_running(dev) && BNXT_NEW_RM(bp))
 880		bnxt_hwrm_func_resc_qcaps(bp, false);
 881	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
 882
 883	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
 884	if (max_tx_sch_inputs)
 885		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
 886
 887	tcs = bp->num_tc;
 888	tx_grps = max(tcs, 1);
 889	if (bp->tx_nr_rings_xdp)
 890		tx_grps++;
 891	max_tx_rings /= tx_grps;
 892	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
 893
 894	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
 895		max_rx_rings = 0;
 896		max_tx_rings = 0;
 897	}
 898	if (max_tx_sch_inputs)
 899		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
 900
 901	if (tcs > 1)
 902		max_tx_rings /= tcs;
 903
 904	channel->max_rx = max_rx_rings;
 905	channel->max_tx = max_tx_rings;
 906	channel->max_other = 0;
 907	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
 908		channel->combined_count = bp->rx_nr_rings;
 909		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
 910			channel->combined_count--;
 911	} else {
 912		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
 913			channel->rx_count = bp->rx_nr_rings;
 914			channel->tx_count = bp->tx_nr_rings_per_tc;
 915		}
 916	}
 917}
 918
/* ethtool -L: change the RX/TX/combined channel counts.  Validates the
 * request, closes the NIC if it is running, recomputes the ring layout,
 * then reopens (or just reserves resources when the NIC is down).
 */
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;
	int tx_cp;

	if (channel->other_count)
		return -EINVAL;

	/* Either combined channels or separate rx/tx counts -- not both,
	 * and not neither.
	 */
	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	/* Nitro A0 only supports combined (shared) mode. */
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = bp->num_tc;

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		/* One XDP TX ring is paired with every RX ring. */
		tx_xdp = req_rx_rings;
	}
	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	/* A user-configured RSS table may not fit the new ring count;
	 * require it to be reset to default before proceeding.
	 */
	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    netif_is_rxfh_configured(dev)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		bnxt_close_nic(bp, true, false);
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	/* Completion rings are shared with RX in shared mode, otherwise
	 * RX and TX each need their own.
	 */
	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
			       tx_cp + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to renable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}
1013
1014static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
1015				     int tbl_size, u32 *ids, u32 start,
1016				     u32 id_cnt)
1017{
1018	int i, j = start;
1019
1020	if (j >= id_cnt)
1021		return j;
1022	for (i = 0; i < tbl_size; i++) {
1023		struct hlist_head *head;
1024		struct bnxt_filter_base *fltr;
1025
1026		head = &tbl[i];
1027		hlist_for_each_entry_rcu(fltr, head, hash) {
1028			if (!fltr->flags ||
1029			    test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
1030				continue;
1031			ids[j++] = fltr->sw_id;
1032			if (j == id_cnt)
1033				return j;
1034		}
1035	}
1036	return j;
1037}
1038
1039static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
1040						      struct hlist_head tbl[],
1041						      int tbl_size, u32 id)
1042{
1043	int i;
1044
1045	for (i = 0; i < tbl_size; i++) {
 
1046		struct hlist_head *head;
1047		struct bnxt_filter_base *fltr;
1048
1049		head = &tbl[i];
 
1050		hlist_for_each_entry_rcu(fltr, head, hash) {
1051			if (fltr->flags && fltr->sw_id == id)
1052				return fltr;
 
1053		}
 
 
 
1054	}
1055	return NULL;
1056}
1057
/* ETHTOOL_GRXCLSRLALL handler: report the total ntuple rule count in
 * cmd->data and fill @rule_locs with the IDs of all installed filters
 * (bounded by the caller-supplied cmd->rule_cnt).
 */
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	cmd->data = bp->ntp_fltr_count;
	rcu_read_lock();
	cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
						  BNXT_NTP_FLTR_HASH_SIZE,
						  rule_locs, 0, cmd->rule_cnt);
	rcu_read_unlock();

	return 0;
}
1070
/* ETHTOOL_GRXCLSRULE handler: translate the ntuple filter stored at
 * fs->location back into an ethtool_rx_flow_spec for user space.
 * Returns -EINVAL if the location is out of range, no filter exists
 * there, or the stored protocol is neither TCP nor UDP.
 */
static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int rc = -EINVAL;

	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE,
					  fs->location);
	if (!fltr_base) {
		rcu_read_unlock();
		return rc;
	}
	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);

	fkeys = &fltr->fkeys;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V4_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V4_FLOW;
		else
			goto fltr_err;

		/* Report each tuple element only if the filter matched on
		 * it; matched fields get an all-ones mask.
		 */
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
			fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
			fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
			fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
			fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
			fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
			fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
		}
	} else {
		/* IPv6 variant of the same translation. */
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V6_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V6_FLOW;
		else
			goto fltr_err;

		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
			*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
				fkeys->addrs.v6addrs.src;
			bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
			*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
				fkeys->addrs.v6addrs.dst;
			bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
			fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
			fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
		}
	}

	fs->ring_cookie = fltr->base.rxq;
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}
1154
1155#define IPV4_ALL_MASK		((__force __be32)~0)
1156#define L4_PORT_ALL_MASK	((__force __be16)~0)
1157
1158static bool ipv6_mask_is_full(__be32 mask[4])
1159{
1160	return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
1161}
1162
1163static bool ipv6_mask_is_zero(__be32 mask[4])
1164{
1165	return !(mask[0] | mask[1] | mask[2] | mask[3]);
1166}
1167
/* Build and install a user-defined ntuple filter from an ethtool flow
 * spec.  Only exact-match (all-ones mask) or wildcard (all-zero mask)
 * fields are supported for TCP/UDP over IPv4/IPv6.  On success the
 * assigned filter ID is written back to fs->location.
 * Returns 0 or a negative errno (-EEXIST if an identical rule exists).
 */
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
				    struct ethtool_rx_flow_spec *fs)
{
	u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	struct bnxt_ntuple_filter *new_fltr, *fltr;
	struct bnxt_l2_filter *l2_fltr;
	u32 flow_type = fs->flow_type;
	struct flow_keys *fkeys;
	u32 idx;
	int rc;

	if (!bp->vnic_info)
		return -EAGAIN;

	/* Extended matches and VF steering are not supported. */
	if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
		return -EOPNOTSUPP;

	new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
	if (!new_fltr)
		return -ENOMEM;

	/* Anchor the ntuple filter to the default L2 filter; take a
	 * reference that is released on every error path below.
	 */
	l2_fltr = bp->vnic_info[0].l2_filters[0];
	atomic_inc(&l2_fltr->refcnt);
	new_fltr->l2_fltr = l2_fltr;
	fkeys = &new_fltr->fkeys;

	rc = -EOPNOTSUPP;
	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW: {
		struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
		struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V4_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IP);

		/* Each field: full mask -> match, zero mask -> ignore,
		 * partial mask -> unsupported.
		 */
		if (ip_mask->ip4src == IPV4_ALL_MASK) {
			fkeys->addrs.v4addrs.src = ip_spec->ip4src;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
		} else if (ip_mask->ip4src) {
			goto ntuple_err;
		}
		if (ip_mask->ip4dst == IPV4_ALL_MASK) {
			fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
		} else if (ip_mask->ip4dst) {
			goto ntuple_err;
		}

		if (ip_mask->psrc == L4_PORT_ALL_MASK) {
			fkeys->ports.src = ip_spec->psrc;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
		} else if (ip_mask->psrc) {
			goto ntuple_err;
		}
		if (ip_mask->pdst == L4_PORT_ALL_MASK) {
			fkeys->ports.dst = ip_spec->pdst;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
		} else if (ip_mask->pdst) {
			goto ntuple_err;
		}
		break;
	}
	case TCP_V6_FLOW:
	case UDP_V6_FLOW: {
		struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
		struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V6_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);

		if (ipv6_mask_is_full(ip_mask->ip6src)) {
			fkeys->addrs.v6addrs.src =
				*(struct in6_addr *)&ip_spec->ip6src;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
		} else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
			goto ntuple_err;
		}
		if (ipv6_mask_is_full(ip_mask->ip6dst)) {
			fkeys->addrs.v6addrs.dst =
				*(struct in6_addr *)&ip_spec->ip6dst;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
		} else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
			goto ntuple_err;
		}

		if (ip_mask->psrc == L4_PORT_ALL_MASK) {
			fkeys->ports.src = ip_spec->psrc;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
		} else if (ip_mask->psrc) {
			goto ntuple_err;
		}
		if (ip_mask->pdst == L4_PORT_ALL_MASK) {
			fkeys->ports.dst = ip_spec->pdst;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
		} else if (ip_mask->pdst) {
			goto ntuple_err;
		}
		break;
	}
	default:
		rc = -EOPNOTSUPP;
		goto ntuple_err;
	}
	/* A rule that matches nothing is meaningless. */
	if (!new_fltr->ntuple_flags)
		goto ntuple_err;

	/* Reject duplicates before inserting. */
	idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
	rcu_read_lock();
	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
	if (fltr) {
		rcu_read_unlock();
		rc = -EEXIST;
		goto ntuple_err;
	}
	rcu_read_unlock();

	/* NO_AGING marks this as a user rule, exempt from aRFS expiry. */
	new_fltr->base.rxq = ring;
	new_fltr->base.flags = BNXT_ACT_NO_AGING;
	__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
	if (!rc) {
		rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
		if (rc) {
			/* Firmware rejected it; roll back the SW insert. */
			bnxt_del_ntp_filter(bp, new_fltr);
			return rc;
		}
		fs->location = new_fltr->base.sw_id;
		return 0;
	}

ntuple_err:
	atomic_dec(&l2_fltr->refcnt);
	kfree(new_fltr);
	return rc;
}
1309
1310static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1311{
1312	struct ethtool_rx_flow_spec *fs = &cmd->fs;
1313	u32 ring, flow_type;
1314	int rc;
1315	u8 vf;
1316
1317	if (!netif_running(bp->dev))
1318		return -EAGAIN;
1319	if (!(bp->flags & BNXT_FLAG_RFS))
1320		return -EPERM;
1321	if (fs->location != RX_CLS_LOC_ANY)
1322		return -EINVAL;
1323
1324	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
1325	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
1326	if (BNXT_VF(bp) && vf)
1327		return -EINVAL;
1328	if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
1329		return -EINVAL;
1330	if (!vf && ring >= bp->rx_nr_rings)
1331		return -EINVAL;
1332
1333	flow_type = fs->flow_type;
1334	if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
1335		return -EINVAL;
1336	flow_type &= ~FLOW_EXT;
1337	if (flow_type == ETHER_FLOW)
1338		rc = -EOPNOTSUPP;
1339	else
1340		rc = bnxt_add_ntuple_cls_rule(bp, fs);
1341	return rc;
1342}
1343
1344static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1345{
1346	struct ethtool_rx_flow_spec *fs = &cmd->fs;
1347	struct bnxt_filter_base *fltr_base;
1348	struct bnxt_ntuple_filter *fltr;
1349
1350	rcu_read_lock();
1351	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
1352					  BNXT_NTP_FLTR_HASH_SIZE,
1353					  fs->location);
1354	if (!fltr_base) {
1355		rcu_read_unlock();
1356		return -ENOENT;
1357	}
1358
1359	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
1360	if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
1361		rcu_read_unlock();
1362		return -EINVAL;
1363	}
1364	rcu_read_unlock();
1365	bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
1366	bnxt_del_ntp_filter(bp, fltr);
1367	return 0;
1368}
1369
1370static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
1371{
1372	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
1373		return RXH_IP_SRC | RXH_IP_DST;
1374	return 0;
1375}
1376
1377static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
1378{
1379	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
1380		return RXH_IP_SRC | RXH_IP_DST;
1381	return 0;
1382}
1383
/* ETHTOOL_GRXFH handler: report which header fields feed the RSS hash for
 * the queried flow type.  TCP/UDP may contribute the 4-tuple (with the
 * UDP cases falling through to pick up the 2-tuple IP bits); all other
 * flow types report at most the 2-tuple (IP src/dst).
 */
static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}
1428
1429#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
1430#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
1431
/* ETHTOOL_SRXFH handler: set the RSS hash fields for one flow type.
 * Only full 4-tuple (IP + L4 ports), 2-tuple (IP only) or no hashing are
 * accepted.  Restarts the NIC if the hash configuration changed.
 */
static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	/* 4-tuple hashing is configured per protocol; UDP additionally
	 * requires firmware capability.  4-tuple on any other flow type
	 * is invalid.
	 */
	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	/* 2-tuple (IP src/dst) hashing is configured per address family. */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	/* Nothing changed; avoid an unnecessary NIC restart. */
	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
		bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}
1510
1511static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1512			  u32 *rule_locs)
1513{
1514	struct bnxt *bp = netdev_priv(dev);
1515	int rc = 0;
1516
1517	switch (cmd->cmd) {
 
1518	case ETHTOOL_GRXRINGS:
1519		cmd->data = bp->rx_nr_rings;
1520		break;
1521
1522	case ETHTOOL_GRXCLSRLCNT:
1523		cmd->rule_cnt = bp->ntp_fltr_count;
1524		cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
1525		break;
1526
1527	case ETHTOOL_GRXCLSRLALL:
1528		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
1529		break;
1530
1531	case ETHTOOL_GRXCLSRULE:
1532		rc = bnxt_grxclsrule(bp, cmd);
1533		break;
 
1534
1535	case ETHTOOL_GRXFH:
1536		rc = bnxt_grxfh(bp, cmd);
1537		break;
1538
1539	default:
1540		rc = -EOPNOTSUPP;
1541		break;
1542	}
1543
1544	return rc;
1545}
1546
1547static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1548{
1549	struct bnxt *bp = netdev_priv(dev);
1550	int rc;
1551
1552	switch (cmd->cmd) {
1553	case ETHTOOL_SRXFH:
1554		rc = bnxt_srxfh(bp, cmd);
1555		break;
1556
1557	case ETHTOOL_SRXCLSRLINS:
1558		rc = bnxt_srxclsrlins(bp, cmd);
1559		break;
1560
1561	case ETHTOOL_SRXCLSRLDEL:
1562		rc = bnxt_srxclsrldel(bp, cmd);
1563		break;
1564
1565	default:
1566		rc = -EOPNOTSUPP;
1567		break;
1568	}
1569	return rc;
1570}
1571
1572u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
1573{
1574	struct bnxt *bp = netdev_priv(dev);
1575
1576	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1577		return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
1578		       BNXT_RSS_TABLE_ENTRIES_P5;
1579	return HW_HASH_INDEX_SIZE;
1580}
1581
/* ethtool callback: length in bytes of the RSS hash key. */
static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}
1586
1587static int bnxt_get_rxfh(struct net_device *dev,
1588			 struct ethtool_rxfh_param *rxfh)
1589{
1590	struct bnxt *bp = netdev_priv(dev);
1591	struct bnxt_vnic_info *vnic;
1592	u32 i, tbl_size;
1593
1594	rxfh->hfunc = ETH_RSS_HASH_TOP;
 
1595
1596	if (!bp->vnic_info)
1597		return 0;
1598
1599	vnic = &bp->vnic_info[0];
1600	if (rxfh->indir && bp->rss_indir_tbl) {
1601		tbl_size = bnxt_get_rxfh_indir_size(dev);
1602		for (i = 0; i < tbl_size; i++)
1603			rxfh->indir[i] = bp->rss_indir_tbl[i];
1604	}
1605
1606	if (rxfh->key && vnic->rss_hash_key)
1607		memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
1608
1609	return 0;
1610}
1611
/* ethtool .set_rxfh: accept a user-supplied RSS indirection table
 * (Toeplitz only; changing the hash key is not supported) and restart
 * the NIC to apply it.
 */
static int bnxt_set_rxfh(struct net_device *dev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	/* Hardware implements Toeplitz hashing only. */
	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (rxfh->key)
		return -EOPNOTSUPP;

	if (rxfh->indir) {
		u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);

		for (i = 0; i < tbl_size; i++)
			bp->rss_indir_tbl[i] = rxfh->indir[i];
		/* Zero any trailing entries beyond the active table size. */
		pad = bp->rss_indir_tbl_entries - tbl_size;
		if (pad)
			memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
	}

	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}
1641
/* ethtool .get_drvinfo: fill in driver name, firmware version, PCI bus
 * info and counts of stats/self-tests.  EEPROM and register dump sizes
 * are reported as zero pending firmware support.
 */
static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);

	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = bnxt_get_num_stats(bp);
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
}
1657
1658static int bnxt_get_regs_len(struct net_device *dev)
1659{
1660	struct bnxt *bp = netdev_priv(dev);
1661	int reg_len;
1662
1663	if (!BNXT_PF(bp))
1664		return -EOPNOTSUPP;
1665
1666	reg_len = BNXT_PXP_REG_LEN;
1667
1668	if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
1669		reg_len += sizeof(struct pcie_ctx_hw_stats);
1670
1671	return reg_len;
1672}
1673
/* ethtool .get_regs: dump the PXP register block into @_p and, when the
 * firmware supports it, append PCIe statistics queried via HWRM.
 * regs->version is set to 1 when the PCIe stats section is present,
 * 0 otherwise.  Errors are silently reported as a shorter dump.
 */
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *_p)
{
	struct pcie_ctx_hw_stats *hw_pcie_stats;
	struct hwrm_pcie_qstats_input *req;
	struct bnxt *bp = netdev_priv(dev);
	dma_addr_t hw_pcie_stats_addr;
	int rc;

	regs->version = 0;
	bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);

	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
		return;

	if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
		return;

	/* DMA buffer for the firmware to write the stats into. */
	hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
					   &hw_pcie_stats_addr);
	if (!hw_pcie_stats) {
		hwrm_req_drop(bp, req);
		return;
	}

	regs->version = 1;
	hwrm_req_hold(bp, req); /* hold on to slice */
	req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		/* Copy the little-endian stats to host order, placed right
		 * after the PXP register block in the output buffer.
		 */
		__le64 *src = (__le64 *)hw_pcie_stats;
		u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
		int i;

		for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
			dst[i] = le64_to_cpu(src[i]);
	}
	hwrm_req_drop(bp, req);
}
1714
1715static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1716{
1717	struct bnxt *bp = netdev_priv(dev);
1718
1719	wol->supported = 0;
1720	wol->wolopts = 0;
1721	memset(&wol->sopass, 0, sizeof(wol->sopass));
1722	if (bp->flags & BNXT_FLAG_WOL_CAP) {
1723		wol->supported = WAKE_MAGIC;
1724		if (bp->wol)
1725			wol->wolopts = WAKE_MAGIC;
1726	}
1727}
1728
1729static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1730{
1731	struct bnxt *bp = netdev_priv(dev);
1732
1733	if (wol->wolopts & ~WAKE_MAGIC)
1734		return -EINVAL;
1735
1736	if (wol->wolopts & WAKE_MAGIC) {
1737		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
1738			return -EINVAL;
1739		if (!bp->wol) {
1740			if (bnxt_hwrm_alloc_wol_fltr(bp))
1741				return -EBUSY;
1742			bp->wol = 1;
1743		}
1744	} else {
1745		if (bp->wol) {
1746			if (bnxt_hwrm_free_wol_fltr(bp))
1747				return -EBUSY;
1748			bp->wol = 0;
1749		}
1750	}
1751	return 0;
1752}
1753
1754u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
1755{
1756	u32 speed_mask = 0;
1757
1758	/* TODO: support 25GB, 40GB, 50GB with different cable type */
1759	/* set the advertised speeds */
1760	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
1761		speed_mask |= ADVERTISED_100baseT_Full;
1762	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
1763		speed_mask |= ADVERTISED_1000baseT_Full;
1764	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
1765		speed_mask |= ADVERTISED_2500baseX_Full;
1766	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
1767		speed_mask |= ADVERTISED_10000baseT_Full;
1768	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
1769		speed_mask |= ADVERTISED_40000baseCR4_Full;
1770
1771	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
1772		speed_mask |= ADVERTISED_Pause;
1773	else if (fw_pause & BNXT_LINK_PAUSE_TX)
1774		speed_mask |= ADVERTISED_Asym_Pause;
1775	else if (fw_pause & BNXT_LINK_PAUSE_RX)
1776		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1777
1778	return speed_mask;
1779}
1780
/* Coarse media classification derived from the firmware PHY/media type;
 * used as an index into the bnxt_link_modes[] lookup table.
 */
enum bnxt_media_type {
	BNXT_MEDIA_UNKNOWN = 0,
	BNXT_MEDIA_TP,		/* twisted pair (BASE-T) */
	BNXT_MEDIA_CR,		/* copper (BASE-CR / DAC) */
	BNXT_MEDIA_SR,		/* short-reach optics */
	BNXT_MEDIA_LR_ER_FR,	/* long/extended/far-reach optics */
	BNXT_MEDIA_KR,		/* backplane (BASE-KR) */
	BNXT_MEDIA_KX,		/* backplane (BASE-KX) */
	BNXT_MEDIA_X,		/* 1G fiber (BASE-SX/CX) */
	__BNXT_MEDIA_END,
};
1792
/* Map firmware PHY_TYPE values to a media class.  Types not listed here
 * implicitly map to BNXT_MEDIA_UNKNOWN (0).
 */
static const enum bnxt_media_type bnxt_phy_types[] = {
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] =  BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
};
1849
1850static enum bnxt_media_type
1851bnxt_get_media(struct bnxt_link_info *link_info)
1852{
1853	switch (link_info->media_type) {
1854	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
1855		return BNXT_MEDIA_TP;
1856	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
1857		return BNXT_MEDIA_CR;
1858	default:
1859		if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
1860			return bnxt_phy_types[link_info->phy_type];
1861		return BNXT_MEDIA_UNKNOWN;
1862	}
1863}
1864
/* Dense per-line-rate indices (collapsing signalling-mode variants of the
 * same rate) used as the first dimension of bnxt_link_modes[].
 */
enum bnxt_link_speed_indices {
	BNXT_LINK_SPEED_UNKNOWN = 0,
	BNXT_LINK_SPEED_100MB_IDX,
	BNXT_LINK_SPEED_1GB_IDX,
	BNXT_LINK_SPEED_10GB_IDX,
	BNXT_LINK_SPEED_25GB_IDX,
	BNXT_LINK_SPEED_40GB_IDX,
	BNXT_LINK_SPEED_50GB_IDX,
	BNXT_LINK_SPEED_100GB_IDX,
	BNXT_LINK_SPEED_200GB_IDX,
	BNXT_LINK_SPEED_400GB_IDX,
	__BNXT_LINK_SPEED_END
};
1878
/* Convert a firmware link speed value to its dense speed index; NRZ and
 * PAM4 variants of the same line rate share one index.  Unrecognized
 * values map to BNXT_LINK_SPEED_UNKNOWN.
 */
static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
{
	switch (speed) {
	case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
	case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
	case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
	case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
	case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
	case BNXT_LINK_SPEED_50GB:
	case BNXT_LINK_SPEED_50GB_PAM4:
		return BNXT_LINK_SPEED_50GB_IDX;
	case BNXT_LINK_SPEED_100GB:
	case BNXT_LINK_SPEED_100GB_PAM4:
	case BNXT_LINK_SPEED_100GB_PAM4_112:
		return BNXT_LINK_SPEED_100GB_IDX;
	case BNXT_LINK_SPEED_200GB:
	case BNXT_LINK_SPEED_200GB_PAM4:
	case BNXT_LINK_SPEED_200GB_PAM4_112:
		return BNXT_LINK_SPEED_200GB_IDX;
	case BNXT_LINK_SPEED_400GB:
	case BNXT_LINK_SPEED_400GB_PAM4:
	case BNXT_LINK_SPEED_400GB_PAM4_112:
		return BNXT_LINK_SPEED_400GB_IDX;
	default: return BNXT_LINK_SPEED_UNKNOWN;
	}
}
1905
1906static const enum ethtool_link_mode_bit_indices
1907bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
1908	[BNXT_LINK_SPEED_100MB_IDX] = {
1909		{
1910			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1911		},
1912	},
1913	[BNXT_LINK_SPEED_1GB_IDX] = {
1914		{
1915			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1916			/* historically baseT, but DAC is more correctly baseX */
1917			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1918			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1919			[BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1920			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1921		},
1922	},
1923	[BNXT_LINK_SPEED_10GB_IDX] = {
1924		{
1925			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1926			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1927			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1928			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1929			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1930			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
1931		},
1932	},
1933	[BNXT_LINK_SPEED_25GB_IDX] = {
1934		{
1935			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1936			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1937			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1938		},
1939	},
1940	[BNXT_LINK_SPEED_40GB_IDX] = {
1941		{
1942			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1943			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1944			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1945			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1946		},
1947	},
1948	[BNXT_LINK_SPEED_50GB_IDX] = {
1949		[BNXT_SIG_MODE_NRZ] = {
1950			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1951			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1952			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1953		},
1954		[BNXT_SIG_MODE_PAM4] = {
1955			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
1956			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
1957			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1958			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
1959		},
1960	},
1961	[BNXT_LINK_SPEED_100GB_IDX] = {
1962		[BNXT_SIG_MODE_NRZ] = {
1963			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1964			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1965			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1966			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1967		},
1968		[BNXT_SIG_MODE_PAM4] = {
1969			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
1970			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
1971			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
1972			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
1973		},
1974		[BNXT_SIG_MODE_PAM4_112] = {
1975			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
1976			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
1977			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
1978			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
1979		},
1980	},
1981	[BNXT_LINK_SPEED_200GB_IDX] = {
1982		[BNXT_SIG_MODE_PAM4] = {
1983			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1984			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1985			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1986			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1987		},
1988		[BNXT_SIG_MODE_PAM4_112] = {
1989			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
1990			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
1991			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
1992			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
1993		},
1994	},
1995	[BNXT_LINK_SPEED_400GB_IDX] = {
1996		[BNXT_SIG_MODE_PAM4] = {
1997			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
1998			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
1999			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
2000			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
2001		},
2002		[BNXT_SIG_MODE_PAM4_112] = {
2003			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
2004			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
2005			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
2006			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
2007		},
2008	},
2009};
2010
/* Sentinel returned by bnxt_get_link_mode() when the link state does not
 * map to any single ethtool link mode bit.
 */
#define BNXT_LINK_MODE_UNKNOWN -1
2012
2013static enum ethtool_link_mode_bit_indices
2014bnxt_get_link_mode(struct bnxt_link_info *link_info)
2015{
2016	enum ethtool_link_mode_bit_indices link_mode;
2017	enum bnxt_link_speed_indices speed;
2018	enum bnxt_media_type media;
2019	u8 sig_mode;
2020
2021	if (link_info->phy_link_status != BNXT_LINK_LINK)
2022		return BNXT_LINK_MODE_UNKNOWN;
2023
2024	media = bnxt_get_media(link_info);
2025	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
2026		speed = bnxt_fw_speed_idx(link_info->link_speed);
2027		sig_mode = link_info->active_fec_sig_mode &
2028			PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
2029	} else {
2030		speed = bnxt_fw_speed_idx(link_info->req_link_speed);
2031		sig_mode = link_info->req_signal_mode;
2032	}
2033	if (sig_mode >= BNXT_SIG_MODE_MAX)
2034		return BNXT_LINK_MODE_UNKNOWN;
2035
2036	/* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
2037	 * link mode, but since no such devices exist, the zeroes in the
2038	 * map can be conveniently used to represent unknown link modes.
2039	 */
2040	link_mode = bnxt_link_modes[speed][sig_mode][media];
2041	if (!link_mode)
2042		return BNXT_LINK_MODE_UNKNOWN;
2043
2044	switch (link_mode) {
2045	case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
2046		if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2047			link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT;
2048		break;
2049	case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
2050		if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2051			link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT;
2052		break;
2053	default:
2054		break;
2055	}
2056
2057	return link_mode;
2058}
2059
2060static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
2061				   struct ethtool_link_ksettings *lk_ksettings)
2062{
2063	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2064
2065	if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
2066		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2067				 lk_ksettings->link_modes.supported);
2068		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2069				 lk_ksettings->link_modes.supported);
2070	}
2071
2072	if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
2073	    link_info->support_pam4_auto_speeds)
2074		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2075				 lk_ksettings->link_modes.supported);
2076
2077	if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
2078		return;
2079
2080	if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX)
2081		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2082				 lk_ksettings->link_modes.advertising);
2083	if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1)
2084		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2085				 lk_ksettings->link_modes.advertising);
2086	if (link_info->lp_pause & BNXT_LINK_PAUSE_RX)
2087		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2088				 lk_ksettings->link_modes.lp_advertising);
2089	if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1)
2090		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2091				 lk_ksettings->link_modes.lp_advertising);
2092}
2093
/* Link speed index -> firmware speed mask bit, NRZ signalling,
 * legacy (pre-speeds2) firmware interface.
 */
static const u16 bnxt_nrz_speed_masks[] = {
	[BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB,
	[BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB,
	[BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB,
	[BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB,
	[BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB,
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};
2104
/* Link speed index -> firmware speed mask bit, PAM4 signalling,
 * legacy (pre-speeds2) firmware interface.
 */
static const u16 bnxt_pam4_speed_masks[] = {
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};
2111
/* Link speed index -> firmware speeds2 mask bit, NRZ signalling
 * (devices with BNXT_PHY_FL_SPEEDS2).
 */
static const u16 bnxt_nrz_speeds2_masks[] = {
	[BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
	[BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
	[BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
	[BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};
2121
/* Link speed index -> firmware speeds2 mask bit, PAM4-56 signalling. */
static const u16 bnxt_pam4_speeds2_masks[] = {
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
	[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
};
2128
/* Link speed index -> firmware speeds2 mask bit, PAM4-112 signalling. */
static const u16 bnxt_pam4_112_speeds2_masks[] = {
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
	[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
};
2134
2135static enum bnxt_link_speed_indices
2136bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
2137{
2138	const u16 *speeds;
2139	int idx, len;
2140
2141	switch (sig_mode) {
2142	case BNXT_SIG_MODE_NRZ:
2143		if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2144			speeds = bnxt_nrz_speeds2_masks;
2145			len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
2146		} else {
2147			speeds = bnxt_nrz_speed_masks;
2148			len = ARRAY_SIZE(bnxt_nrz_speed_masks);
2149		}
2150		break;
2151	case BNXT_SIG_MODE_PAM4:
2152		if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2153			speeds = bnxt_pam4_speeds2_masks;
2154			len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
2155		} else {
2156			speeds = bnxt_pam4_speed_masks;
2157			len = ARRAY_SIZE(bnxt_pam4_speed_masks);
2158		}
2159		break;
2160	case BNXT_SIG_MODE_PAM4_112:
2161		speeds = bnxt_pam4_112_speeds2_masks;
2162		len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
2163		break;
2164	default:
2165		return BNXT_LINK_SPEED_UNKNOWN;
2166	}
2167
2168	for (idx = 0; idx < len; idx++) {
2169		if (speeds[idx] == speed_msk)
2170			return idx;
2171	}
2172
2173	return BNXT_LINK_SPEED_UNKNOWN;
2174}
2175
/* Width in bits of the u16 firmware speed masks walked below. */
#define BNXT_FW_SPEED_MSK_BITS 16
2177
2178static void
2179__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
2180			  u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
2181{
2182	enum ethtool_link_mode_bit_indices link_mode;
2183	enum bnxt_link_speed_indices speed;
2184	u8 bit;
2185
2186	for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
2187		speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
2188		if (!speed)
2189			continue;
2190
2191		link_mode = bnxt_link_modes[speed][sig_mode][media];
2192		if (!link_mode)
2193			continue;
2194
2195		linkmode_set_bit(link_mode, et_mask);
2196	}
2197}
2198
2199static void
2200bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
2201			u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
2202{
2203	if (media) {
2204		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
2205					  et_mask);
2206		return;
2207	}
2208
2209	/* list speeds for all media if unknown */
2210	for (media = 1; media < __BNXT_MEDIA_END; media++)
2211		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
2212					  et_mask);
2213}
2214
2215static void
2216bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
2217				    enum bnxt_media_type media,
2218				    struct ethtool_link_ksettings *lk_ksettings)
2219{
2220	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2221	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
2222	u16 phy_flags = bp->phy_flags;
2223
2224	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2225		sp_nrz = link_info->support_speeds2;
2226		sp_pam4 = link_info->support_speeds2;
2227		sp_pam4_112 = link_info->support_speeds2;
2228	} else {
2229		sp_nrz = link_info->support_speeds;
2230		sp_pam4 = link_info->support_pam4_speeds;
2231	}
2232	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2233				lk_ksettings->link_modes.supported);
2234	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2235				lk_ksettings->link_modes.supported);
2236	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2237				phy_flags, lk_ksettings->link_modes.supported);
2238}
2239
2240static void
2241bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
2242				enum bnxt_media_type media,
2243				struct ethtool_link_ksettings *lk_ksettings)
2244{
2245	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2246	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
2247	u16 phy_flags = bp->phy_flags;
2248
2249	sp_nrz = link_info->advertising;
2250	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2251		sp_pam4 = link_info->advertising;
2252		sp_pam4_112 = link_info->advertising;
2253	} else {
2254		sp_pam4 = link_info->advertising_pam4;
2255	}
2256	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2257				lk_ksettings->link_modes.advertising);
2258	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2259				lk_ksettings->link_modes.advertising);
2260	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2261				phy_flags, lk_ksettings->link_modes.advertising);
2262}
2263
2264static void
2265bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
2266			       enum bnxt_media_type media,
2267			       struct ethtool_link_ksettings *lk_ksettings)
2268{
2269	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2270	u16 phy_flags = bp->phy_flags;
2271
2272	bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
2273				BNXT_SIG_MODE_NRZ, phy_flags,
2274				lk_ksettings->link_modes.lp_advertising);
2275	bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
2276				BNXT_SIG_MODE_PAM4, phy_flags,
2277				lk_ksettings->link_modes.lp_advertising);
2278}
2279
/* Fold the state of one ethtool link mode bit (@mode, as requested by the
 * user in @et_mask) into the firmware speed mask *@speeds.
 *
 * Several ethtool modes (one per media type) can map to the same firmware
 * speed bit (@speed_msk).  *@delta tracks which firmware bits have already
 * been changed so each bit is toggled at most once, and modes matching the
 * currently installed media (@installed_media) win over the others.
 */
static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
			      u16 speed_msk, const unsigned long *et_mask,
			      enum ethtool_link_mode_bit_indices mode)
{
	bool mode_desired = linkmode_test_bit(mode, et_mask);

	/* mode == 0 marks an unmapped entry in bnxt_link_modes */
	if (!mode)
		return;

	/* enabled speeds for installed media should override */
	if (installed_media && mode_desired) {
		*speeds |= speed_msk;
		*delta |= speed_msk;
		return;
	}

	/* many to one mapping, only allow one change per fw_speed bit */
	if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
		*speeds ^= speed_msk;
		*delta |= speed_msk;
	}
}
2302
/* Translate the user's ethtool advertising mask (@et_mask) into the
 * firmware advertising masks stored in @link_info, walking every
 * speed/media combination for NRZ, PAM4 and (on speeds2 devices)
 * PAM4-112 signalling.  Per-encoding delta masks limit each firmware
 * speed bit to a single change; see bnxt_update_speed().
 */
static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
				    const unsigned long *et_mask)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
	enum bnxt_media_type media = bnxt_get_media(link_info);
	u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
	u32 delta_pam4_112 = 0;
	u32 delta_pam4 = 0;
	u32 delta_nrz = 0;
	int i, m;

	adv = &link_info->advertising;
	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
		/* speeds2 devices keep every encoding in one advertising mask */
		adv_pam4 = &link_info->advertising;
		adv_pam4_112 = &link_info->advertising;
		sp_msks = bnxt_nrz_speeds2_masks;
		sp_pam4_msks = bnxt_pam4_speeds2_masks;
		sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
	} else {
		adv_pam4 = &link_info->advertising_pam4;
		sp_msks = bnxt_nrz_speed_masks;
		sp_pam4_msks = bnxt_pam4_speed_masks;
	}
	for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
		/* accept any legal media from user */
		for (m = 1; m < __BNXT_MEDIA_END; m++) {
			bnxt_update_speed(&delta_nrz, m == media,
					  adv, sp_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
			bnxt_update_speed(&delta_pam4, m == media,
					  adv_pam4, sp_pam4_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
			/* adv_pam4_112 is only set on speeds2 devices */
			if (!adv_pam4_112)
				continue;

			bnxt_update_speed(&delta_pam4_112, m == media,
					  adv_pam4_112, sp_pam4_112_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
		}
	}
}
2345
2346static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
2347				struct ethtool_link_ksettings *lk_ksettings)
2348{
2349	u16 fec_cfg = link_info->fec_cfg;
2350
2351	if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
2352		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2353				 lk_ksettings->link_modes.advertising);
2354		return;
2355	}
2356	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
2357		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2358				 lk_ksettings->link_modes.advertising);
2359	if (fec_cfg & BNXT_FEC_ENC_RS)
2360		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2361				 lk_ksettings->link_modes.advertising);
2362	if (fec_cfg & BNXT_FEC_ENC_LLRS)
2363		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2364				 lk_ksettings->link_modes.advertising);
2365}
2366
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2367static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
2368				struct ethtool_link_ksettings *lk_ksettings)
2369{
2370	u16 fec_cfg = link_info->fec_cfg;
2371
2372	if (fec_cfg & BNXT_FEC_NONE) {
2373		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2374				 lk_ksettings->link_modes.supported);
2375		return;
2376	}
2377	if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
2378		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2379				 lk_ksettings->link_modes.supported);
2380	if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
2381		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2382				 lk_ksettings->link_modes.supported);
2383	if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
2384		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2385				 lk_ksettings->link_modes.supported);
2386}
2387
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2388u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
2389{
2390	switch (fw_link_speed) {
2391	case BNXT_LINK_SPEED_100MB:
2392		return SPEED_100;
2393	case BNXT_LINK_SPEED_1GB:
2394		return SPEED_1000;
2395	case BNXT_LINK_SPEED_2_5GB:
2396		return SPEED_2500;
2397	case BNXT_LINK_SPEED_10GB:
2398		return SPEED_10000;
2399	case BNXT_LINK_SPEED_20GB:
2400		return SPEED_20000;
2401	case BNXT_LINK_SPEED_25GB:
2402		return SPEED_25000;
2403	case BNXT_LINK_SPEED_40GB:
2404		return SPEED_40000;
2405	case BNXT_LINK_SPEED_50GB:
2406	case BNXT_LINK_SPEED_50GB_PAM4:
2407		return SPEED_50000;
2408	case BNXT_LINK_SPEED_100GB:
2409	case BNXT_LINK_SPEED_100GB_PAM4:
2410	case BNXT_LINK_SPEED_100GB_PAM4_112:
2411		return SPEED_100000;
2412	case BNXT_LINK_SPEED_200GB:
2413	case BNXT_LINK_SPEED_200GB_PAM4:
2414	case BNXT_LINK_SPEED_200GB_PAM4_112:
2415		return SPEED_200000;
2416	case BNXT_LINK_SPEED_400GB:
2417	case BNXT_LINK_SPEED_400GB_PAM4:
2418	case BNXT_LINK_SPEED_400GB_PAM4_112:
2419		return SPEED_400000;
2420	default:
2421		return SPEED_UNKNOWN;
2422	}
2423}
2424
2425static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
2426				    struct bnxt_link_info *link_info)
2427{
2428	struct ethtool_link_settings *base = &lk_ksettings->base;
2429
2430	if (link_info->link_state == BNXT_LINK_STATE_UP) {
2431		base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
2432		base->duplex = DUPLEX_HALF;
2433		if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2434			base->duplex = DUPLEX_FULL;
2435		lk_ksettings->lanes = link_info->active_lanes;
2436	} else if (!link_info->autoneg) {
2437		base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
2438		base->duplex = DUPLEX_HALF;
2439		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
2440			base->duplex = DUPLEX_FULL;
2441	}
2442}
2443
/* ethtool get_link_ksettings handler.  Reports supported/advertised/
 * link-partner modes, current speed and duplex, FEC and pause
 * advertisement, and port type.  Link state is read under bp->link_lock.
 * Always returns 0.
 */
static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct ethtool_link_settings *base = &lk_ksettings->base;
	enum ethtool_link_mode_bit_indices link_mode;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	enum bnxt_media_type media;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	base->duplex = DUPLEX_UNKNOWN;
	base->speed = SPEED_UNKNOWN;
	link_info = &bp->link_info;

	mutex_lock(&bp->link_lock);
	bnxt_get_ethtool_modes(link_info, lk_ksettings);
	media = bnxt_get_media(link_info);
	bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
	bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
	link_mode = bnxt_get_link_mode(link_info);
	if (link_mode != BNXT_LINK_MODE_UNKNOWN)
		/* derive speed/duplex/lanes from the single active mode */
		ethtool_params_from_link_mode(lk_ksettings, link_mode);
	else
		bnxt_get_default_speeds(lk_ksettings, link_info);

	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				 lk_ksettings->link_modes.advertising);
		base->autoneg = AUTONEG_ENABLE;
		bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
		/* link partner modes are only known while link is up */
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			bnxt_get_all_ethtool_lp_speeds(link_info, media,
						       lk_ksettings);
	} else {
		base->autoneg = AUTONEG_DISABLE;
	}

	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.advertising);
	} else {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.advertising);

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}
2507
/* Map an ethtool forced speed (and optional lane count) onto a firmware
 * forced speed code and signalling mode, then store it as the requested
 * link configuration with autoneg disabled.
 *
 * For speeds offered in multiple encodings, the branch order below picks
 * a variant; a non-zero @lanes narrows the choice (e.g. lanes == 1 skips
 * the 2-lane NRZ variant of 50G).  Caller holds bp->link_lock.
 *
 * Returns 0 on success, -EINVAL for an unsupported speed or lane count,
 * -EALREADY when this exact forced configuration is already requested.
 */
static int
bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_pam4_spds = link_info->support_pam4_speeds;
	u16 support_spds2 = link_info->support_speeds2;
	u16 support_spds = link_info->support_speeds;
	u8 sig_mode = BNXT_SIG_MODE_NRZ;
	u32 lanes_needed = 1;
	u16 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
		break;
	case SPEED_10000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		if (support_spds & BNXT_LINK_SPEED_MSK_20GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
			lanes_needed = 2;
		}
		break;
	case SPEED_25000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
			lanes_needed = 4;
		}
		break;
	case SPEED_50000:
		/* lanes == 1 forces the PAM4 single-lane variant */
		if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) ||
		     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) &&
		    lanes != 1) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
			lanes_needed = 2;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) {
			fw_speed = BNXT_LINK_SPEED_50GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	case SPEED_100000:
		/* preference: 4-lane NRZ, 2-lane PAM4, 1-lane PAM4-112,
		 * narrowed by the requested lane count
		 */
		if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) ||
		     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) &&
		    lanes != 2 && lanes != 1) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
			lanes_needed = 4;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 2;
		} else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) &&
			   lanes != 1) {
			fw_speed = BNXT_LINK_SPEED_100GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 2;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) {
			fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112;
			sig_mode = BNXT_SIG_MODE_PAM4_112;
		}
		break;
	case SPEED_200000:
		if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 4;
		} else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) &&
			   lanes != 2) {
			fw_speed = BNXT_LINK_SPEED_200GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 4;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) {
			fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112;
			sig_mode = BNXT_SIG_MODE_PAM4_112;
			lanes_needed = 2;
		}
		break;
	case SPEED_400000:
		if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) &&
		    lanes != 4) {
			fw_speed = BNXT_LINK_SPEED_400GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 8;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) {
			fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112;
			sig_mode = BNXT_SIG_MODE_PAM4_112;
			lanes_needed = 4;
		}
		break;
	}

	if (!fw_speed) {
		netdev_err(dev, "unsupported speed!\n");
		return -EINVAL;
	}

	/* lanes == 0 means "don't care"; otherwise it must match exactly */
	if (lanes && lanes != lanes_needed) {
		netdev_err(dev, "unsupported number of lanes for speed\n");
		return -EINVAL;
	}

	if (link_info->req_link_speed == fw_speed &&
	    link_info->req_signal_mode == sig_mode &&
	    link_info->autoneg == 0)
		return -EALREADY;

	link_info->req_link_speed = fw_speed;
	link_info->req_signal_mode = sig_mode;
	link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
	link_info->autoneg = 0;
	link_info->advertising = 0;
	link_info->advertising_pam4 = 0;

	return 0;
}
2645
2646u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
2647{
2648	u16 fw_speed_mask = 0;
2649
2650	/* only support autoneg at speed 100, 1000, and 10000 */
2651	if (advertising & (ADVERTISED_100baseT_Full |
2652			   ADVERTISED_100baseT_Half)) {
2653		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
2654	}
2655	if (advertising & (ADVERTISED_1000baseT_Full |
2656			   ADVERTISED_1000baseT_Half)) {
2657		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
2658	}
2659	if (advertising & ADVERTISED_10000baseT_Full)
2660		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
2661
2662	if (advertising & ADVERTISED_40000baseCR4_Full)
2663		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
2664
2665	return fw_speed_mask;
2666}
2667
/* ethtool set_link_ksettings handler.  Either enables autoneg with the
 * requested advertising mask (falling back to all supported speeds when
 * the mask maps to nothing), or forces a specific speed/lane count.
 * Changes are pushed to firmware only when the interface is running.
 * Returns 0 on success or a negative errno.
 */
static int bnxt_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u32 speed, lanes = 0;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		bnxt_set_ethtool_speeds(link_info,
					lk_ksettings->link_modes.advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		/* nothing mapped: advertise everything the device supports */
		if (!link_info->advertising && !link_info->advertising_pam4) {
			link_info->advertising = link_info->support_auto_speeds;
			link_info->advertising_pam4 =
				link_info->support_pam4_auto_speeds;
		}
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
			set_pause = true;
	} else {
		u8 phy_type = link_info->phy_type;

		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		lanes = lk_ksettings->lanes;
		rc = bnxt_force_link_speed(dev, speed, lanes);
		if (rc) {
			/* -EALREADY: requested config already in place */
			if (rc == -EALREADY)
				rc = 0;
			goto set_setting_exit;
		}
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
2728
2729static int bnxt_get_fecparam(struct net_device *dev,
2730			     struct ethtool_fecparam *fec)
2731{
2732	struct bnxt *bp = netdev_priv(dev);
2733	struct bnxt_link_info *link_info;
2734	u8 active_fec;
2735	u16 fec_cfg;
2736
2737	link_info = &bp->link_info;
2738	fec_cfg = link_info->fec_cfg;
2739	active_fec = link_info->active_fec_sig_mode &
2740		     PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
2741	if (fec_cfg & BNXT_FEC_NONE) {
2742		fec->fec = ETHTOOL_FEC_NONE;
2743		fec->active_fec = ETHTOOL_FEC_NONE;
2744		return 0;
2745	}
2746	if (fec_cfg & BNXT_FEC_AUTONEG)
2747		fec->fec |= ETHTOOL_FEC_AUTO;
2748	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
2749		fec->fec |= ETHTOOL_FEC_BASER;
2750	if (fec_cfg & BNXT_FEC_ENC_RS)
2751		fec->fec |= ETHTOOL_FEC_RS;
2752	if (fec_cfg & BNXT_FEC_ENC_LLRS)
2753		fec->fec |= ETHTOOL_FEC_LLRS;
2754
2755	switch (active_fec) {
2756	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
2757		fec->active_fec |= ETHTOOL_FEC_BASER;
2758		break;
2759	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
2760	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
2761	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
2762		fec->active_fec |= ETHTOOL_FEC_RS;
2763		break;
2764	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
2765	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
2766		fec->active_fec |= ETHTOOL_FEC_LLRS;
2767		break;
2768	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
2769		fec->active_fec |= ETHTOOL_FEC_OFF;
2770		break;
2771	}
2772	return 0;
2773}
2774
2775static void bnxt_get_fec_stats(struct net_device *dev,
2776			       struct ethtool_fec_stats *fec_stats)
2777{
2778	struct bnxt *bp = netdev_priv(dev);
2779	u64 *rx;
2780
2781	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
2782		return;
2783
2784	rx = bp->rx_port_stats_ext.sw_stats;
2785	fec_stats->corrected_bits.total =
2786		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
2787
2788	if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
2789		return;
2790
2791	fec_stats->corrected_blocks.total =
2792		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
2793	fec_stats->uncorrectable_blocks.total =
2794		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
2795}
2796
2797static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
2798					 u32 fec)
2799{
2800	u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
2801
2802	if (fec & ETHTOOL_FEC_BASER)
2803		fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
2804	else if (fec & ETHTOOL_FEC_RS)
2805		fw_fec |= BNXT_FEC_RS_ON(link_info);
2806	else if (fec & ETHTOOL_FEC_LLRS)
2807		fw_fec |= BNXT_FEC_LLRS_ON;
2808	return fw_fec;
2809}
2810
/* ethtool --set-fec handler.  Validates the requested FEC modes against
 * the device's capabilities, builds the PORT_PHY_CFG flags, sends the
 * request with a PHY reset, and refreshes the cached link state on
 * success.  Returns 0 or a negative errno.
 */
static int bnxt_set_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fecparam)
{
	struct hwrm_port_phy_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u32 new_cfg, fec = fecparam->fec;
	u16 fec_cfg;
	int rc;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	/* device has no configurable FEC at all */
	if (fec_cfg & BNXT_FEC_NONE)
		return -EOPNOTSUPP;

	if (fec & ETHTOOL_FEC_OFF) {
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
			  BNXT_FEC_ALL_OFF(link_info);
		goto apply_fec;
	}
	/* reject any requested mode the device does not support */
	if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
	    ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
	    ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
	    ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
		return -EINVAL;

	if (fec & ETHTOOL_FEC_AUTO) {
		/* FEC autoneg requires link autoneg to be on */
		if (!link_info->autoneg)
			return -EINVAL;
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
	} else {
		new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
	}

apply_fec:
	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;
	req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	/* update current settings */
	if (!rc) {
		mutex_lock(&bp->link_lock);
		bnxt_update_link(bp, false);
		mutex_unlock(&bp->link_lock);
	}
	return rc;
}
2859
2860static void bnxt_get_pauseparam(struct net_device *dev,
2861				struct ethtool_pauseparam *epause)
2862{
2863	struct bnxt *bp = netdev_priv(dev);
2864	struct bnxt_link_info *link_info = &bp->link_info;
2865
2866	if (BNXT_VF(bp))
2867		return;
2868	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
2869	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
2870	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
2871}
2872
/* ethtool pause statistics callback.  Copies the pause frame counters
 * from the port statistics mirrored in host memory.  Not available on
 * VFs or when the device does not support port stats.
 */
static void bnxt_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *epstat)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	/* TX counters follow the RX block inside the same stats buffer */
	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
	epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
}
2888
/* ethtool -A handler.  Updates the requested flow control settings
 * under bp->link_lock and pushes them to the firmware when the
 * interface is running.  Pause autoneg requires speed autoneg to be
 * enabled first (-EINVAL otherwise).
 */
static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (epause->autoneg) {
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
			rc = -EINVAL;
			goto pause_exit;
		}

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	/* Rebuild req_flow_ctrl from the requested rx/tx pause bits */
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);

pause_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
2930
/* ethtool get_link callback: report the driver's cached link state. */
static u32 bnxt_get_link(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return BNXT_LINK_IS_UP(bp);
}
2938
/* Query NVM device information (HWRM_NVM_GET_DEV_INFO) and copy the
 * full firmware response into @nvm_dev_info.  Not permitted on VFs.
 * Returns 0 on success or a negative errno.
 */
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
			       struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
{
	struct hwrm_nvm_get_dev_info_output *resp;
	struct hwrm_nvm_get_dev_info_input *req;
	int rc;

	if (BNXT_VF(bp))
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
	if (rc)
		return rc;

	/* hold the request so the response buffer stays valid for the copy */
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		memcpy(nvm_dev_info, resp, sizeof(*resp));
	hwrm_req_drop(bp, req);
	return rc;
}
2960
/* Log the common "no admin privileges" message emitted when a flash or
 * reset HWRM command fails with -EACCES.
 */
static void bnxt_print_admin_err(struct bnxt *bp)
{
	netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
2965
/* Forward declaration; the definition appears later in this file. */
int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
			 u32 *data_length);
2969
/* Write an NVRAM directory entry via HWRM_NVM_WRITE.  When @data is
 * non-NULL, @data_len bytes are copied into a DMA slice attached to the
 * request; a NULL @data with only @dir_item_len set (re)creates/resizes
 * the entry without writing payload.  Returns 0 on success or a
 * negative errno; -EACCES additionally logs the admin privilege error.
 */
int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
		     u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
		     u32 dir_item_len, const u8 *data,
		     size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_write_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
	if (rc)
		return rc;

	if (data_len && data) {
		dma_addr_t dma_handle;
		u8 *kmem;

		kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
		if (!kmem) {
			hwrm_req_drop(bp, req);
			return -ENOMEM;
		}

		req->dir_data_length = cpu_to_le32(data_len);

		memcpy(kmem, data, data_len);
		req->host_src_addr = cpu_to_le64(dma_handle);
	}

	/* NVM writes can be slow; allow the maximum firmware timeout */
	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
	req->dir_type = cpu_to_le16(dir_type);
	req->dir_ordinal = cpu_to_le16(dir_ordinal);
	req->dir_ext = cpu_to_le16(dir_ext);
	req->dir_attr = cpu_to_le16(dir_attr);
	req->dir_item_length = cpu_to_le32(dir_item_len);
	rc = hwrm_req_send(bp, req);

	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
3011
/* Send HWRM_FW_RESET for the given embedded processor.
 * @proc_type:  FW_RESET_REQ_EMBEDDED_PROC_TYPE_* processor selector
 * @self_reset: FW_RESET_REQ_SELFRST_STATUS_* self-reset policy
 * @flags:      request flags, e.g. FW_RESET_REQ_FLAGS_RESET_GRACEFUL
 * AP resets are sent silently (no error logging on failure); returns
 * -EPERM when the firmware currently inhibits resets.
 */
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
			     u8 self_reset, u8 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_fw_reset_input *req;
	int rc;

	if (!bnxt_hwrm_reset_permitted(bp)) {
		netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
		return -EPERM;
	}

	rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
	if (rc)
		return rc;

	req->embedded_proc_type = proc_type;
	req->selfrst_status = self_reset;
	req->flags = flags;

	if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
		rc = hwrm_req_send_silent(bp, req);
	} else {
		rc = hwrm_req_send(bp, req);
		if (rc == -EACCES)
			bnxt_print_admin_err(bp);
	}
	return rc;
}
3041
/* Reset the embedded processor that owns a just-flashed directory entry
 * type so the new image takes effect.  ChiMP and APE images arm a
 * self-reset on the next PCIe reset; unknown types return -EINVAL.
 */
static int bnxt_firmware_reset(struct net_device *dev,
			       enum bnxt_nvm_directory_type dir_type)
{
	u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
	u8 proc_type, flags = 0;

	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/*       (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	default:
		return -EINVAL;
	}

	return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
}
3078
3079static int bnxt_firmware_reset_chip(struct net_device *dev)
3080{
3081	struct bnxt *bp = netdev_priv(dev);
3082	u8 flags = 0;
3083
3084	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
3085		flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
3086
3087	return bnxt_hwrm_firmware_reset(dev,
3088					FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
3089					FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
3090					flags);
3091}
3092
/* Reset only the AP processor; no chip self-reset is requested. */
static int bnxt_firmware_reset_ap(struct net_device *dev)
{
	return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
					0);
}
3099
/* Validate and flash an APE binary format firmware image, then reset
 * the owning embedded processor so it takes effect.  The image must
 * carry a bnxt_fw_header whose signature, code type and device family
 * all match, plus a trailing CRC32 computed over the preceding bytes.
 * Returns 0 on success or -EINVAL for any validation failure.
 */
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int	rc = 0;
	u16	code_type;
	u32	stored_crc;
	u32	calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	/* Map the directory entry type to the code type expected in the
	 * image header.
	 */
	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}
3179
/* Validate and flash a microcode image.  Unlike the APE binary format,
 * microcode images carry a bnxt_ucode_trailer at the end of the file;
 * its signature, directory type and length are checked, along with a
 * trailing CRC32 over the preceding bytes.  No processor reset is done.
 */
static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
		sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);

	return rc;
}
3230
/* True for directory entry types stored in the APE binary format,
 * which bnxt_flash_firmware() validates via bnxt_fw_header.
 */
static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		return true;
	}

	return false;
}
3248
/* True for executable directory entry types that use the microcode
 * trailer format (flashed by bnxt_flash_microcode()) rather than the
 * APE binary header.
 */
static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_AVS:
	case BNX_DIR_TYPE_EXP_ROM_MBA:
	case BNX_DIR_TYPE_PCIE:
	case BNX_DIR_TYPE_TSCF_UCODE:
	case BNX_DIR_TYPE_EXT_PHY:
	case BNX_DIR_TYPE_CCM:
	case BNX_DIR_TYPE_ISCSI_BOOT:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
		return true;
	}

	return false;
}
3266
3267static bool bnxt_dir_type_is_executable(u16 dir_type)
3268{
3269	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
3270		bnxt_dir_type_is_other_exec_format(dir_type);
3271}
3272
/* Load a file via request_firmware() and flash it into the directory
 * entry @dir_type, dispatching on the entry's on-NVM format: APE binary
 * images and microcode images are validated first; anything else is
 * written raw with bnxt_flash_nvram().
 */
static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 u16 dir_type,
					 const char *filename)
{
	const struct firmware  *fw;
	int			rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "Error %d requesting firmware file: %s\n",
			   rc, filename);
		return rc;
	}
	if (bnxt_dir_type_is_ape_bin_format(dir_type))
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else if (bnxt_dir_type_is_other_exec_format(dir_type))
		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
	else
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, 0, fw->data, fw->size);
	release_firmware(fw);
	return rc;
}
3296
/* Human-readable package install errors reported to both the kernel
 * log and (when present) the netlink extended ack via
 * BNXT_NVM_ERR_MSG().
 */
#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
#define MSG_INVALID_PKG "PKG install error : Invalid package"
#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
#define MSG_INVALID_DEV "PKG install error : Invalid device"
#define MSG_INTERNAL_ERR "PKG install error : Internal error"
#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
3307
/* Map a HWRM_NVM_INSTALL_UPDATE result code to a negative errno,
 * logging a categorized error message (integrity, invalid package,
 * authentication, wrong device, or internal error) on the way out.
 */
static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
				    struct netlink_ext_ack *extack)
{
	switch (result) {
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
		return -EINVAL;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
		return -ENOPKG;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
		return -EPERM;
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
		return -EOPNOTSUPP;
	default:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
		return -EIO;
	}
}
3350
/* Preferred DMA buffer size and NVM_MODIFY batch-mode flags used when
 * streaming a package into the UPDATE area.
 */
#define BNXT_PKG_DMA_SIZE	0x40000
#define BNXT_NVM_MORE_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
3354
/* Ensure the NVRAM UPDATE entry is large enough to hold @fw_size bytes,
 * growing it (rounded up to 4K) via bnxt_flash_nvram() when needed.
 * Errors are reported through @extack and the kernel log.
 */
static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
				    struct netlink_ext_ack *extack)
{
	u32 item_len;
	int rc;

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
				  &item_len, NULL);
	if (rc) {
		BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
		return rc;
	}

	if (fw_size > item_len) {
		rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
				      BNX_DIR_ORDINAL_FIRST, 0, 1,
				      round_up(fw_size, 4096), NULL, 0);
		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
			return rc;
		}
	}
	return 0;
}
3380
/* Stream a firmware package into the NVRAM UPDATE area in batches via
 * HWRM_NVM_MODIFY, then ask the firmware to install it with
 * HWRM_NVM_INSTALL_UPDATE.  On a fragmentation error the firmware is
 * permitted to defragment, and if that clears the NVM area the UPDATE
 * entry is recreated and the whole flash sequence retried once.
 * Errors are reported through @extack and the kernel log.
 */
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
				   u32 install_type, struct netlink_ext_ack *extack)
{
	struct hwrm_nvm_install_update_input *install;
	struct hwrm_nvm_install_update_output *resp;
	struct hwrm_nvm_modify_input *modify;
	struct bnxt *bp = netdev_priv(dev);
	bool defrag_attempted = false;
	dma_addr_t dma_handle;
	u8 *kmem = NULL;
	u32 modify_len;
	u32 item_len;
	u8 cmd_err;
	u16 index;
	int rc;

	/* resize before flashing larger image than available space */
	rc = bnxt_resize_update_entry(dev, fw->size, extack);
	if (rc)
		return rc;

	bnxt_hwrm_fw_set_time(bp);

	rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
	if (rc)
		return rc;

	/* Try allocating a large DMA buffer first.  Older fw will
	 * cause excessive NVRAM erases when using small blocks.
	 */
	modify_len = roundup_pow_of_two(fw->size);
	modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
	while (1) {
		/* halve the slice until it fits or reaches one page */
		kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
		if (!kmem && modify_len > PAGE_SIZE)
			modify_len /= 2;
		else
			break;
	}
	if (!kmem) {
		hwrm_req_drop(bp, modify);
		return -ENOMEM;
	}

	rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
	if (rc) {
		hwrm_req_drop(bp, modify);
		return rc;
	}

	hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
	hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);

	hwrm_req_hold(bp, modify);
	modify->host_src_addr = cpu_to_le64(dma_handle);

	resp = hwrm_req_hold(bp, install);
	/* a zero low word means the install type is in the high word */
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	install->install_type = cpu_to_le32(install_type);

	do {
		u32 copied = 0, len = modify_len;

		rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
					  BNX_DIR_ORDINAL_FIRST,
					  BNX_DIR_EXT_NONE,
					  &index, &item_len, NULL);
		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
			break;
		}
		if (fw->size > item_len) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
			rc = -EFBIG;
			break;
		}

		modify->dir_idx = cpu_to_le16(index);

		/* batch mode when the package exceeds the DMA buffer */
		if (fw->size > modify_len)
			modify->flags = BNXT_NVM_MORE_FLAG;
		while (copied < fw->size) {
			u32 balance = fw->size - copied;

			if (balance <= modify_len) {
				len = balance;
				if (copied)
					modify->flags |= BNXT_NVM_LAST_FLAG;
			}
			memcpy(kmem, fw->data + copied, len);
			modify->len = cpu_to_le32(len);
			modify->offset = cpu_to_le32(copied);
			rc = hwrm_req_send(bp, modify);
			if (rc)
				goto pkg_abort;
			copied += len;
		}

		rc = hwrm_req_send_silent(bp, install);
		if (!rc)
			break;

		if (defrag_attempted) {
			/* We have tried to defragment already in the previous
			 * iteration. Return with the result for INSTALL_UPDATE
			 */
			break;
		}

		cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

		switch (cmd_err) {
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
			rc = -EALREADY;
			break;
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
			/* retry the install with defragmentation allowed */
			install->flags =
				cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);

			rc = hwrm_req_send_silent(bp, install);
			if (!rc)
				break;

			cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

			if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
				/* FW has cleared NVM area, driver will create
				 * UPDATE directory and try the flash again
				 */
				defrag_attempted = true;
				install->flags = 0;
				rc = bnxt_flash_nvram(bp->dev,
						      BNX_DIR_TYPE_UPDATE,
						      BNX_DIR_ORDINAL_FIRST,
						      0, 0, item_len, NULL, 0);
				if (!rc)
					break;
			}
			fallthrough;
		default:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
		}
	} while (defrag_attempted && !rc);

pkg_abort:
	hwrm_req_drop(bp, modify);
	hwrm_req_drop(bp, install);

	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = nvm_update_err_to_stderr(dev, resp->result, extack);
	}
	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
3540
3541static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
3542					u32 install_type, struct netlink_ext_ack *extack)
3543{
3544	const struct firmware *fw;
3545	int rc;
3546
3547	rc = request_firmware(&fw, filename, &dev->dev);
3548	if (rc != 0) {
3549		netdev_err(dev, "PKG error %d requesting file: %s\n",
3550			   rc, filename);
3551		return rc;
3552	}
3553
3554	rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);
3555
3556	release_firmware(fw);
3557
3558	return rc;
3559}
3560
/* ethtool -f entry point.  PF only.  A region of ETHTOOL_FLASH_ALL_REGIONS
 * or above 0xffff selects whole-package install; otherwise the region is
 * a single NVRAM directory entry type to flash.
 */
static int bnxt_flash_device(struct net_device *dev,
			     struct ethtool_flash *flash)
{
	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
		netdev_err(dev, "flashdev not supported from a virtual function\n");
		return -EINVAL;
	}

	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
	    flash->region > 0xffff)
		return bnxt_flash_package_from_file(dev, flash->data,
						    flash->region, NULL);

	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}
3576
/* Query the NVRAM directory geometry (HWRM_NVM_GET_DIR_INFO): number of
 * entries and the per-entry size in bytes.  Returns 0 on success or a
 * negative errno.
 */
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
	struct hwrm_nvm_get_dir_info_output *output;
	struct hwrm_nvm_get_dir_info_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
	if (rc)
		return rc;

	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		*entries = le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}
3597
/* ethtool get_eeprom_len callback.  VFs have no NVRAM access (0). */
static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp))
		return 0;

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}
3610
/* Fill @data with the NVRAM directory (the special eeprom offset-0
 * read): the first two bytes are the entry count and per-entry size,
 * followed by the raw directory entries, truncated to @len and padded
 * with 0xff.
 */
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input *req;

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	if (!dir_entries || !entry_length)
		return -EIO;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;	/* u32 -> u8; values > 255 are truncated */
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
	if (rc)
		return rc;

	buflen = mul_u32_u32(dir_entries, entry_length);
	buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}
	req->host_dest_addr = cpu_to_le64(dma_handle);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	hwrm_req_drop(bp, req);
	return rc;
}
3657
/* Read @length bytes at @offset from the NVRAM directory entry at
 * @index (HWRM_NVM_READ) into @data.  Returns 0 on success or a
 * negative errno; a zero @length is rejected with -EINVAL.
 */
int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			u32 length, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u8 *buf;
	dma_addr_t dma_handle;
	struct hwrm_nvm_read_input *req;

	if (!length)
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
	if (rc)
		return rc;

	buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}

	req->host_dest_addr = cpu_to_le64(dma_handle);
	req->dir_idx = cpu_to_le16(index);
	req->offset = cpu_to_le32(offset);
	req->len = cpu_to_le32(length);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, length);
	hwrm_req_drop(bp, req);
	return rc;
}
3692
/* Look up a directory entry by (@type, @ordinal, @ext) using
 * HWRM_NVM_FIND_DIR_ENTRY with an exact ordinal match.  Any of @index,
 * @item_length and @data_length may be NULL when the caller does not
 * need that value.  Sent silently: a missing entry is an expected case.
 */
int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
			 u32 *data_length)
{
	struct hwrm_nvm_find_dir_entry_output *output;
	struct hwrm_nvm_find_dir_entry_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
	if (rc)
		return rc;

	req->enables = 0;
	req->dir_idx = 0;
	req->dir_type = cpu_to_le16(type);
	req->dir_ordinal = cpu_to_le16(ordinal);
	req->dir_ext = cpu_to_le16(ext);
	req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc == 0) {
		if (index)
			*index = le16_to_cpu(output->dir_idx);
		if (item_length)
			*item_length = le32_to_cpu(output->dir_item_length);
		if (data_length)
			*data_length = le32_to_cpu(output->dir_data_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}
3725
/* Scan the tab/newline-delimited package log in @data and return a
 * pointer to field number @desired_field (0-based) of the last line
 * that contains it.  The buffer is modified in place: the final byte is
 * overwritten with a NUL and tab/newline separators are replaced with
 * NULs as parsing proceeds, so the returned pointer is a NUL-terminated
 * string inside @data.  Returns NULL when the field is absent or
 * @datalen < 1.
 */
static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char	*retval = NULL;
	char	*p;
	char	*value;
	int	field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			/* advance to the end of the current field */
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			if (*p != '\t')
				break;
			*p = 0;		/* terminate the captured field */
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;			/* terminate the line */
	}
	return retval;
}
3758
/* Read the package version string from the PKG_LOG NVRAM entry into
 * @ver (at most @size bytes).  Returns 0 on success, -ENOENT when the
 * log has no version field starting with a digit, or another negative
 * errno on NVRAM/alloc failure.
 */
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 index = 0;
	char *pkgver;
	u32 pkglen;
	u8 *pkgbuf;
	int rc;

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				  &index, NULL, &pkglen);
	if (rc)
		return rc;

	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
	if (!pkgbuf) {
		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
			pkglen);
		return -ENOMEM;
	}

	rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
	if (rc)
		goto err;

	/* sanity check: a valid package version begins with a digit */
	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
				   pkglen);
	if (pkgver && *pkgver != 0 && isdigit(*pkgver))
		strscpy(ver, pkgver, size);
	else
		rc = -ENOENT;

err:
	kfree(pkgbuf);

	return rc;
}
3797
3798static void bnxt_get_pkgver(struct net_device *dev)
3799{
3800	struct bnxt *bp = netdev_priv(dev);
3801	char buf[FW_VER_STR_LEN];
3802	int len;
3803
3804	if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
3805		len = strlen(bp->fw_ver_str);
3806		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
3807			 "/pkg %s", buf);
3808	}
 
 
3809}
3810
/* ethtool -e handler.  Offset 0 returns the NVRAM directory; otherwise
 * the top byte of the offset selects the directory index (1-based) and
 * the low 24 bits the byte offset within that entry.
 */
static int bnxt_get_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	u32 index;
	u32 offset;

	if (eeprom->offset == 0) /* special offset value to get directory */
		return bnxt_get_nvram_directory(dev, eeprom->len, data);

	index = eeprom->offset >> 24;
	offset = eeprom->offset & 0xffffff;

	if (index == 0) {
		netdev_err(dev, "unsupported index value: %d\n", index);
		return -EINVAL;
	}

	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
}
3831
/* Erase the NVRAM directory entry at @index via HWRM_NVM_ERASE_DIR_ENTRY. */
static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
	struct hwrm_nvm_erase_dir_entry_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
	if (rc)
		return rc;

	req->dir_idx = cpu_to_le16(index);
	return hwrm_req_send(bp, req);
}
3845
/* ethtool -E handler.  PF only.  The magic encodes the operation: a top
 * half of 0xffff selects directory operations (currently only erase,
 * dir_op 0x0e, with the 1-based index in the low byte); any other value
 * is the directory type of an NVM item to create or rewrite, with the
 * ordinal and attributes packed into eeprom->offset.  Executable types
 * must go through the flash path instead (-EOPNOTSUPP).
 */
static int bnxt_set_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 index, dir_op;
	u16 type, ext, ordinal, attr;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "NVM write not supported from a virtual function\n");
		return -EINVAL;
	}

	type = eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = eeprom->magic & 0xff;
		dir_op = eeprom->magic >> 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			/* offset must be the bitwise complement of magic */
			if (eeprom->offset != ~eeprom->magic)
				return -EINVAL;
			return bnxt_erase_nvram_directory(dev, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type))
		return -EOPNOTSUPP;
	ext = eeprom->magic & 0xffff;
	ordinal = eeprom->offset >> 16;
	attr = eeprom->offset & 0xffff;

	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
				eeprom->len);
}
3886
/* ethtool --set-eee handler.  Validates the request against the PHY's
 * EEE capability, autoneg state, LPI timer limits and advertised
 * speeds, stores the result in bp->eee under bp->link_lock, and pushes
 * the new link settings to firmware when the interface is running.
 */
static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	u32 advertising;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
	if (!edata->eee_enabled)
		goto eee_ok;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		rc = -EINVAL;
		goto eee_exit;
	}
	if (edata->tx_lpi_enabled) {
		/* a zero lpi_tmr_hi means the device imposes no timer range;
		 * keep the previously stored timer value in that case
		 */
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			rc = -EINVAL;
			goto eee_exit;
		} else if (!bp->lpi_tmr_hi) {
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	if (!edata->advertised) {
		/* default to everything both advertised and EEE-capable */
		edata->advertised = advertising & eee->supported;
	} else if (edata->advertised & ~advertising) {
		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
			    edata->advertised, advertising);
		rc = -EINVAL;
		goto eee_exit;
	}

	eee->advertised = edata->advertised;
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

eee_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
3944
3945static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
3946{
3947	struct bnxt *bp = netdev_priv(dev);
3948
3949	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
3950		return -EOPNOTSUPP;
3951
3952	*edata = bp->eee;
3953	if (!bp->eee.eee_enabled) {
3954		/* Preserve tx_lpi_timer so that the last value will be used
3955		 * by default when it is re-enabled.
3956		 */
3957		edata->advertised = 0;
3958		edata->tx_lpi_enabled = 0;
3959	}
3960
3961	if (!bp->eee.eee_active)
3962		edata->lp_advertised = 0;
3963
3964	return 0;
3965}
3966
/* Read @data_length bytes from the module EEPROM at i2c address
 * @i2c_addr, page @page_number (optionally bank @bank), starting at
 * @start_addr, into @buf.  Firmware caps each PORT_PHY_I2C_READ
 * response, so the transfer is chunked by BNXT_MAX_PHY_I2C_RESP_SIZE.
 * Returns 0 on success or a negative errno/HWRM error.
 */
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
					    u16 page_number, u8 bank,
					    u16 start_addr, u16 data_length,
					    u8 *buf)
{
	struct hwrm_port_phy_i2c_read_output *output;
	struct hwrm_port_phy_i2c_read_input *req;
	int rc, byte_offset = 0;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
	if (rc)
		return rc;

	/* hold the request so it can be re-sent once per chunk */
	output = hwrm_req_hold(bp, req);
	req->i2c_slave_addr = i2c_addr;
	req->page_number = cpu_to_le16(page_number);
	req->port_id = cpu_to_le16(bp->pf.port_id);
	do {
		u16 xfer_size;

		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
		data_length -= xfer_size;
		req->page_offset = cpu_to_le16(start_addr + byte_offset);
		req->data_length = xfer_size;
		/* page_offset and bank_number are optional fields; only
		 * mark them valid when non-zero
		 */
		req->enables =
			cpu_to_le32((start_addr + byte_offset ?
				     PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
				     0) |
				    (bank ?
				     PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
				     0));
		rc = hwrm_req_send(bp, req);
		if (!rc)
			memcpy(buf + byte_offset, output->data, xfer_size);

		byte_offset += xfer_size;
	} while (!rc && data_length > 0);
	hwrm_req_drop(bp, req);

	return rc;
}
4007
4008static int bnxt_get_module_info(struct net_device *dev,
4009				struct ethtool_modinfo *modinfo)
4010{
4011	u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
4012	struct bnxt *bp = netdev_priv(dev);
4013	int rc;
4014
4015	/* No point in going further if phy status indicates
4016	 * module is not inserted or if it is powered down or
4017	 * if it is of type 10GBase-T
4018	 */
4019	if (bp->link_info.module_status >
4020		PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
4021		return -EOPNOTSUPP;
4022
4023	/* This feature is not supported in older firmware versions */
4024	if (bp->hwrm_spec_code < 0x10202)
4025		return -EOPNOTSUPP;
4026
4027	rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
4028					      SFF_DIAG_SUPPORT_OFFSET + 1,
4029					      data);
4030	if (!rc) {
4031		u8 module_id = data[0];
4032		u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
4033
4034		switch (module_id) {
4035		case SFF_MODULE_ID_SFP:
4036			modinfo->type = ETH_MODULE_SFF_8472;
4037			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
4038			if (!diag_supported)
4039				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
4040			break;
4041		case SFF_MODULE_ID_QSFP:
4042		case SFF_MODULE_ID_QSFP_PLUS:
4043			modinfo->type = ETH_MODULE_SFF_8436;
4044			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
4045			break;
4046		case SFF_MODULE_ID_QSFP28:
4047			modinfo->type = ETH_MODULE_SFF_8636;
4048			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
4049			break;
4050		default:
4051			rc = -EOPNOTSUPP;
4052			break;
4053		}
4054	}
4055	return rc;
4056}
4057
/* ethtool -m dump handler.  The flat eeprom offset space maps the first
 * ETH_MODULE_SFF_8436_LEN bytes to i2c address A0 and the remainder to
 * A2, so a single request may span both devices.
 */
static int bnxt_get_module_eeprom(struct net_device *dev,
				  struct ethtool_eeprom *eeprom,
				  u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u16  start = eeprom->offset, length = eeprom->len;
	int rc = 0;

	memset(data, 0, eeprom->len);

	/* Read A0 portion of the EEPROM */
	if (start < ETH_MODULE_SFF_8436_LEN) {
		/* clamp to the end of the A0 region */
		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
			length = ETH_MODULE_SFF_8436_LEN - start;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
						      start, length, data);
		if (rc)
			return rc;
		start += length;
		data += length;
		length = eeprom->len - length;	/* bytes left for A2 */
	}

	/* Read A2 portion of the EEPROM */
	if (length) {
		start -= ETH_MODULE_SFF_8436_LEN;	/* rebase into A2 space */
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
						      start, length, data);
	}
	return rc;
}
4089
/* Return 0 if the transceiver module is usable; otherwise set an extack
 * message describing why and return -EINVAL.
 * (NL_SET_ERR_MSG_MOD requires string literals, hence the per-case calls.)
 */
static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
{
	/* anything up to WARNINGMSG still allows eeprom access */
	if (bp->link_info.module_status <=
	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return 0;

	switch (bp->link_info.module_status) {
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
		NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
		break;
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
		NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
		break;
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
		NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown error");
		break;
	}
	return -EINVAL;
}
4112
4113static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
4114					  const struct ethtool_module_eeprom *page_data,
4115					  struct netlink_ext_ack *extack)
4116{
4117	struct bnxt *bp = netdev_priv(dev);
4118	int rc;
4119
4120	rc = bnxt_get_module_status(bp, extack);
4121	if (rc)
4122		return rc;
4123
4124	if (bp->hwrm_spec_code < 0x10202) {
4125		NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
4126		return -EINVAL;
4127	}
4128
4129	if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
4130		NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
4131		return -EINVAL;
4132	}
4133
4134	rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
4135					      page_data->page, page_data->bank,
4136					      page_data->offset,
4137					      page_data->length,
4138					      page_data->data);
4139	if (rc) {
4140		NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed");
4141		return rc;
4142	}
4143	return page_data->length;
4144}
4145
4146static int bnxt_nway_reset(struct net_device *dev)
4147{
4148	int rc = 0;
4149
4150	struct bnxt *bp = netdev_priv(dev);
4151	struct bnxt_link_info *link_info = &bp->link_info;
4152
4153	if (!BNXT_PHY_CFG_ABLE(bp))
4154		return -EOPNOTSUPP;
4155
4156	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
4157		return -EINVAL;
4158
4159	if (netif_running(dev))
4160		rc = bnxt_hwrm_set_link_setting(bp, true, false);
4161
4162	return rc;
4163}
4164
/* ethtool -p (port identify) handler: blink all port LEDs while active
 * and restore their default state when done.  PF-only.
 */
static int bnxt_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct hwrm_port_led_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_pf_info *pf = &bp->pf;
	struct bnxt_led_cfg *led_cfg;
	u8 led_state;
	__le16 duration;
	int rc, i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	if (state == ETHTOOL_ID_ACTIVE) {
		/* alternate blink with 500 ms on/off periods */
		led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
		duration = cpu_to_le16(500);
	} else if (state == ETHTOOL_ID_INACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
		duration = cpu_to_le16(0);
	} else {
		return -EINVAL;
	}
	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
	if (rc)
		return rc;

	req->port_id = cpu_to_le16(pf->port_id);
	req->num_leds = bp->num_leds;
	/* the per-LED fields of the request form an array starting at
	 * led0_id; walk it as an array of bnxt_led_cfg
	 */
	led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req->enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}
	return hwrm_req_send(bp, req);
}
4205
4206static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
4207{
4208	struct hwrm_selftest_irq_input *req;
4209	int rc;
4210
4211	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
4212	if (rc)
4213		return rc;
4214
4215	req->cmpl_ring = cpu_to_le16(cmpl_ring);
4216	return hwrm_req_send(bp, req);
4217}
4218
4219static int bnxt_test_irq(struct bnxt *bp)
4220{
4221	int i;
4222
4223	for (i = 0; i < bp->cp_nr_rings; i++) {
4224		u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
4225		int rc;
4226
4227		rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
4228		if (rc)
4229			return rc;
4230	}
4231	return 0;
4232}
4233
4234static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
4235{
4236	struct hwrm_port_mac_cfg_input *req;
4237	int rc;
4238
4239	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
4240	if (rc)
4241		return rc;
4242
4243	req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
4244	if (enable)
4245		req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
4246	else
4247		req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
4248	return hwrm_req_send(bp, req);
4249}
4250
4251static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
4252{
4253	struct hwrm_port_phy_qcaps_output *resp;
4254	struct hwrm_port_phy_qcaps_input *req;
4255	int rc;
4256
4257	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
4258	if (rc)
4259		return rc;
4260
4261	resp = hwrm_req_hold(bp, req);
4262	rc = hwrm_req_send(bp, req);
4263	if (!rc)
4264		*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
4265
4266	hwrm_req_drop(bp, req);
4267	return rc;
4268}
4269
/* Devices without BNXT_PHY_FL_AN_PHY_LPBK cannot loop back while
 * autoneg is on, so force a link speed first: the current speed when
 * the link is up, otherwise a supported force-mode speed.  @req is held
 * by the caller; its flags/speed are cleared again after sending so the
 * caller can reuse it for the actual loopback config.
 */
static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
				    struct hwrm_port_phy_cfg_input *req)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 fw_advertising;
	u16 fw_speed;
	int rc;

	/* nothing to do if autoneg is off or PHY loopback works with AN */
	if (!link_info->autoneg ||
	    (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
		return 0;

	rc = bnxt_query_force_speeds(bp, &fw_advertising);
	if (rc)
		return rc;

	fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
	if (BNXT_LINK_IS_UP(bp))
		fw_speed = bp->link_info.link_speed;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;

	req->force_link_speed = cpu_to_le16(fw_speed);
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	/* reset the request fields for reuse by the caller */
	req->flags = 0;
	req->force_link_speed = cpu_to_le16(0);
	return rc;
}
4306
4307static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
4308{
4309	struct hwrm_port_phy_cfg_input *req;
4310	int rc;
4311
4312	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
4313	if (rc)
4314		return rc;
4315
4316	/* prevent bnxt_disable_an_for_lpbk() from consuming the request */
4317	hwrm_req_hold(bp, req);
4318
4319	if (enable) {
4320		bnxt_disable_an_for_lpbk(bp, req);
4321		if (ext)
4322			req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
4323		else
4324			req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
4325	} else {
4326		req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
4327	}
4328	req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
4329	rc = hwrm_req_send(bp, req);
4330	hwrm_req_drop(bp, req);
4331	return rc;
4332}
4333
/* Verify the looped-back frame at completion position @raw_cons: the
 * length must equal @pkt_size, the source MAC must be our own address,
 * and the payload must match the pattern written by bnxt_run_loopback().
 * Returns 0 on match, -EIO on any mismatch.
 */
static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    u32 raw_cons, int pkt_size)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_cmp *rxcmp;
	u16 cp_cons, cons;
	u8 *data;
	u32 len;
	int i;

	rxr = bnapi->rx_ring;
	cp_cons = RING_CMP(raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	/* opaque field carries the rx buffer ring index for this frame */
	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data_ptr;
	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	if (len != pkt_size)
		return -EIO;
	/* skip the destination MAC; check the source MAC (both were set
	 * to dev_addr by bnxt_run_loopback())
	 */
	i = ETH_ALEN;
	if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
		return -EIO;
	i += ETH_ALEN;
	for (  ; i < pkt_size; i++) {
		if (data[i] != (u8)(i & 0xff))
			return -EIO;
	}
	return 0;
}
4366
/* Busy-poll the completion ring (up to 200 x 5us) for the looped-back
 * frame.  Returns bnxt_rx_loopback()'s verdict once an RX completion
 * shows up, or -EIO on timeout.
 */
static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      int pkt_size)
{
	struct tx_cmp *txcmp;
	int rc = -EIO;
	u32 raw_cons;
	u32 cons;
	int i;

	raw_cons = cpr->cp_raw_cons;
	for (i = 0; i < 200; i++) {
		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons)) {
			udelay(5);
			continue;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
		    TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
			rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
			/* advance past the RX completion (two ring slots) */
			raw_cons = NEXT_RAW_CMP(raw_cons);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			break;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}
	/* record how far we consumed so normal polling resumes correctly */
	cpr->cp_raw_cons = raw_cons;
	return rc;
}
4402
/* Transmit one self-addressed test frame on TX ring 0 and poll for it
 * on RX ring 0.  Assumes a loopback mode is already configured and the
 * NIC is half-open.  Returns 0 if the frame came back intact.
 */
static int bnxt_run_loopback(struct bnxt *bp)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct bnxt_cp_ring_info *cpr;
	int pkt_size, i = 0;
	struct sk_buff *skb;
	dma_addr_t map;
	u8 *data;
	int rc;

	cpr = &rxr->bnapi->cp_ring;
	/* P5+ chips use a dedicated RX completion ring */
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		cpr = rxr->rx_cpr;
	/* keep the frame small enough to be received by copy */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	data = skb_put(skb, pkt_size);
	/* dest MAC, source MAC (both our own), then a byte-index pattern
	 * that bnxt_rx_loopback() verifies
	 */
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++)
		data[i] = (u8)(i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);

	/* Sync BD data before updating doorbell */
	wmb();

	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	rc = bnxt_poll_loopback(bp, cpr, pkt_size);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
	dev_kfree_skb(skb);
	return rc;
}
4447
/* Ask firmware to run the self-tests selected by @test_mask; firmware
 * reports a pass/fail bitmap in *test_results.  Note the result byte is
 * copied from the response buffer even when the request fails — callers
 * ignore the return code and rely on the mask/result comparison.
 */
static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
	struct hwrm_selftest_exec_output *resp;
	struct hwrm_selftest_exec_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
	if (rc)
		return rc;

	/* self-tests can take longer than the default HWRM timeout */
	hwrm_req_timeout(bp, req, bp->test_info->timeout);
	req->flags = test_mask;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	*test_results = resp->test_success;
	hwrm_req_drop(bp, req);
	return rc;
}
4467
/* Tests implemented by the driver itself (MAC/PHY/external loopback and
 * IRQ); they are appended after the firmware-reported self-tests.  The
 * *_TEST_IDX macros expand to indices relative to bp->num_tests and
 * therefore require a local variable named "bp" at the expansion site.
 */
#define BNXT_DRV_TESTS			4
#define BNXT_MACLPBK_TEST_IDX		(bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 1)
#define BNXT_EXTLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
#define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 3)
4473
/* ethtool -t handler.  Online firmware tests run without disturbing
 * traffic; offline tests additionally close the NIC and run the
 * driver's MAC/PHY/external loopback and IRQ tests.  buf[i] is set to
 * 1 for each failing test.
 */
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
			   u64 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	bool do_ext_lpbk = false;
	bool offline = false;
	u8 test_results = 0;
	u8 test_mask = 0;
	int rc = 0, i;

	if (!bp->num_tests || !BNXT_PF(bp))
		return;
	memset(buf, 0, sizeof(u64) * bp->num_tests);
	if (!netif_running(dev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
	    (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
		do_ext_lpbk = true;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
			return;
		}
		offline = true;
	}

	/* select the firmware tests permitted in the current mode */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if (!(bp->test_info->offline_mask & bit_val))
			test_mask |= bit_val;
		else if (offline)
			test_mask |= bit_val;
	}
	if (!offline) {
		bnxt_run_fw_tests(bp, test_mask, &test_results);
	} else {
		bnxt_ulp_stop(bp);
		bnxt_close_nic(bp, true, false);

		bnxt_run_fw_tests(bp, test_mask, &test_results);

		/* assume MAC loopback failure until the frame comes back */
		buf[BNXT_MACLPBK_TEST_IDX] = 1;
		bnxt_hwrm_mac_loopback(bp, true);
		msleep(250);
		rc = bnxt_half_open_nic(bp);
		if (rc) {
			bnxt_hwrm_mac_loopback(bp, false);
			etest->flags |= ETH_TEST_FL_FAILED;
			bnxt_ulp_start(bp, rc);
			return;
		}
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_MACLPBK_TEST_IDX] = 0;

		bnxt_hwrm_mac_loopback(bp, false);
		bnxt_hwrm_phy_loopback(bp, true, false);
		msleep(1000);	/* let the PHY settle into loopback */
		if (bnxt_run_loopback(bp)) {
			buf[BNXT_PHYLPBK_TEST_IDX] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (do_ext_lpbk) {
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
			bnxt_hwrm_phy_loopback(bp, true, true);
			msleep(1000);
			if (bnxt_run_loopback(bp)) {
				buf[BNXT_EXTLPBK_TEST_IDX] = 1;
				etest->flags |= ETH_TEST_FL_FAILED;
			}
		}
		bnxt_hwrm_phy_loopback(bp, false, false);
		bnxt_half_close_nic(bp);
		rc = bnxt_open_nic(bp, true, true);
		bnxt_ulp_start(bp, rc);
	}
	if (rc || bnxt_test_irq(bp)) {
		buf[BNXT_IRQ_TEST_IDX] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* translate the firmware pass/fail bitmap into per-test results */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if ((test_mask & bit_val) && !(test_results & bit_val)) {
			buf[i] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}
}
4569
/* ethtool --reset handler.  Supports full chip reset (BNXT_FW_RESET_CHIP)
 * and application-processor reset (BNXT_FW_RESET_AP).  Bits that were
 * honored are cleared from *flags; -EOPNOTSUPP is returned only when an
 * unsupported operation was the sole request.
 */
static int bnxt_reset(struct net_device *dev, u32 *flags)
{
	struct bnxt *bp = netdev_priv(dev);
	bool reload = false;
	u32 req = *flags;

	if (!req)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "Reset is not supported from a VF\n");
		return -EOPNOTSUPP;
	}

	if (pci_vfs_assigned(bp->pdev) &&
	    !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
		netdev_err(dev,
			   "Reset not allowed when VFs are assigned to VMs\n");
		return -EBUSY;
	}

	if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_chip(dev)) {
				netdev_info(dev, "Firmware reset request successful.\n");
				/* without hot reset the driver must be reloaded */
				if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
					reload = true;
				*flags &= ~BNXT_FW_RESET_CHIP;
			}
		} else if (req == BNXT_FW_RESET_CHIP) {
			return -EOPNOTSUPP; /* only request, fail hard */
		}
	}

	if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_ap(dev)) {
				netdev_info(dev, "Reset application processor successful.\n");
				reload = true;
				*flags &= ~BNXT_FW_RESET_AP;
			}
		} else if (req == BNXT_FW_RESET_AP) {
			return -EOPNOTSUPP; /* only request, fail hard */
		}
	}

	if (reload)
		netdev_info(dev, "Reload driver to complete reset\n");

	return 0;
}
4623
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4624static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
4625{
4626	struct bnxt *bp = netdev_priv(dev);
4627
4628	if (dump->flag > BNXT_DUMP_CRASH) {
4629		netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
4630		return -EINVAL;
4631	}
4632
4633	if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
4634		netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
4635		return -EOPNOTSUPP;
4636	}
4637
4638	bp->dump_flag = dump->flag;
4639	return 0;
4640}
4641
/* ethtool -w handler: report the dump "version" (packed firmware
 * version), the currently selected dump type, and the buffer length
 * the caller must supply.
 */
static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	/* pack the 4-byte firmware version into the 32-bit dump version */
	dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
			bp->ver_resp.hwrm_fw_min_8b << 16 |
			bp->ver_resp.hwrm_fw_bld_8b << 8 |
			bp->ver_resp.hwrm_fw_rsvd_8b;

	dump->flag = bp->dump_flag;
	dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);



	return 0;
}
4658
/* Fill @buf with the coredump type previously selected via
 * bnxt_set_dump(); dump->len is updated to the actual data length.
 */
static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
			      void *buf)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	memset(buf, 0, dump->len);

	dump->flag = bp->dump_flag;
	return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);








}
4672
/* Report timestamping capabilities: software timestamping always;
 * hardware timestamping and the PHC index only when PTP is configured.
 */
static int bnxt_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ptp_cfg *ptp;

	ptp = bp->ptp_cfg;
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	info->phc_index = -1;	/* no PHC unless PTP is set up */
	if (!ptp)
		return 0;

	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
				 SOF_TIMESTAMPING_RX_HARDWARE |
				 SOF_TIMESTAMPING_RAW_HARDWARE;
	if (ptp->ptp_clock)
		info->phc_index = ptp_clock_index(ptp->ptp_clock);

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	/* some firmware can timestamp every received packet */
	if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
		info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}
4704
/* Probe-time setup for ethtool: read the flash package version when
 * firmware does not report one, then query the firmware self-test list
 * and populate bp->test_info (names, timeout, offline mask) and
 * bp->num_tests (firmware tests plus the BNXT_DRV_TESTS driver tests).
 */
void bnxt_ethtool_init(struct bnxt *bp)
{
	struct hwrm_selftest_qlist_output *resp;
	struct hwrm_selftest_qlist_input *req;
	struct bnxt_test_info *test_info;
	struct net_device *dev = bp->dev;
	int i, rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
		bnxt_get_pkgver(dev);

	bp->num_tests = 0;
	/* self-tests need firmware spec 1.7.4+ and are PF-only */
	if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
		return;






	/* allocate test_info once; reused across re-init */
	test_info = bp->test_info;
	if (!test_info) {
		test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
		if (!test_info)
			return;
		bp->test_info = test_info;
	}

	if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
		return;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc)
		goto ethtool_init_exit;


	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
	if (bp->num_tests > BNXT_MAX_TEST)
		bp->num_tests = BNXT_MAX_TEST;

	test_info->offline_mask = resp->offline_tests;
	test_info->timeout = le16_to_cpu(resp->test_timeout);
	if (!test_info->timeout)
		test_info->timeout = HWRM_CMD_TIMEOUT;
	/* build the display strings: driver tests get fixed names, the
	 * firmware-reported tests use the names from the response
	 */
	for (i = 0; i < bp->num_tests; i++) {
		char *str = test_info->string[i];
		char *fw_str = resp->test_name[i];

		if (i == BNXT_MACLPBK_TEST_IDX) {
			strcpy(str, "Mac loopback test (offline)");
		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
			strcpy(str, "Phy loopback test (offline)");
		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
			strcpy(str, "Ext loopback test (offline)");
		} else if (i == BNXT_IRQ_TEST_IDX) {
			strcpy(str, "Interrupt_test (offline)");
		} else {
			snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
				 fw_str, test_info->offline_mask & (1 << i) ?
					"offline" : "online");





		}
	}

ethtool_init_exit:
	hwrm_req_drop(bp, req);
}
4766
4767static void bnxt_get_eth_phy_stats(struct net_device *dev,
4768				   struct ethtool_eth_phy_stats *phy_stats)
4769{
4770	struct bnxt *bp = netdev_priv(dev);
4771	u64 *rx;
4772
4773	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
4774		return;
4775
4776	rx = bp->rx_port_stats_ext.sw_stats;
4777	phy_stats->SymbolErrorDuringCarrier =
4778		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
4779}
4780
/* Report standard MAC stats from the basic port counters (PF only). */
static void bnxt_get_eth_mac_stats(struct net_device *dev,
				   struct ethtool_eth_mac_stats *mac_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	/* rx and tx counters live in the same sw_stats block, with the
	 * tx section at a fixed byte offset
	 */
	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	mac_stats->FramesReceivedOK =
		BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
	mac_stats->FramesTransmittedOK =
		BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
	mac_stats->FrameCheckSequenceErrors =
		BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
	mac_stats->AlignmentErrors =
		BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
	mac_stats->OutOfRangeLengthField =
		BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
}
4804
4805static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
4806				    struct ethtool_eth_ctrl_stats *ctrl_stats)
4807{
4808	struct bnxt *bp = netdev_priv(dev);
4809	u64 *rx;
4810
4811	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4812		return;
4813
4814	rx = bp->port_stats.sw_stats;
4815	ctrl_stats->MACControlFramesReceived =
4816		BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
4817}
4818
/* RMON packet-size histogram buckets supported by the port counters.
 * Order must match the hist[]/hist_tx[] assignments in
 * bnxt_get_rmon_stats().  The zero entry terminates the list.
 */
static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  9216 },
	{ 9217, 16383 },
	{}
};
4832
/* Report RMON stats from the basic port counters (PF only).  The
 * hist[]/hist_tx[] indices correspond one-to-one with bnxt_rmon_ranges.
 */
static void bnxt_get_rmon_stats(struct net_device *dev,
				struct ethtool_rmon_stats *rmon_stats,
				const struct ethtool_rmon_hist_range **ranges)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	/* tx counters follow rx at a fixed byte offset in sw_stats */
	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	rmon_stats->jabbers =
		BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
	rmon_stats->oversize_pkts =
		BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
	rmon_stats->undersize_pkts =
		BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);

	rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
	rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
	rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
	rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
	rmon_stats->hist[4] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
	rmon_stats->hist[5] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
	rmon_stats->hist[6] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
	rmon_stats->hist[7] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
	rmon_stats->hist[8] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
	rmon_stats->hist[9] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);

	rmon_stats->hist_tx[0] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
	rmon_stats->hist_tx[1] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
	rmon_stats->hist_tx[2] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
	rmon_stats->hist_tx[3] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
	rmon_stats->hist_tx[4] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
	rmon_stats->hist_tx[5] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
	rmon_stats->hist_tx[6] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
	rmon_stats->hist_tx[7] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
	rmon_stats->hist_tx[8] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
	rmon_stats->hist_tx[9] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);

	*ranges = bnxt_rmon_ranges;
}
4893
4894static void bnxt_get_link_ext_stats(struct net_device *dev,
4895				    struct ethtool_link_ext_stats *stats)
4896{
4897	struct bnxt *bp = netdev_priv(dev);
4898	u64 *rx;
4899
4900	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
4901		return;
4902
4903	rx = bp->rx_port_stats_ext.sw_stats;
4904	stats->link_down_events =
4905		*(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
4906}
4907
/* Free the self-test metadata allocated by bnxt_ethtool_init(). */
void bnxt_ethtool_free(struct bnxt *bp)
{
	kfree(bp->test_info);
	bp->test_info = NULL;
}
4913
/* ethtool operation table hooked up to the netdev at probe time; see the
 * individual handler functions for per-operation details.
 */
const struct ethtool_ops bnxt_ethtool_ops = {
	.cap_link_lanes_supported	= 1,
	/* Coalescing knobs the core is allowed to pass to set_coalesce. */
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_USE_CQE,
	.get_link_ksettings	= bnxt_get_link_ksettings,
	.set_link_ksettings	= bnxt_set_link_ksettings,
	.get_fec_stats		= bnxt_get_fec_stats,
	.get_fecparam		= bnxt_get_fecparam,
	.set_fecparam		= bnxt_set_fecparam,
	.get_pause_stats	= bnxt_get_pause_stats,
	.get_pauseparam		= bnxt_get_pauseparam,
	.set_pauseparam		= bnxt_set_pauseparam,
	.get_drvinfo		= bnxt_get_drvinfo,
	.get_regs_len		= bnxt_get_regs_len,
	.get_regs		= bnxt_get_regs,
	.get_wol		= bnxt_get_wol,
	.set_wol		= bnxt_set_wol,
	.get_coalesce		= bnxt_get_coalesce,
	.set_coalesce		= bnxt_set_coalesce,
	.get_msglevel		= bnxt_get_msglevel,
	.set_msglevel		= bnxt_set_msglevel,
	.get_sset_count		= bnxt_get_sset_count,
	.get_strings		= bnxt_get_strings,
	.get_ethtool_stats	= bnxt_get_ethtool_stats,
	.set_ringparam		= bnxt_set_ringparam,
	.get_ringparam		= bnxt_get_ringparam,
	.get_channels		= bnxt_get_channels,
	.set_channels		= bnxt_set_channels,
	.get_rxnfc		= bnxt_get_rxnfc,
	.set_rxnfc		= bnxt_set_rxnfc,
	.get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size      = bnxt_get_rxfh_key_size,
	.get_rxfh               = bnxt_get_rxfh,
	.set_rxfh		= bnxt_set_rxfh,
	.flash_device		= bnxt_flash_device,
	.get_eeprom_len         = bnxt_get_eeprom_len,
	.get_eeprom             = bnxt_get_eeprom,
	.set_eeprom		= bnxt_set_eeprom,
	.get_link		= bnxt_get_link,
	.get_link_ext_stats	= bnxt_get_link_ext_stats,
	.get_eee		= bnxt_get_eee,
	.set_eee		= bnxt_set_eee,
	.get_module_info	= bnxt_get_module_info,
	.get_module_eeprom	= bnxt_get_module_eeprom,
	.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
	.nway_reset		= bnxt_nway_reset,
	.set_phys_id		= bnxt_set_phys_id,
	.self_test		= bnxt_self_test,
	.get_ts_info		= bnxt_get_ts_info,
	.reset			= bnxt_reset,
	.set_dump		= bnxt_set_dump,
	.get_dump_flag		= bnxt_get_dump_flag,
	.get_dump_data		= bnxt_get_dump_data,
	.get_eth_phy_stats	= bnxt_get_eth_phy_stats,
	.get_eth_mac_stats	= bnxt_get_eth_mac_stats,
	.get_eth_ctrl_stats	= bnxt_get_eth_ctrl_stats,
	.get_rmon_stats		= bnxt_get_rmon_stats,
};
v5.14.15
   1/* Broadcom NetXtreme-C/E network driver.
   2 *
   3 * Copyright (c) 2014-2016 Broadcom Corporation
   4 * Copyright (c) 2016-2017 Broadcom Limited
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 */
  10
 
  11#include <linux/ctype.h>
  12#include <linux/stringify.h>
  13#include <linux/ethtool.h>
 
  14#include <linux/linkmode.h>
  15#include <linux/interrupt.h>
  16#include <linux/pci.h>
  17#include <linux/etherdevice.h>
  18#include <linux/crc32.h>
  19#include <linux/firmware.h>
  20#include <linux/utsname.h>
  21#include <linux/time.h>
  22#include <linux/ptp_clock_kernel.h>
  23#include <linux/net_tstamp.h>
  24#include <linux/timecounter.h>
 
  25#include "bnxt_hsi.h"
  26#include "bnxt.h"
 
 
  27#include "bnxt_xdp.h"
  28#include "bnxt_ptp.h"
  29#include "bnxt_ethtool.h"
  30#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
  31#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
  32#include "bnxt_coredump.h"
  33#define FLASH_NVRAM_TIMEOUT	((HWRM_CMD_TIMEOUT) * 100)
  34#define FLASH_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)
  35#define INSTALL_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)
 
 
 
 
  36
  37static u32 bnxt_get_msglevel(struct net_device *dev)
  38{
  39	struct bnxt *bp = netdev_priv(dev);
  40
  41	return bp->msg_enable;
  42}
  43
  44static void bnxt_set_msglevel(struct net_device *dev, u32 value)
  45{
  46	struct bnxt *bp = netdev_priv(dev);
  47
  48	bp->msg_enable = value;
  49}
  50
/* ethtool .get_coalesce: report current interrupt coalescing settings.
 * Hardware counts completion buffers while ethtool counts frames, so buffer
 * values are converted using bufs_per_record for each direction.
 */
static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_coal *hw_coal;
	u16 mult;

	memset(coal, 0, sizeof(*coal));

	/* Non-zero when dynamic interrupt moderation (DIM) is enabled. */
	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}
  80
/* ethtool .set_coalesce: program interrupt coalescing and the statistics
 * collection interval.  A stats-interval change needs a full close/open to
 * resize the firmware stats context; other changes only need an HWRM
 * coalescing update.
 */
static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			/* Leaving adaptive (DIM) mode: skip copying the user
			 * parameters and just reprogram the current values.
			 */
			goto reset_coalesce;
		}
	}

	/* ethtool frame counts are scaled to HW buffer counts. */
	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		/* Keep the periodic timer in step with the stats interval. */
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		update_stats = true;
	}

reset_coalesce:
	if (netif_running(dev)) {
		if (update_stats) {
			/* Stats context resize requires ring teardown. */
			rc = bnxt_close_nic(bp, true, false);
			if (!rc)
				rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}
 144
/* Per-ring hardware RX counter names.  Order must match the hardware stats
 * layout consumed in bnxt_get_ethtool_stats().
 */
static const char * const bnxt_ring_rx_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_errors",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
};

/* Per-ring hardware TX counter names; same ordering constraint as above. */
static const char * const bnxt_ring_tx_stats_str[] = {
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_errors",
	"tx_discards",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

/* TPA (HW GRO/LRO) counter names for legacy chips. */
static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

/* TPA counter names for chips with the v2 TPA counter set (max_tpa_v2). */
static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
	"rx_tpa_events",
};

/* Per-ring software RX counters kept by the driver. */
static const char * const bnxt_rx_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_resets",
	"rx_buf_errors",
};

/* Per-completion-ring software counters common to RX and TX. */
static const char * const bnxt_cmn_sw_stats_str[] = {
	"missed_irqs",
};
 192
/* Helper macros building { offset-into-stats-block, "name" } table entries
 * for the port statistics tables below.
 */
#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

/* PFC (priority flow control) duration/transition entries per priority. */
#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

/* Per-CoS-queue byte/packet entries. */
#define BNXT_RX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),		\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),		\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_TX_STATS_EXT_COS_ENTRIES				\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

/* Per-priority entries record the CoS-0 offset; the real slot is computed
 * at query time as base_off + pri2cos_idx[priority].
 */
#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)

/* Indices into bnxt_sw_func_stats[]. */
enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
};

/* Function-wide software counters, re-accumulated across all rings on each
 * ethtool -S query.
 */
static struct {
	u64			counter;
	char			string[ETH_GSTRING_LEN];
} bnxt_sw_func_stats[] = {
	{0, "rx_total_discard_pkts"},
	{0, "tx_total_discard_pkts"},
};

#define NUM_RING_RX_SW_STATS		ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS		ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS		ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS		ARRAY_SIZE(bnxt_ring_tx_stats_str)
 320
/* Legacy port statistics table: one entry per counter in the firmware port
 * stats block, pairing its 64-bit word offset with its ethtool name.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};
 406
/* Extended RX port statistics (names + offsets into rx_port_stats_ext). */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
};

/* Extended TX port statistics. */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

/* Per-priority tables: base_off holds the CoS-0 slot; the real slot is
 * base_off + pri2cos_idx[priority], resolved in bnxt_get_ethtool_stats().
 */
static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_SW_FUNC_STATS	ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
 468
 469static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
 470{
 471	if (BNXT_SUPPORTS_TPA(bp)) {
 472		if (bp->max_tpa_v2) {
 473			if (BNXT_CHIP_P5_THOR(bp))
 474				return BNXT_NUM_TPA_RING_STATS_P5;
 475			return BNXT_NUM_TPA_RING_STATS_P5_SR2;
 476		}
 477		return BNXT_NUM_TPA_RING_STATS;
 478	}
 479	return 0;
 480}
 481
 482static int bnxt_get_num_ring_stats(struct bnxt *bp)
 483{
 484	int rx, tx, cmn;
 485
 486	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
 487	     bnxt_get_num_tpa_ring_stats(bp);
 488	tx = NUM_RING_TX_HW_STATS;
 489	cmn = NUM_RING_CMN_SW_STATS;
 490	return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
 
 491	       cmn * bp->cp_nr_rings;
 492}
 493
 494static int bnxt_get_num_stats(struct bnxt *bp)
 495{
 496	int num_stats = bnxt_get_num_ring_stats(bp);
 
 497
 498	num_stats += BNXT_NUM_SW_FUNC_STATS;
 499
 500	if (bp->flags & BNXT_FLAG_PORT_STATS)
 501		num_stats += BNXT_NUM_PORT_STATS;
 502
 503	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
 504		num_stats += bp->fw_rx_stats_ext_size +
 505			     bp->fw_tx_stats_ext_size;
 
 
 
 
 506		if (bp->pri2cos_valid)
 507			num_stats += BNXT_NUM_STATS_PRI;
 508	}
 509
 510	return num_stats;
 511}
 512
 513static int bnxt_get_sset_count(struct net_device *dev, int sset)
 514{
 515	struct bnxt *bp = netdev_priv(dev);
 516
 517	switch (sset) {
 518	case ETH_SS_STATS:
 519		return bnxt_get_num_stats(bp);
 520	case ETH_SS_TEST:
 521		if (!bp->num_tests)
 522			return -EOPNOTSUPP;
 523		return bp->num_tests;
 524	default:
 525		return -EOPNOTSUPP;
 526	}
 527}
 528
/* Completion ring indices [0, rx_nr_rings) service RX traffic. */
static bool is_rx_ring(struct bnxt *bp, int ring_num)
{
	return ring_num < bp->rx_nr_rings;
}
 533
 534static bool is_tx_ring(struct bnxt *bp, int ring_num)
 535{
 536	int tx_base = 0;
 537
 538	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
 539		tx_base = bp->rx_nr_rings;
 540
 541	if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
 542		return true;
 543	return false;
 544}
 545
/* ethtool .get_ethtool_stats: fill @buf with counter values in exactly the
 * order that bnxt_get_strings() emits names -- the two must stay in
 * lockstep, slot for slot.
 */
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	u32 i, j = 0;
	struct bnxt *bp = netdev_priv(dev);
	u32 tpa_stats;

	if (!bp->bnapi) {
		/* Rings not allocated: skip over the ring and sw-func slots
		 * (presumably pre-zeroed by the ethtool core -- TODO confirm)
		 * and only report the port statistics below.
		 */
		j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
		goto skip_ring_stats;
	}

	/* NOTE(review): bnxt_sw_func_stats is file-scope, not per-device;
	 * verify whether concurrent readers/multiple adapters can interleave
	 * in this reset-and-accumulate sequence.
	 */
	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
		bnxt_sw_func_stats[i].counter = 0;

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		u64 *sw;
		int k;

		/* Per-ring HW counters: RX block, TX block, then TPA block,
		 * laid out consecutively in the ring's stats memory.
		 */
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		}
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
			       j++, k++)
				buf[j] = sw_stats[k];
		}
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;

		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			   tpa_stats; j++, k++)
			buf[j] = sw_stats[k];

skip_tpa_ring_stats:
		/* Per-ring software counters maintained by the driver. */
		sw = (u64 *)&cpr->sw_stats.rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
				buf[j] = sw[k];
		}

		sw = (u64 *)&cpr->sw_stats.cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];

		/* Accumulate function-wide totals reported after the
		 * per-ring section.
		 */
		bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
			BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
		bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
			BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
		buf[j] = bnxt_sw_func_stats[i].counter;

skip_ring_stats:
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;

		for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		}
		for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		}
		if (bp->pri2cos_valid) {
			/* HW keeps per-CoS counters; pri2cos_idx[] remaps
			 * each priority to its CoS slot.
			 */
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
		}
	}
}
 654
/* ethtool .get_strings: emit the counter-name table.  The order here must
 * match bnxt_get_ethtool_stats() exactly, slot for slot.
 */
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	static const char * const *str;
	u32 i, j, num_str;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_rx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			if (is_tx_ring(bp, i)) {
				num_str = NUM_RING_TX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_tx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = bnxt_get_num_tpa_ring_stats(bp);
			if (!num_str || !is_rx_ring(bp, i))
				goto skip_tpa_stats;

			/* Chips with v2 TPA report a different counter set. */
			if (bp->max_tpa_v2)
				str = bnxt_ring_tpa2_stats_str;
			else
				str = bnxt_ring_tpa_stats_str;

			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i, str[j]);
				buf += ETH_GSTRING_LEN;
			}
skip_tpa_stats:
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_SW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_rx_sw_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = NUM_RING_CMN_SW_STATS;
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_cmn_sw_stats_str[j]);
				buf += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
			strcpy(buf, bnxt_sw_func_stats[i].string);
			buf += ETH_GSTRING_LEN;
		}

		if (bp->flags & BNXT_FLAG_PORT_STATS) {
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				strcpy(buf, bnxt_port_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			/* Only the first fw_*_stats_ext_size entries are
			 * backed by counters this firmware provides.
			 */
			for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
				strcpy(buf,
				       bnxt_tx_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			memcpy(buf, bp->test_info->string,
			       bp->num_tests * ETH_GSTRING_LEN);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}
 765
 766static void bnxt_get_ringparam(struct net_device *dev,
 767			       struct ethtool_ringparam *ering)
 
 
 768{
 769	struct bnxt *bp = netdev_priv(dev);
 770
 771	ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
 772	ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
 
 
 
 
 
 
 
 773	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
 774
 775	ering->rx_pending = bp->rx_ring_size;
 776	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
 777	ering->tx_pending = bp->tx_ring_size;
 778}
 779
/* ethtool .set_ringparam: resize the RX/TX rings, bouncing the interface
 * if it is running so the new sizes take effect.
 */
static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	/* NOTE(review): only TX enforces a minimum here; RX accepts any
	 * value up to the max -- presumably rounded/validated later by
	 * bnxt_set_ring_params(); confirm.
	 */
	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
		return -EINVAL;

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}
 802
/* ethtool .get_channels: report current and maximum ring/channel counts
 * for both combined (shared) and separate RX/TX configurations.
 */
static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs, tx_grps;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (netif_running(dev) && BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	/* Maximums assuming combined (shared) rings. */
	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	/* TX rings are divided among TCs plus an XDP group if enabled. */
	tcs = netdev_get_num_tc(dev);
	tx_grps = max(tcs, 1);
	if (bp->tx_nr_rings_xdp)
		tx_grps++;
	max_tx_rings /= tx_grps;
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	/* Maximums for separate RX/TX mode; 0 if it cannot be supported. */
	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		/* Nitro A0 keeps one ring back for internal use. */
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}
 851
/* ethtool .set_channels: reconfigure the RX/TX/combined ring counts.
 * Validates the request, checks resource availability, then closes and
 * reopens the NIC (if running) with the new ring layout.
 */
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;

	if (channel->other_count)
		return -EINVAL;

	/* Exactly one of combined or rx+tx must be requested. */
	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	/* Nitro A0 only supports combined mode. */
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = netdev_get_num_tc(dev);

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		/* XDP needs one extra TX ring per RX ring. */
		tx_xdp = req_rx_rings;
	}
	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	/* A user-configured RSS table pins the current indirection size;
	 * refuse a change that would require resizing it.
	 */
	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    (dev->priv_flags & IFF_RXFH_CONFIGURED)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		rc = bnxt_close_nic(bp, true, false);
		if (rc) {
			netdev_err(bp->dev, "Set channel failure rc :%x\n",
				   rc);
			return rc;
		}
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	/* Shared mode: one completion ring covers an RX/TX pair. */
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to renable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}
 949
 950#ifdef CONFIG_RFS_ACCEL
 951static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
 952			    u32 *rule_locs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 953{
 954	int i, j = 0;
 955
 956	cmd->data = bp->ntp_fltr_count;
 957	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
 958		struct hlist_head *head;
 959		struct bnxt_ntuple_filter *fltr;
 960
 961		head = &bp->ntp_fltr_hash_tbl[i];
 962		rcu_read_lock();
 963		hlist_for_each_entry_rcu(fltr, head, hash) {
 964			if (j == cmd->rule_cnt)
 965				break;
 966			rule_locs[j++] = fltr->sw_id;
 967		}
 968		rcu_read_unlock();
 969		if (j == cmd->rule_cnt)
 970			break;
 971	}
 972	cmd->rule_cnt = j;
 
 
 
 
 
 
 
 
 
 
 
 
 973	return 0;
 974}
 975
/* ETHTOOL_GRXCLSRULE: look up the ntuple filter whose sw_id matches
 * fs->location and translate its flow keys into an ethtool flow spec.
 * Returns 0 on success, -EINVAL if not found or the protocol is not TCP/UDP.
 *
 * NOTE: on the fltr_found path the RCU read lock taken in the search loop
 * is deliberately held until the rcu_read_unlock() at fltr_err, so that
 * fltr remains valid while its fields are copied out.
 */
static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int i, rc = -EINVAL;

	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	/* Search every hash bucket for the filter with this sw_id. */
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->sw_id == fs->location)
				goto fltr_found;
		}
		rcu_read_unlock();
	}
	return rc;

fltr_found:
	fkeys = &fltr->fkeys;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		/* IPv4: only TCP and UDP filters are representable. */
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V4_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V4_FLOW;
		else
			goto fltr_err;

		/* All fields are exact-match, hence the all-ones masks. */
		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
	} else {
		int i;

		/* IPv6: again only TCP and UDP. */
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V6_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V6_FLOW;
		else
			goto fltr_err;

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
			fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
		}
		fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
	}

	fs->ring_cookie = fltr->rxq;
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}
1054#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1055
1056static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
1057{
1058	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
1059		return RXH_IP_SRC | RXH_IP_DST;
1060	return 0;
1061}
1062
1063static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
1064{
1065	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
1066		return RXH_IP_SRC | RXH_IP_DST;
1067	return 0;
1068}
1069
1070static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1071{
1072	cmd->data = 0;
1073	switch (cmd->flow_type) {
1074	case TCP_V4_FLOW:
1075		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
1076			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1077				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1078		cmd->data |= get_ethtool_ipv4_rss(bp);
1079		break;
1080	case UDP_V4_FLOW:
1081		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
1082			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1083				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1084		fallthrough;
1085	case SCTP_V4_FLOW:
1086	case AH_ESP_V4_FLOW:
1087	case AH_V4_FLOW:
1088	case ESP_V4_FLOW:
1089	case IPV4_FLOW:
1090		cmd->data |= get_ethtool_ipv4_rss(bp);
1091		break;
1092
1093	case TCP_V6_FLOW:
1094		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
1095			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1096				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1097		cmd->data |= get_ethtool_ipv6_rss(bp);
1098		break;
1099	case UDP_V6_FLOW:
1100		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
1101			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1102				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1103		fallthrough;
1104	case SCTP_V6_FLOW:
1105	case AH_ESP_V6_FLOW:
1106	case AH_V6_FLOW:
1107	case ESP_V6_FLOW:
1108	case IPV6_FLOW:
1109		cmd->data |= get_ethtool_ipv6_rss(bp);
1110		break;
1111	}
1112	return 0;
1113}
1114
1115#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
1116#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
1117
/* ETHTOOL_SRXFH: set which packet fields feed the RSS hash for one flow
 * type.  Only 4-tuple, 2-tuple, or no hashing are accepted.  If the
 * resulting config differs, it is applied via a quick close/open cycle.
 */
static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	/* Classify the requested field set; anything else is invalid. */
	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	/* 4-tuple hashing is only configurable for TCP/UDP over v4/v6;
	 * UDP additionally requires firmware UDP RSS capability.
	 */
	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	/* 2-tuple (or none) applies to the whole IPv4/IPv6 family. */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	/* No change requested - nothing to do. */
	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}
1194
1195static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1196			  u32 *rule_locs)
1197{
1198	struct bnxt *bp = netdev_priv(dev);
1199	int rc = 0;
1200
1201	switch (cmd->cmd) {
1202#ifdef CONFIG_RFS_ACCEL
1203	case ETHTOOL_GRXRINGS:
1204		cmd->data = bp->rx_nr_rings;
1205		break;
1206
1207	case ETHTOOL_GRXCLSRLCNT:
1208		cmd->rule_cnt = bp->ntp_fltr_count;
1209		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
1210		break;
1211
1212	case ETHTOOL_GRXCLSRLALL:
1213		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
1214		break;
1215
1216	case ETHTOOL_GRXCLSRULE:
1217		rc = bnxt_grxclsrule(bp, cmd);
1218		break;
1219#endif
1220
1221	case ETHTOOL_GRXFH:
1222		rc = bnxt_grxfh(bp, cmd);
1223		break;
1224
1225	default:
1226		rc = -EOPNOTSUPP;
1227		break;
1228	}
1229
1230	return rc;
1231}
1232
1233static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1234{
1235	struct bnxt *bp = netdev_priv(dev);
1236	int rc;
1237
1238	switch (cmd->cmd) {
1239	case ETHTOOL_SRXFH:
1240		rc = bnxt_srxfh(bp, cmd);
1241		break;
1242
 
 
 
 
 
 
 
 
1243	default:
1244		rc = -EOPNOTSUPP;
1245		break;
1246	}
1247	return rc;
1248}
1249
1250u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
1251{
1252	struct bnxt *bp = netdev_priv(dev);
1253
1254	if (bp->flags & BNXT_FLAG_CHIP_P5)
1255		return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
 
1256	return HW_HASH_INDEX_SIZE;
1257}
1258
1259static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
1260{
1261	return HW_HASH_KEY_SIZE;
1262}
1263
1264static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
1265			 u8 *hfunc)
1266{
1267	struct bnxt *bp = netdev_priv(dev);
1268	struct bnxt_vnic_info *vnic;
1269	u32 i, tbl_size;
1270
1271	if (hfunc)
1272		*hfunc = ETH_RSS_HASH_TOP;
1273
1274	if (!bp->vnic_info)
1275		return 0;
1276
1277	vnic = &bp->vnic_info[0];
1278	if (indir && bp->rss_indir_tbl) {
1279		tbl_size = bnxt_get_rxfh_indir_size(dev);
1280		for (i = 0; i < tbl_size; i++)
1281			indir[i] = bp->rss_indir_tbl[i];
1282	}
1283
1284	if (key && vnic->rss_hash_key)
1285		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
1286
1287	return 0;
1288}
1289
1290static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
1291			 const u8 *key, const u8 hfunc)
 
1292{
1293	struct bnxt *bp = netdev_priv(dev);
1294	int rc = 0;
1295
1296	if (hfunc && hfunc != ETH_RSS_HASH_TOP)
1297		return -EOPNOTSUPP;
1298
1299	if (key)
1300		return -EOPNOTSUPP;
1301
1302	if (indir) {
1303		u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
1304
1305		for (i = 0; i < tbl_size; i++)
1306			bp->rss_indir_tbl[i] = indir[i];
1307		pad = bp->rss_indir_tbl_entries - tbl_size;
1308		if (pad)
1309			memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
1310	}
1311
1312	if (netif_running(bp->dev)) {
1313		bnxt_close_nic(bp, false, false);
1314		rc = bnxt_open_nic(bp, false, false);
1315	}
1316	return rc;
1317}
1318
/* ethtool -i handler: report driver name, firmware version string,
 * PCI bus info, and stat/test counts.
 */
static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = bnxt_get_num_stats(bp);
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
}
1334
1335static int bnxt_get_regs_len(struct net_device *dev)
1336{
1337	struct bnxt *bp = netdev_priv(dev);
1338	int reg_len;
1339
1340	if (!BNXT_PF(bp))
1341		return -EOPNOTSUPP;
1342
1343	reg_len = BNXT_PXP_REG_LEN;
1344
1345	if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
1346		reg_len += sizeof(struct pcie_ctx_hw_stats);
1347
1348	return reg_len;
1349}
1350
/* ethtool -d handler: dump the PXP registers into _p, then (if the firmware
 * supports it) query PCIe stats via HWRM into a DMA buffer and append them,
 * converting from little-endian.  regs->version is 1 when PCIe stats are
 * included, 0 otherwise.
 */
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *_p)
{
	struct pcie_ctx_hw_stats *hw_pcie_stats;
	struct hwrm_pcie_qstats_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	dma_addr_t hw_pcie_stats_addr;
	int rc;

	regs->version = 0;
	bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);

	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
		return;

	/* DMA buffer for the firmware to deposit the PCIe stats into. */
	hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev,
					   sizeof(*hw_pcie_stats),
					   &hw_pcie_stats_addr, GFP_KERNEL);
	if (!hw_pcie_stats)
		return;

	regs->version = 1;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
	req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
	req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		/* Append stats after the PXP registers, in host byte order. */
		__le64 *src = (__le64 *)hw_pcie_stats;
		u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
		int i;

		for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
			dst[i] = le64_to_cpu(src[i]);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats,
			  hw_pcie_stats_addr);
}
1390
1391static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1392{
1393	struct bnxt *bp = netdev_priv(dev);
1394
1395	wol->supported = 0;
1396	wol->wolopts = 0;
1397	memset(&wol->sopass, 0, sizeof(wol->sopass));
1398	if (bp->flags & BNXT_FLAG_WOL_CAP) {
1399		wol->supported = WAKE_MAGIC;
1400		if (bp->wol)
1401			wol->wolopts = WAKE_MAGIC;
1402	}
1403}
1404
1405static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1406{
1407	struct bnxt *bp = netdev_priv(dev);
1408
1409	if (wol->wolopts & ~WAKE_MAGIC)
1410		return -EINVAL;
1411
1412	if (wol->wolopts & WAKE_MAGIC) {
1413		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
1414			return -EINVAL;
1415		if (!bp->wol) {
1416			if (bnxt_hwrm_alloc_wol_fltr(bp))
1417				return -EBUSY;
1418			bp->wol = 1;
1419		}
1420	} else {
1421		if (bp->wol) {
1422			if (bnxt_hwrm_free_wol_fltr(bp))
1423				return -EBUSY;
1424			bp->wol = 0;
1425		}
1426	}
1427	return 0;
1428}
1429
1430u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
1431{
1432	u32 speed_mask = 0;
1433
1434	/* TODO: support 25GB, 40GB, 50GB with different cable type */
1435	/* set the advertised speeds */
1436	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
1437		speed_mask |= ADVERTISED_100baseT_Full;
1438	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
1439		speed_mask |= ADVERTISED_1000baseT_Full;
1440	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
1441		speed_mask |= ADVERTISED_2500baseX_Full;
1442	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
1443		speed_mask |= ADVERTISED_10000baseT_Full;
1444	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
1445		speed_mask |= ADVERTISED_40000baseCR4_Full;
1446
1447	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
1448		speed_mask |= ADVERTISED_Pause;
1449	else if (fw_pause & BNXT_LINK_PAUSE_TX)
1450		speed_mask |= ADVERTISED_Asym_Pause;
1451	else if (fw_pause & BNXT_LINK_PAUSE_RX)
1452		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1453
1454	return speed_mask;
1455}
1456
/* Translate NRZ firmware speed bits (fw_speeds) and pause flags (fw_pause)
 * into ethtool link-mode bits on the named field (supported / advertising /
 * lp_advertising) of the link_ksettings.
 */
#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
{									\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     1000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     10000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     25000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     40000baseCR4_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR2_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100000baseCR4_Full);\
	if ((fw_pause) & BNXT_LINK_PAUSE_RX) {				\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Pause);		\
		if (!((fw_pause) & BNXT_LINK_PAUSE_TX))			\
			ethtool_link_ksettings_add_link_mode(		\
					lk_ksettings, name, Asym_Pause);\
	} else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Asym_Pause);	\
	}								\
}
1491
/* Collect NRZ speed bits into fw_speeds from the named ethtool link-mode
 * field of the link_ksettings (inverse of BNXT_FW_TO_ETHTOOL_SPDS).
 */
#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)		\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;			\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  10000baseT_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  25000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  40000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB;		\
}
1520
/* Translate PAM4 firmware speed bits into ethtool link-mode bits on the
 * named field of the link_ksettings.
 */
#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name)	\
{									\
	if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB)		\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB)		\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100000baseCR2_Full);\
	if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB)		\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     200000baseCR4_Full);\
}
1533
/* Collect PAM4 speed bits into fw_speeds from the named ethtool link-mode
 * field of the link_ksettings (inverse of BNXT_FW_TO_ETHTOOL_PAM4_SPDS).
 */
#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name)	\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  200000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB;		\
}
1546
1547static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
1548				struct ethtool_link_ksettings *lk_ksettings)
1549{
1550	u16 fec_cfg = link_info->fec_cfg;
1551
1552	if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
1553		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
1554				 lk_ksettings->link_modes.advertising);
1555		return;
1556	}
1557	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
1558		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1559				 lk_ksettings->link_modes.advertising);
1560	if (fec_cfg & BNXT_FEC_ENC_RS)
1561		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1562				 lk_ksettings->link_modes.advertising);
1563	if (fec_cfg & BNXT_FEC_ENC_LLRS)
1564		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
1565				 lk_ksettings->link_modes.advertising);
1566}
1567
1568static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
1569				struct ethtool_link_ksettings *lk_ksettings)
1570{
1571	u16 fw_speeds = link_info->advertising;
1572	u8 fw_pause = 0;
1573
1574	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1575		fw_pause = link_info->auto_pause_setting;
1576
1577	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
1578	fw_speeds = link_info->advertising_pam4;
1579	BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising);
1580	bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
1581}
1582
1583static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
1584				struct ethtool_link_ksettings *lk_ksettings)
1585{
1586	u16 fw_speeds = link_info->lp_auto_link_speeds;
1587	u8 fw_pause = 0;
1588
1589	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1590		fw_pause = link_info->lp_pause;
1591
1592	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
1593				lp_advertising);
1594	fw_speeds = link_info->lp_auto_pam4_link_speeds;
1595	BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising);
1596}
1597
1598static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
1599				struct ethtool_link_ksettings *lk_ksettings)
1600{
1601	u16 fec_cfg = link_info->fec_cfg;
1602
1603	if (fec_cfg & BNXT_FEC_NONE) {
1604		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
1605				 lk_ksettings->link_modes.supported);
1606		return;
1607	}
1608	if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
1609		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1610				 lk_ksettings->link_modes.supported);
1611	if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
1612		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1613				 lk_ksettings->link_modes.supported);
1614	if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
1615		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
1616				 lk_ksettings->link_modes.supported);
1617}
1618
1619static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
1620				struct ethtool_link_ksettings *lk_ksettings)
1621{
1622	u16 fw_speeds = link_info->support_speeds;
1623
1624	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
1625	fw_speeds = link_info->support_pam4_speeds;
1626	BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
1627
1628	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
1629	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1630					     Asym_Pause);
1631
1632	if (link_info->support_auto_speeds ||
1633	    link_info->support_pam4_auto_speeds)
1634		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1635						     Autoneg);
1636	bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
1637}
1638
1639u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
1640{
1641	switch (fw_link_speed) {
1642	case BNXT_LINK_SPEED_100MB:
1643		return SPEED_100;
1644	case BNXT_LINK_SPEED_1GB:
1645		return SPEED_1000;
1646	case BNXT_LINK_SPEED_2_5GB:
1647		return SPEED_2500;
1648	case BNXT_LINK_SPEED_10GB:
1649		return SPEED_10000;
1650	case BNXT_LINK_SPEED_20GB:
1651		return SPEED_20000;
1652	case BNXT_LINK_SPEED_25GB:
1653		return SPEED_25000;
1654	case BNXT_LINK_SPEED_40GB:
1655		return SPEED_40000;
1656	case BNXT_LINK_SPEED_50GB:
 
1657		return SPEED_50000;
1658	case BNXT_LINK_SPEED_100GB:
 
 
1659		return SPEED_100000;
 
 
 
 
 
 
 
 
1660	default:
1661		return SPEED_UNKNOWN;
1662	}
1663}
1664
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* ethtool get_link_ksettings handler: report supported/advertised link
 * modes, autoneg state, speed, duplex, and port type.  All link_info reads
 * are done under bp->link_lock for a consistent snapshot.
 */
static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	struct ethtool_link_settings *base = &lk_ksettings->base;
	u32 ethtool_speed;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	mutex_lock(&bp->link_lock);
	bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
		ethtool_link_ksettings_add_link_mode(lk_ksettings,
						     advertising, Autoneg);
		base->autoneg = AUTONEG_ENABLE;
		base->duplex = DUPLEX_UNKNOWN;
		/* Link partner info and duplex are valid only with link up. */
		if (link_info->phy_link_status == BNXT_LINK_LINK) {
			bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
			if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
				base->duplex = DUPLEX_FULL;
			else
				base->duplex = DUPLEX_HALF;
		}
		ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
	} else {
		/* Forced mode: report the requested speed/duplex. */
		base->autoneg = AUTONEG_DISABLE;
		ethtool_speed =
			bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
	base->speed = ethtool_speed;

	/* Derive the port type from the reported media type. */
	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     TP);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     TP);
	} else {
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     FIBRE);

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else if (link_info->media_type ==
			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}
1726
/* Select a forced (non-autoneg) link speed.  Maps the ethtool SPEED_* value
 * to a firmware forced-speed code, preferring NRZ signaling and falling back
 * to PAM4 where only PAM4 supports the speed.  Returns 0 on success,
 * -EINVAL if the speed is unsupported, or -EALREADY if the requested forced
 * speed/signal mode is already configured.
 */
static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_pam4_spds = link_info->support_pam4_speeds;
	u16 support_spds = link_info->support_speeds;
	u8 sig_mode = BNXT_SIG_MODE_NRZ;
	u16 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
		break;
	case SPEED_10000:
		if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
		break;
	case SPEED_25000:
		if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
		break;
	case SPEED_50000:
		/* 50G: NRZ if supported, otherwise PAM4. */
		if (support_spds & BNXT_LINK_SPEED_MSK_50GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	case SPEED_100000:
		/* 100G: NRZ if supported, otherwise PAM4. */
		if (support_spds & BNXT_LINK_SPEED_MSK_100GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	case SPEED_200000:
		/* 200G is PAM4-only. */
		if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	}

	if (!fw_speed) {
		netdev_err(dev, "unsupported speed!\n");
		return -EINVAL;
	}

	/* Already forced to the same speed and signal mode - nothing to do. */
	if (link_info->req_link_speed == fw_speed &&
	    link_info->req_signal_mode == sig_mode &&
	    link_info->autoneg == 0)
		return -EALREADY;

	link_info->req_link_speed = fw_speed;
	link_info->req_signal_mode = sig_mode;
	link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
	link_info->autoneg = 0;
	link_info->advertising = 0;
	link_info->advertising_pam4 = 0;

	return 0;
}
1808
1809u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
1810{
1811	u16 fw_speed_mask = 0;
1812
1813	/* only support autoneg at speed 100, 1000, and 10000 */
1814	if (advertising & (ADVERTISED_100baseT_Full |
1815			   ADVERTISED_100baseT_Half)) {
1816		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
1817	}
1818	if (advertising & (ADVERTISED_1000baseT_Full |
1819			   ADVERTISED_1000baseT_Half)) {
1820		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
1821	}
1822	if (advertising & ADVERTISED_10000baseT_Full)
1823		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
1824
1825	if (advertising & ADVERTISED_40000baseCR4_Full)
1826		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
1827
1828	return fw_speed_mask;
1829}
1830
/* ethtool set_link_ksettings handler: configure autoneg advertisement or a
 * forced speed under bp->link_lock, then push the setting to firmware if
 * the interface is running.
 */
static int bnxt_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u32 speed;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		/* Build NRZ and PAM4 advertisement masks from the request. */
		link_info->advertising = 0;
		link_info->advertising_pam4 = 0;
		BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings,
					advertising);
		BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4,
					     lk_ksettings, advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		/* Nothing advertised: fall back to all supported speeds. */
		if (!link_info->advertising && !link_info->advertising_pam4) {
			link_info->advertising = link_info->support_auto_speeds;
			link_info->advertising_pam4 =
				link_info->support_pam4_auto_speeds;
		}
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		set_pause = true;
	} else {
		u8 phy_type = link_info->phy_type;

		/* BASE-T media cannot have its speed forced. */
		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		rc = bnxt_force_link_speed(dev, speed);
		if (rc) {
			/* -EALREADY means the speed is unchanged: success. */
			if (rc == -EALREADY)
				rc = 0;
			goto set_setting_exit;
		}
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
1893
1894static int bnxt_get_fecparam(struct net_device *dev,
1895			     struct ethtool_fecparam *fec)
1896{
1897	struct bnxt *bp = netdev_priv(dev);
1898	struct bnxt_link_info *link_info;
1899	u8 active_fec;
1900	u16 fec_cfg;
1901
1902	link_info = &bp->link_info;
1903	fec_cfg = link_info->fec_cfg;
1904	active_fec = link_info->active_fec_sig_mode &
1905		     PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
1906	if (fec_cfg & BNXT_FEC_NONE) {
1907		fec->fec = ETHTOOL_FEC_NONE;
1908		fec->active_fec = ETHTOOL_FEC_NONE;
1909		return 0;
1910	}
1911	if (fec_cfg & BNXT_FEC_AUTONEG)
1912		fec->fec |= ETHTOOL_FEC_AUTO;
1913	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
1914		fec->fec |= ETHTOOL_FEC_BASER;
1915	if (fec_cfg & BNXT_FEC_ENC_RS)
1916		fec->fec |= ETHTOOL_FEC_RS;
1917	if (fec_cfg & BNXT_FEC_ENC_LLRS)
1918		fec->fec |= ETHTOOL_FEC_LLRS;
1919
1920	switch (active_fec) {
1921	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
1922		fec->active_fec |= ETHTOOL_FEC_BASER;
1923		break;
1924	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
1925	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
1926	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
1927		fec->active_fec |= ETHTOOL_FEC_RS;
1928		break;
1929	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
1930	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
1931		fec->active_fec |= ETHTOOL_FEC_LLRS;
1932		break;
 
 
 
1933	}
1934	return 0;
1935}
1936
1937static void bnxt_get_fec_stats(struct net_device *dev,
1938			       struct ethtool_fec_stats *fec_stats)
1939{
1940	struct bnxt *bp = netdev_priv(dev);
1941	u64 *rx;
1942
1943	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
1944		return;
1945
1946	rx = bp->rx_port_stats_ext.sw_stats;
1947	fec_stats->corrected_bits.total =
1948		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
 
 
 
 
 
 
 
 
1949}
1950
1951static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
1952					 u32 fec)
1953{
1954	u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
1955
1956	if (fec & ETHTOOL_FEC_BASER)
1957		fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
1958	else if (fec & ETHTOOL_FEC_RS)
1959		fw_fec |= BNXT_FEC_RS_ON(link_info);
1960	else if (fec & ETHTOOL_FEC_LLRS)
1961		fw_fec |= BNXT_FEC_LLRS_ON;
1962	return fw_fec;
1963}
1964
1965static int bnxt_set_fecparam(struct net_device *dev,
1966			     struct ethtool_fecparam *fecparam)
1967{
1968	struct hwrm_port_phy_cfg_input req = {0};
1969	struct bnxt *bp = netdev_priv(dev);
1970	struct bnxt_link_info *link_info;
1971	u32 new_cfg, fec = fecparam->fec;
1972	u16 fec_cfg;
1973	int rc;
1974
1975	link_info = &bp->link_info;
1976	fec_cfg = link_info->fec_cfg;
1977	if (fec_cfg & BNXT_FEC_NONE)
1978		return -EOPNOTSUPP;
1979
1980	if (fec & ETHTOOL_FEC_OFF) {
1981		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
1982			  BNXT_FEC_ALL_OFF(link_info);
1983		goto apply_fec;
1984	}
1985	if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
1986	    ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
1987	    ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
1988	    ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
1989		return -EINVAL;
1990
1991	if (fec & ETHTOOL_FEC_AUTO) {
1992		if (!link_info->autoneg)
1993			return -EINVAL;
1994		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
1995	} else {
1996		new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
1997	}
1998
1999apply_fec:
2000	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
2001	req.flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
2002	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 
 
2003	/* update current settings */
2004	if (!rc) {
2005		mutex_lock(&bp->link_lock);
2006		bnxt_update_link(bp, false);
2007		mutex_unlock(&bp->link_lock);
2008	}
2009	return rc;
2010}
2011
2012static void bnxt_get_pauseparam(struct net_device *dev,
2013				struct ethtool_pauseparam *epause)
2014{
2015	struct bnxt *bp = netdev_priv(dev);
2016	struct bnxt_link_info *link_info = &bp->link_info;
2017
2018	if (BNXT_VF(bp))
2019		return;
2020	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
2021	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
2022	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
2023}
2024
2025static void bnxt_get_pause_stats(struct net_device *dev,
2026				 struct ethtool_pause_stats *epstat)
2027{
2028	struct bnxt *bp = netdev_priv(dev);
2029	u64 *rx, *tx;
2030
2031	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
2032		return;
2033
2034	rx = bp->port_stats.sw_stats;
2035	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
2036
2037	epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
2038	epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
2039}
2040
2041static int bnxt_set_pauseparam(struct net_device *dev,
2042			       struct ethtool_pauseparam *epause)
2043{
2044	int rc = 0;
2045	struct bnxt *bp = netdev_priv(dev);
2046	struct bnxt_link_info *link_info = &bp->link_info;
2047
2048	if (!BNXT_PHY_CFG_ABLE(bp))
2049		return -EOPNOTSUPP;
2050
2051	mutex_lock(&bp->link_lock);
2052	if (epause->autoneg) {
2053		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
2054			rc = -EINVAL;
2055			goto pause_exit;
2056		}
2057
2058		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
2059		if (bp->hwrm_spec_code >= 0x10201)
2060			link_info->req_flow_ctrl =
2061				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
2062	} else {
2063		/* when transition from auto pause to force pause,
2064		 * force a link change
2065		 */
2066		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
2067			link_info->force_link_chng = true;
2068		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
2069		link_info->req_flow_ctrl = 0;
2070	}
2071	if (epause->rx_pause)
2072		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
2073
2074	if (epause->tx_pause)
2075		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
2076
2077	if (netif_running(dev))
2078		rc = bnxt_hwrm_set_pause(bp);
2079
2080pause_exit:
2081	mutex_unlock(&bp->link_lock);
2082	return rc;
2083}
2084
/* ethtool get_link handler: return the driver's cached link state
 * without querying firmware.
 */
static u32 bnxt_get_link(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return bp->link_info.link_up;
}
2092
2093int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
2094			       struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
2095{
2096	struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr;
2097	struct hwrm_nvm_get_dev_info_input req = {0};
2098	int rc;
2099
2100	if (BNXT_VF(bp))
2101		return -EOPNOTSUPP;
2102
2103	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1);
2104	mutex_lock(&bp->hwrm_cmd_lock);
2105	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 
 
 
2106	if (!rc)
2107		memcpy(nvm_dev_info, resp, sizeof(*resp));
2108	mutex_unlock(&bp->hwrm_cmd_lock);
2109	return rc;
2110}
2111
/* Log a hint when a flash/reset HWRM command fails with -EACCES. */
static void bnxt_print_admin_err(struct bnxt *bp)
{
	netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
2116
2117static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
2118				u16 ext, u16 *index, u32 *item_length,
2119				u32 *data_length);
2120
2121static int __bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
2122			      u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
2123			      u32 dir_item_len, const u8 *data,
2124			      size_t data_len)
2125{
2126	struct bnxt *bp = netdev_priv(dev);
 
2127	int rc;
2128	struct hwrm_nvm_write_input req = {0};
2129	dma_addr_t dma_handle;
2130	u8 *kmem = NULL;
2131
2132	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
 
 
2133
2134	req.dir_type = cpu_to_le16(dir_type);
2135	req.dir_ordinal = cpu_to_le16(dir_ordinal);
2136	req.dir_ext = cpu_to_le16(dir_ext);
2137	req.dir_attr = cpu_to_le16(dir_attr);
2138	req.dir_item_length = cpu_to_le32(dir_item_len);
2139	if (data_len && data) {
2140		req.dir_data_length = cpu_to_le32(data_len);
 
2141
2142		kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
2143					  GFP_KERNEL);
2144		if (!kmem)
2145			return -ENOMEM;
 
 
 
2146
2147		memcpy(kmem, data, data_len);
2148		req.host_src_addr = cpu_to_le64(dma_handle);
2149	}
2150
2151	rc = _hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
2152	if (kmem)
2153		dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
 
 
 
 
2154
2155	if (rc == -EACCES)
2156		bnxt_print_admin_err(bp);
2157	return rc;
2158}
2159
2160static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
2161			    u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
2162			    const u8 *data, size_t data_len)
2163{
2164	struct bnxt *bp = netdev_priv(dev);
 
2165	int rc;
2166
2167	mutex_lock(&bp->hwrm_cmd_lock);
2168	rc = __bnxt_flash_nvram(dev, dir_type, dir_ordinal, dir_ext, dir_attr,
2169				0, data, data_len);
2170	mutex_unlock(&bp->hwrm_cmd_lock);
2171	return rc;
2172}
2173
2174static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
2175				    u8 self_reset, u8 flags)
2176{
2177	struct hwrm_fw_reset_input req = {0};
2178	struct bnxt *bp = netdev_priv(dev);
2179	int rc;
2180
2181	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
2182
2183	req.embedded_proc_type = proc_type;
2184	req.selfrst_status = self_reset;
2185	req.flags = flags;
2186
2187	if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
2188		rc = hwrm_send_message_silent(bp, &req, sizeof(req),
2189					      HWRM_CMD_TIMEOUT);
2190	} else {
2191		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2192		if (rc == -EACCES)
2193			bnxt_print_admin_err(bp);
2194	}
2195	return rc;
2196}
2197
2198static int bnxt_firmware_reset(struct net_device *dev,
2199			       enum bnxt_nvm_directory_type dir_type)
2200{
2201	u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
2202	u8 proc_type, flags = 0;
2203
2204	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
2205	/*       (e.g. when firmware isn't already running) */
2206	switch (dir_type) {
2207	case BNX_DIR_TYPE_CHIMP_PATCH:
2208	case BNX_DIR_TYPE_BOOTCODE:
2209	case BNX_DIR_TYPE_BOOTCODE_2:
2210		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
2211		/* Self-reset ChiMP upon next PCIe reset: */
2212		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
2213		break;
2214	case BNX_DIR_TYPE_APE_FW:
2215	case BNX_DIR_TYPE_APE_PATCH:
2216		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
2217		/* Self-reset APE upon next PCIe reset: */
2218		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
2219		break;
2220	case BNX_DIR_TYPE_KONG_FW:
2221	case BNX_DIR_TYPE_KONG_PATCH:
2222		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
2223		break;
2224	case BNX_DIR_TYPE_BONO_FW:
2225	case BNX_DIR_TYPE_BONO_PATCH:
2226		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
2227		break;
2228	default:
2229		return -EINVAL;
2230	}
2231
2232	return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
2233}
2234
2235static int bnxt_firmware_reset_chip(struct net_device *dev)
2236{
2237	struct bnxt *bp = netdev_priv(dev);
2238	u8 flags = 0;
2239
2240	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
2241		flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
2242
2243	return bnxt_hwrm_firmware_reset(dev,
2244					FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
2245					FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
2246					flags);
2247}
2248
2249static int bnxt_firmware_reset_ap(struct net_device *dev)
2250{
2251	return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
2252					FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
2253					0);
2254}
2255
2256static int bnxt_flash_firmware(struct net_device *dev,
2257			       u16 dir_type,
2258			       const u8 *fw_data,
2259			       size_t fw_size)
2260{
2261	int	rc = 0;
2262	u16	code_type;
2263	u32	stored_crc;
2264	u32	calculated_crc;
2265	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
2266
2267	switch (dir_type) {
2268	case BNX_DIR_TYPE_BOOTCODE:
2269	case BNX_DIR_TYPE_BOOTCODE_2:
2270		code_type = CODE_BOOT;
2271		break;
2272	case BNX_DIR_TYPE_CHIMP_PATCH:
2273		code_type = CODE_CHIMP_PATCH;
2274		break;
2275	case BNX_DIR_TYPE_APE_FW:
2276		code_type = CODE_MCTP_PASSTHRU;
2277		break;
2278	case BNX_DIR_TYPE_APE_PATCH:
2279		code_type = CODE_APE_PATCH;
2280		break;
2281	case BNX_DIR_TYPE_KONG_FW:
2282		code_type = CODE_KONG_FW;
2283		break;
2284	case BNX_DIR_TYPE_KONG_PATCH:
2285		code_type = CODE_KONG_PATCH;
2286		break;
2287	case BNX_DIR_TYPE_BONO_FW:
2288		code_type = CODE_BONO_FW;
2289		break;
2290	case BNX_DIR_TYPE_BONO_PATCH:
2291		code_type = CODE_BONO_PATCH;
2292		break;
2293	default:
2294		netdev_err(dev, "Unsupported directory entry type: %u\n",
2295			   dir_type);
2296		return -EINVAL;
2297	}
2298	if (fw_size < sizeof(struct bnxt_fw_header)) {
2299		netdev_err(dev, "Invalid firmware file size: %u\n",
2300			   (unsigned int)fw_size);
2301		return -EINVAL;
2302	}
2303	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
2304		netdev_err(dev, "Invalid firmware signature: %08X\n",
2305			   le32_to_cpu(header->signature));
2306		return -EINVAL;
2307	}
2308	if (header->code_type != code_type) {
2309		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
2310			   code_type, header->code_type);
2311		return -EINVAL;
2312	}
2313	if (header->device != DEVICE_CUMULUS_FAMILY) {
2314		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
2315			   DEVICE_CUMULUS_FAMILY, header->device);
2316		return -EINVAL;
2317	}
2318	/* Confirm the CRC32 checksum of the file: */
2319	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
2320					     sizeof(stored_crc)));
2321	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
2322	if (calculated_crc != stored_crc) {
2323		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
2324			   (unsigned long)stored_crc,
2325			   (unsigned long)calculated_crc);
2326		return -EINVAL;
2327	}
2328	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2329			      0, 0, fw_data, fw_size);
2330	if (rc == 0)	/* Firmware update successful */
2331		rc = bnxt_firmware_reset(dev, dir_type);
2332
2333	return rc;
2334}
2335
2336static int bnxt_flash_microcode(struct net_device *dev,
2337				u16 dir_type,
2338				const u8 *fw_data,
2339				size_t fw_size)
2340{
2341	struct bnxt_ucode_trailer *trailer;
2342	u32 calculated_crc;
2343	u32 stored_crc;
2344	int rc = 0;
2345
2346	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
2347		netdev_err(dev, "Invalid microcode file size: %u\n",
2348			   (unsigned int)fw_size);
2349		return -EINVAL;
2350	}
2351	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
2352						sizeof(*trailer)));
2353	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
2354		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
2355			   le32_to_cpu(trailer->sig));
2356		return -EINVAL;
2357	}
2358	if (le16_to_cpu(trailer->dir_type) != dir_type) {
2359		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
2360			   dir_type, le16_to_cpu(trailer->dir_type));
2361		return -EINVAL;
2362	}
2363	if (le16_to_cpu(trailer->trailer_length) <
2364		sizeof(struct bnxt_ucode_trailer)) {
2365		netdev_err(dev, "Invalid microcode trailer length: %d\n",
2366			   le16_to_cpu(trailer->trailer_length));
2367		return -EINVAL;
2368	}
2369
2370	/* Confirm the CRC32 checksum of the file: */
2371	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
2372					     sizeof(stored_crc)));
2373	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
2374	if (calculated_crc != stored_crc) {
2375		netdev_err(dev,
2376			   "CRC32 (%08lX) does not match calculated: %08lX\n",
2377			   (unsigned long)stored_crc,
2378			   (unsigned long)calculated_crc);
2379		return -EINVAL;
2380	}
2381	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2382			      0, 0, fw_data, fw_size);
2383
2384	return rc;
2385}
2386
2387static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
2388{
2389	switch (dir_type) {
2390	case BNX_DIR_TYPE_CHIMP_PATCH:
2391	case BNX_DIR_TYPE_BOOTCODE:
2392	case BNX_DIR_TYPE_BOOTCODE_2:
2393	case BNX_DIR_TYPE_APE_FW:
2394	case BNX_DIR_TYPE_APE_PATCH:
2395	case BNX_DIR_TYPE_KONG_FW:
2396	case BNX_DIR_TYPE_KONG_PATCH:
2397	case BNX_DIR_TYPE_BONO_FW:
2398	case BNX_DIR_TYPE_BONO_PATCH:
2399		return true;
2400	}
2401
2402	return false;
2403}
2404
2405static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
2406{
2407	switch (dir_type) {
2408	case BNX_DIR_TYPE_AVS:
2409	case BNX_DIR_TYPE_EXP_ROM_MBA:
2410	case BNX_DIR_TYPE_PCIE:
2411	case BNX_DIR_TYPE_TSCF_UCODE:
2412	case BNX_DIR_TYPE_EXT_PHY:
2413	case BNX_DIR_TYPE_CCM:
2414	case BNX_DIR_TYPE_ISCSI_BOOT:
2415	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
2416	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
2417		return true;
2418	}
2419
2420	return false;
2421}
2422
2423static bool bnxt_dir_type_is_executable(u16 dir_type)
2424{
2425	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
2426		bnxt_dir_type_is_other_exec_format(dir_type);
2427}
2428
2429static int bnxt_flash_firmware_from_file(struct net_device *dev,
2430					 u16 dir_type,
2431					 const char *filename)
2432{
2433	const struct firmware  *fw;
2434	int			rc;
2435
2436	rc = request_firmware(&fw, filename, &dev->dev);
2437	if (rc != 0) {
2438		netdev_err(dev, "Error %d requesting firmware file: %s\n",
2439			   rc, filename);
2440		return rc;
2441	}
2442	if (bnxt_dir_type_is_ape_bin_format(dir_type))
2443		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
2444	else if (bnxt_dir_type_is_other_exec_format(dir_type))
2445		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
2446	else
2447		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2448				      0, 0, fw->data, fw->size);
2449	release_firmware(fw);
2450	return rc;
2451}
2452
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2453#define BNXT_PKG_DMA_SIZE	0x40000
2454#define BNXT_NVM_MORE_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
2455#define BNXT_NVM_LAST_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
2456
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Flash a full firmware package into the NVRAM UPDATE area and install it.
 *
 * The package is streamed to firmware in DMA-sized chunks via
 * HWRM_NVM_MODIFY (batched when the image is larger than the DMA
 * buffer), then committed with HWRM_NVM_INSTALL_UPDATE.  If the install
 * fails with a fragmentation error the routine asks firmware to
 * defragment; if that in turn reports no space, the UPDATE directory is
 * recreated and the whole flash sequence is retried exactly once.
 *
 * Returns 0 on success or a negative errno (-ENOPKG on an install
 * failure reported in the response, -EACCES when the PF lacks admin
 * privileges).
 */
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
				   u32 install_type)
{
	struct hwrm_nvm_install_update_input install = {0};
	struct hwrm_nvm_install_update_output resp = {0};
	struct hwrm_nvm_modify_input modify = {0};
	struct bnxt *bp = netdev_priv(dev);
	bool defrag_attempted = false;
	dma_addr_t dma_handle;
	u8 *kmem = NULL;
	u32 modify_len;
	u32 item_len;
	int rc = 0;
	u16 index;

	bnxt_hwrm_fw_set_time(bp);

	bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);

	/* Try allocating a large DMA buffer first.  Older fw will
	 * cause excessive NVRAM erases when using small blocks.
	 */
	modify_len = roundup_pow_of_two(fw->size);
	modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
	while (1) {
		kmem = dma_alloc_coherent(&bp->pdev->dev, modify_len,
					  &dma_handle, GFP_KERNEL);
		if (!kmem && modify_len > PAGE_SIZE)
			modify_len /= 2;	/* halve and retry on failure */
		else
			break;
	}
	if (!kmem)
		return -ENOMEM;

	modify.host_src_addr = cpu_to_le64(dma_handle);

	bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
	/* Legacy callers pass the install type in the high 16 bits */
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	install.install_type = cpu_to_le32(install_type);

	do {
		u32 copied = 0, len = modify_len;

		/* Locate the UPDATE staging area and verify it is big enough */
		rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
					  BNX_DIR_ORDINAL_FIRST,
					  BNX_DIR_EXT_NONE,
					  &index, &item_len, NULL);
		if (rc) {
			netdev_err(dev, "PKG update area not created in nvram\n");
			break;
		}
		if (fw->size > item_len) {
			netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
				   (unsigned long)fw->size);
			rc = -EFBIG;
			break;
		}

		modify.dir_idx = cpu_to_le16(index);

		/* Stream the image in modify_len-sized chunks; flag the
		 * batch start/end so firmware can coalesce erases.
		 */
		if (fw->size > modify_len)
			modify.flags = BNXT_NVM_MORE_FLAG;
		while (copied < fw->size) {
			u32 balance = fw->size - copied;

			if (balance <= modify_len) {
				len = balance;
				if (copied)
					modify.flags |= BNXT_NVM_LAST_FLAG;
			}
			memcpy(kmem, fw->data + copied, len);
			modify.len = cpu_to_le32(len);
			modify.offset = cpu_to_le32(copied);
			rc = hwrm_send_message(bp, &modify, sizeof(modify),
					       FLASH_PACKAGE_TIMEOUT);
			if (rc)
				goto pkg_abort;
			copied += len;
		}
		/* Hold the HWRM lock across install + possible defrag so the
		 * shared response buffer stays consistent.
		 */
		mutex_lock(&bp->hwrm_cmd_lock);
		rc = _hwrm_send_message_silent(bp, &install, sizeof(install),
					       INSTALL_PACKAGE_TIMEOUT);
		memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp));

		if (defrag_attempted) {
			/* We have tried to defragment already in the previous
			 * iteration. Return with the result for INSTALL_UPDATE
			 */
			mutex_unlock(&bp->hwrm_cmd_lock);
			break;
		}

		if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
		    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
			/* Retry the install with defragmentation allowed */
			install.flags =
				cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);

			rc = _hwrm_send_message_silent(bp, &install,
						       sizeof(install),
						       INSTALL_PACKAGE_TIMEOUT);
			memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp));

			if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
			    NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
				/* FW has cleared NVM area, driver will create
				 * UPDATE directory and try the flash again
				 */
				defrag_attempted = true;
				install.flags = 0;
				rc = __bnxt_flash_nvram(bp->dev,
							BNX_DIR_TYPE_UPDATE,
							BNX_DIR_ORDINAL_FIRST,
							0, 0, item_len, NULL,
							0);
			} else if (rc) {
				netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
			}
		} else if (rc) {
			netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
		}
		mutex_unlock(&bp->hwrm_cmd_lock);
	} while (defrag_attempted && !rc);

pkg_abort:
	dma_free_coherent(&bp->pdev->dev, modify_len, kmem, dma_handle);
	if (resp.result) {
		/* Firmware reported a package-level install error */
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp.result, (int)resp.problem_item);
		rc = -ENOPKG;
	}
	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
2593
2594static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
2595					u32 install_type)
2596{
2597	const struct firmware *fw;
2598	int rc;
2599
2600	rc = request_firmware(&fw, filename, &dev->dev);
2601	if (rc != 0) {
2602		netdev_err(dev, "PKG error %d requesting file: %s\n",
2603			   rc, filename);
2604		return rc;
2605	}
2606
2607	rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type);
2608
2609	release_firmware(fw);
2610
2611	return rc;
2612}
2613
2614static int bnxt_flash_device(struct net_device *dev,
2615			     struct ethtool_flash *flash)
2616{
2617	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
2618		netdev_err(dev, "flashdev not supported from a virtual function\n");
2619		return -EINVAL;
2620	}
2621
2622	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
2623	    flash->region > 0xffff)
2624		return bnxt_flash_package_from_file(dev, flash->data,
2625						    flash->region);
2626
2627	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
2628}
2629
2630static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
2631{
 
 
2632	struct bnxt *bp = netdev_priv(dev);
2633	int rc;
2634	struct hwrm_nvm_get_dir_info_input req = {0};
2635	struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
2636
2637	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
 
 
2638
2639	mutex_lock(&bp->hwrm_cmd_lock);
2640	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2641	if (!rc) {
2642		*entries = le32_to_cpu(output->entries);
2643		*length = le32_to_cpu(output->entry_length);
2644	}
2645	mutex_unlock(&bp->hwrm_cmd_lock);
2646	return rc;
2647}
2648
/* ethtool get_eeprom_len handler.  VFs have no NVRAM access. */
static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp))
		return 0;

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}
2661
2662static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
2663{
2664	struct bnxt *bp = netdev_priv(dev);
2665	int rc;
2666	u32 dir_entries;
2667	u32 entry_length;
2668	u8 *buf;
2669	size_t buflen;
2670	dma_addr_t dma_handle;
2671	struct hwrm_nvm_get_dir_entries_input req = {0};
2672
2673	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
2674	if (rc != 0)
2675		return rc;
2676
2677	if (!dir_entries || !entry_length)
2678		return -EIO;
2679
2680	/* Insert 2 bytes of directory info (count and size of entries) */
2681	if (len < 2)
2682		return -EINVAL;
2683
2684	*data++ = dir_entries;
2685	*data++ = entry_length;
2686	len -= 2;
2687	memset(data, 0xff, len);
2688
2689	buflen = dir_entries * entry_length;
2690	buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
2691				 GFP_KERNEL);
 
 
 
2692	if (!buf) {
2693		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
2694			   (unsigned)buflen);
2695		return -ENOMEM;
2696	}
2697	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
2698	req.host_dest_addr = cpu_to_le64(dma_handle);
2699	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 
2700	if (rc == 0)
2701		memcpy(data, buf, len > buflen ? buflen : len);
2702	dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
2703	return rc;
2704}
2705
2706static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
2707			       u32 length, u8 *data)
2708{
2709	struct bnxt *bp = netdev_priv(dev);
2710	int rc;
2711	u8 *buf;
2712	dma_addr_t dma_handle;
2713	struct hwrm_nvm_read_input req = {0};
2714
2715	if (!length)
2716		return -EINVAL;
2717
2718	buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
2719				 GFP_KERNEL);
 
 
 
2720	if (!buf) {
2721		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
2722			   (unsigned)length);
2723		return -ENOMEM;
2724	}
2725	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
2726	req.host_dest_addr = cpu_to_le64(dma_handle);
2727	req.dir_idx = cpu_to_le16(index);
2728	req.offset = cpu_to_le32(offset);
2729	req.len = cpu_to_le32(length);
2730
2731	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 
 
 
 
 
 
2732	if (rc == 0)
2733		memcpy(data, buf, length);
2734	dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
2735	return rc;
2736}
2737
2738static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
2739				u16 ext, u16 *index, u32 *item_length,
2740				u32 *data_length)
2741{
 
 
2742	struct bnxt *bp = netdev_priv(dev);
2743	int rc;
2744	struct hwrm_nvm_find_dir_entry_input req = {0};
2745	struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
2746
2747	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
2748	req.enables = 0;
2749	req.dir_idx = 0;
2750	req.dir_type = cpu_to_le16(type);
2751	req.dir_ordinal = cpu_to_le16(ordinal);
2752	req.dir_ext = cpu_to_le16(ext);
2753	req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
2754	mutex_lock(&bp->hwrm_cmd_lock);
2755	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 
 
 
2756	if (rc == 0) {
2757		if (index)
2758			*index = le16_to_cpu(output->dir_idx);
2759		if (item_length)
2760			*item_length = le32_to_cpu(output->dir_item_length);
2761		if (data_length)
2762			*data_length = le32_to_cpu(output->dir_data_length);
2763	}
2764	mutex_unlock(&bp->hwrm_cmd_lock);
2765	return rc;
2766}
2767
/* Extract a field from the PKG_LOG blob.
 *
 * The log is a sequence of '\n'-terminated records, each consisting of
 * '\t'-separated fields.  Returns a pointer into @data to field number
 * @desired_field of the last record scanned, or NULL if no such field
 * exists.  NOTE: @data is modified destructively — separators are
 * overwritten with NUL so the returned field is a proper C string.
 */
static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char	*retval = NULL;
	char	*p;
	char	*value;
	int	field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			/* advance to the end of the current field */
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			/* record ends unless a tab separator follows */
			if (*p != '\t')
				break;
			*p = 0;	/* terminate the field in place */
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;	/* terminate the record in place */
	}
	return retval;
}
2800
2801static void bnxt_get_pkgver(struct net_device *dev)
2802{
2803	struct bnxt *bp = netdev_priv(dev);
2804	u16 index = 0;
2805	char *pkgver;
2806	u32 pkglen;
2807	u8 *pkgbuf;
2808	int len;
2809
2810	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
2811				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
2812				 &index, NULL, &pkglen) != 0)
2813		return;
 
2814
2815	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
2816	if (!pkgbuf) {
2817		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
2818			pkglen);
2819		return;
2820	}
2821
2822	if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
 
2823		goto err;
2824
2825	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
2826				   pkglen);
2827	if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2828		len = strlen(bp->fw_ver_str);
2829		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
2830			 "/pkg %s", pkgver);
2831	}
2832err:
2833	kfree(pkgbuf);
2834}
2835
2836static int bnxt_get_eeprom(struct net_device *dev,
2837			   struct ethtool_eeprom *eeprom,
2838			   u8 *data)
2839{
2840	u32 index;
2841	u32 offset;
2842
2843	if (eeprom->offset == 0) /* special offset value to get directory */
2844		return bnxt_get_nvram_directory(dev, eeprom->len, data);
2845
2846	index = eeprom->offset >> 24;
2847	offset = eeprom->offset & 0xffffff;
2848
2849	if (index == 0) {
2850		netdev_err(dev, "unsupported index value: %d\n", index);
2851		return -EINVAL;
2852	}
2853
2854	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
2855}
2856
2857static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
2858{
 
2859	struct bnxt *bp = netdev_priv(dev);
2860	struct hwrm_nvm_erase_dir_entry_input req = {0};
2861
2862	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
2863	req.dir_idx = cpu_to_le16(index);
2864	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 
 
 
2865}
2866
/* ethtool -E handler: either perform an NVRAM directory operation or
 * create/re-write an NVM item, depending on the encoding of
 * eeprom->magic and eeprom->offset.  PF only; VFs cannot write NVM.
 */
static int bnxt_set_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 index, dir_op;
	u16 type, ext, ordinal, attr;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "NVM write not supported from a virtual function\n");
		return -EINVAL;
	}

	/* magic layout: [31:16] item type, [15:8] dir op, [7:0] dir index */
	type = eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = eeprom->magic & 0xff;
		dir_op = eeprom->magic >> 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			/* offset must be the bitwise complement of magic,
			 * as a sanity check against accidental erases
			 */
			if (eeprom->offset != ~eeprom->magic)
				return -EINVAL;
			return bnxt_erase_nvram_directory(dev, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type))
		return -EOPNOTSUPP;
	/* offset layout for item writes: [31:16] ordinal, [15:0] attributes */
	ext = eeprom->magic & 0xffff;
	ordinal = eeprom->offset >> 16;
	attr = eeprom->offset & 0xffff;

	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
				eeprom->len);
}
2907
/* ethtool --set-eee handler.  Validates the requested EEE settings
 * against the autoneg configuration and LPI timer limits, stores them
 * in bp->eee, and pushes them to the firmware if the interface is up.
 * All link state is accessed under bp->link_lock.
 */
static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	u32 advertising;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
	/* When disabling, skip all validation and just clear the enable bit */
	if (!edata->eee_enabled)
		goto eee_ok;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		rc = -EINVAL;
		goto eee_exit;
	}
	if (edata->tx_lpi_enabled) {
		/* lpi_tmr_hi == 0 means the fw did not report timer limits;
		 * in that case keep the previously stored timer value.
		 */
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			rc = -EINVAL;
			goto eee_exit;
		} else if (!bp->lpi_tmr_hi) {
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	if (!edata->advertised) {
		/* Default to everything both supported and being autoneg'd */
		edata->advertised = advertising & eee->supported;
	} else if (edata->advertised & ~advertising) {
		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
			    edata->advertised, advertising);
		rc = -EINVAL;
		goto eee_exit;
	}

	eee->advertised = edata->advertised;
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

eee_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
2965
2966static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
2967{
2968	struct bnxt *bp = netdev_priv(dev);
2969
2970	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
2971		return -EOPNOTSUPP;
2972
2973	*edata = bp->eee;
2974	if (!bp->eee.eee_enabled) {
2975		/* Preserve tx_lpi_timer so that the last value will be used
2976		 * by default when it is re-enabled.
2977		 */
2978		edata->advertised = 0;
2979		edata->tx_lpi_enabled = 0;
2980	}
2981
2982	if (!bp->eee.eee_active)
2983		edata->lp_advertised = 0;
2984
2985	return 0;
2986}
2987
/* Read @data_length bytes from an SFP/QSFP module EEPROM page over I2C
 * via the firmware, in chunks of at most BNXT_MAX_PHY_I2C_RESP_SIZE.
 * @i2c_addr selects the device (A0/A2), @page_number and @start_addr
 * select the location, and the result is copied into @buf.
 * Returns 0 on success or a negative errno from the HWRM layer.
 */
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
					    u16 page_number, u16 start_addr,
					    u16 data_length, u8 *buf)
{
	struct hwrm_port_phy_i2c_read_input req = {0};
	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
	int rc, byte_offset = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
	req.i2c_slave_addr = i2c_addr;
	req.page_number = cpu_to_le16(page_number);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	do {
		u16 xfer_size;

		/* Chunk the transfer to the max the fw response can carry */
		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
		data_length -= xfer_size;
		req.page_offset = cpu_to_le16(start_addr + byte_offset);
		req.data_length = xfer_size;
		/* Only set the PAGE_OFFSET enable when reading from a
		 * nonzero offset
		 */
		req.enables = cpu_to_le32(start_addr + byte_offset ?
				 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
		/* Hold hwrm_cmd_lock while the shared response buffer
		 * (output->data) is read
		 */
		mutex_lock(&bp->hwrm_cmd_lock);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (!rc)
			memcpy(buf + byte_offset, output->data, xfer_size);
		mutex_unlock(&bp->hwrm_cmd_lock);
		byte_offset += xfer_size;
	} while (!rc && data_length > 0);

	return rc;
}
3020
/* ethtool -m handler: identify the plugged-in transceiver module and
 * report its EEPROM layout (SFF-8472/8436/8636) and size.
 */
static int bnxt_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	/* Enough to cover byte 0 (module ID) through the diag-support byte */
	u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
	if (bp->link_info.module_status >
		PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return -EOPNOTSUPP;

	/* This feature is not supported in older firmware versions */
	if (bp->hwrm_spec_code < 0x10202)
		return -EOPNOTSUPP;

	rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
					      SFF_DIAG_SUPPORT_OFFSET + 1,
					      data);
	if (!rc) {
		u8 module_id = data[0];
		u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];

		switch (module_id) {
		case SFF_MODULE_ID_SFP:
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			/* Without DDM support there is no A2 page, so only
			 * half the 8472 length is readable
			 */
			if (!diag_supported)
				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP:
		case SFF_MODULE_ID_QSFP_PLUS:
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP28:
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
			break;
		default:
			rc = -EOPNOTSUPP;
			break;
		}
	}
	return rc;
}
3070
/* ethtool -m dump handler: read eeprom->len bytes starting at
 * eeprom->offset from the module EEPROM.  Offsets below
 * ETH_MODULE_SFF_8436_LEN map to the A0 device; the remainder maps to
 * the A2 (diagnostics) device, so a request may straddle both.
 */
static int bnxt_get_module_eeprom(struct net_device *dev,
				  struct ethtool_eeprom *eeprom,
				  u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u16  start = eeprom->offset, length = eeprom->len;
	int rc = 0;

	memset(data, 0, eeprom->len);

	/* Read A0 portion of the EEPROM */
	if (start < ETH_MODULE_SFF_8436_LEN) {
		/* Clamp this read to the end of the A0 page */
		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
			length = ETH_MODULE_SFF_8436_LEN - start;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
						      start, length, data);
		if (rc)
			return rc;
		/* Advance past what was read; "length" becomes what's left */
		start += length;
		data += length;
		length = eeprom->len - length;
	}

	/* Read A2 portion of the EEPROM */
	if (length) {
		/* A2 offsets are relative to the start of the A2 device */
		start -= ETH_MODULE_SFF_8436_LEN;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
						      start, length, data);
	}
	return rc;
}
3102
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3103static int bnxt_nway_reset(struct net_device *dev)
3104{
3105	int rc = 0;
3106
3107	struct bnxt *bp = netdev_priv(dev);
3108	struct bnxt_link_info *link_info = &bp->link_info;
3109
3110	if (!BNXT_PHY_CFG_ABLE(bp))
3111		return -EOPNOTSUPP;
3112
3113	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
3114		return -EINVAL;
3115
3116	if (netif_running(dev))
3117		rc = bnxt_hwrm_set_link_setting(bp, true, false);
3118
3119	return rc;
3120}
3121
/* ethtool -p handler: blink (or restore) the port LEDs so the physical
 * port can be identified.  PF only, and only if the fw reported LEDs.
 */
static int bnxt_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_pf_info *pf = &bp->pf;
	struct bnxt_led_cfg *led_cfg;
	u8 led_state;
	__le16 duration;
	int i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	if (state == ETHTOOL_ID_ACTIVE) {
		/* Alternate blink at 500ms on/off to identify the port */
		led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
		duration = cpu_to_le16(500);
	} else if (state == ETHTOOL_ID_INACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
		duration = cpu_to_le16(0);
	} else {
		return -EINVAL;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.num_leds = bp->num_leds;
	/* The per-LED fields in the request start at led0_id and repeat;
	 * overlay a bnxt_led_cfg array on them.
	 */
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
3159
3160static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
3161{
3162	struct hwrm_selftest_irq_input req = {0};
 
3163
3164	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
3165	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 
 
 
 
3166}
3167
3168static int bnxt_test_irq(struct bnxt *bp)
3169{
3170	int i;
3171
3172	for (i = 0; i < bp->cp_nr_rings; i++) {
3173		u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
3174		int rc;
3175
3176		rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
3177		if (rc)
3178			return rc;
3179	}
3180	return 0;
3181}
3182
3183static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
3184{
3185	struct hwrm_port_mac_cfg_input req = {0};
 
3186
3187	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
 
 
3188
3189	req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
3190	if (enable)
3191		req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
3192	else
3193		req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
3194	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3195}
3196
/* Query the firmware for the bitmap of link speeds that can be forced
 * (PORT_PHY_QCAPS).  The response is read from the shared response
 * buffer, so the read happens under hwrm_cmd_lock.
 */
static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
{
	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_phy_qcaps_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);

	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
3212
/* Before PHY loopback, force a fixed link speed if autoneg is on and the
 * PHY cannot do loopback with autoneg.  Sends a PORT_PHY_CFG with the
 * force/reset flags using the caller's @req, then clears those fields so
 * the caller can reuse @req for the actual loopback configuration.
 */
static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
				    struct hwrm_port_phy_cfg_input *req)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 fw_advertising;
	u16 fw_speed;
	int rc;

	/* Nothing to do when autoneg is off or the PHY supports
	 * loopback with autoneg enabled
	 */
	if (!link_info->autoneg ||
	    (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
		return 0;

	rc = bnxt_query_force_speeds(bp, &fw_advertising);
	if (rc)
		return rc;

	/* Prefer the current link speed; otherwise pick the fastest
	 * forceable speed being advertised, defaulting to 1Gb
	 */
	fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
	if (bp->link_info.link_up)
		fw_speed = bp->link_info.link_speed;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;

	req->force_link_speed = cpu_to_le16(fw_speed);
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
	/* Restore @req so the caller's subsequent send is clean */
	req->flags = 0;
	req->force_link_speed = cpu_to_le16(0);
	return rc;
}
3249
3250static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
3251{
3252	struct hwrm_port_phy_cfg_input req = {0};
 
 
 
 
 
3253
3254	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
 
3255
3256	if (enable) {
3257		bnxt_disable_an_for_lpbk(bp, &req);
3258		if (ext)
3259			req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
3260		else
3261			req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
3262	} else {
3263		req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
3264	}
3265	req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
3266	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 
 
3267}
3268
/* Verify the packet received during a loopback test: locate the RX
 * buffer from the completion at @raw_cons and check that the length,
 * source MAC, and payload pattern match what bnxt_run_loopback() sent.
 * Returns 0 on match, -EIO on any mismatch.
 */
static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    u32 raw_cons, int pkt_size)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_cmp *rxcmp;
	u16 cp_cons, cons;
	u8 *data;
	u32 len;
	int i;

	rxr = bnapi->rx_ring;
	cp_cons = RING_CMP(raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	/* The opaque field carries the RX buffer index for this packet */
	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data_ptr;
	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	if (len != pkt_size)
		return -EIO;
	/* Skip the broadcast dest MAC; source MAC must be our own */
	i = ETH_ALEN;
	if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
		return -EIO;
	i += ETH_ALEN;
	/* Payload bytes were filled with (i & 0xff) by the sender */
	for (  ; i < pkt_size; i++) {
		if (data[i] != (u8)(i & 0xff))
			return -EIO;
	}
	return 0;
}
3301
/* Poll the completion ring (up to ~1ms) for the RX completion of the
 * loopback packet and validate it with bnxt_rx_loopback().  Advances
 * cpr->cp_raw_cons past the consumed entries.  Returns 0 on a verified
 * packet, -EIO on timeout or mismatch.
 */
static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      int pkt_size)
{
	struct tx_cmp *txcmp;
	int rc = -EIO;
	u32 raw_cons;
	u32 cons;
	int i;

	raw_cons = cpr->cp_raw_cons;
	for (i = 0; i < 200; i++) {
		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons)) {
			udelay(5);
			continue;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
			rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
			/* An L2 RX completion occupies two ring entries */
			raw_cons = NEXT_RAW_CMP(raw_cons);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			break;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}
	cpr->cp_raw_cons = raw_cons;
	return rc;
}
3336
/* Execute one loopback iteration: build a test packet (broadcast dest,
 * our MAC as source, counting-byte payload), DMA-map it, transmit it on
 * ring 0, and poll for it to come back on the RX side.  Returns 0 on a
 * verified round trip or a negative errno.
 */
static int bnxt_run_loopback(struct bnxt *bp)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct bnxt_cp_ring_info *cpr;
	int pkt_size, i = 0;
	struct sk_buff *skb;
	dma_addr_t map;
	u8 *data;
	int rc;

	cpr = &rxr->bnapi->cp_ring;
	/* On P5 chips the RX completions use a dedicated sub-ring */
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
	/* Keep the packet small enough to be received via the copy path */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	data = skb_put(skb, pkt_size);
	/* dest = broadcast, src = our MAC, payload = counting bytes;
	 * bnxt_rx_loopback() checks for exactly this pattern
	 */
	eth_broadcast_addr(data);
	i += ETH_ALEN;
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++)
		data[i] = (u8)(i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	bnxt_xmit_bd(bp, txr, map, pkt_size);

	/* Sync BD data before updating doorbell */
	wmb();

	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	rc = bnxt_poll_loopback(bp, cpr, pkt_size);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);
	return rc;
}
3381
/* Run the firmware self-tests selected by @test_mask and return the
 * per-test pass bitmap in @test_results.  The response field is zeroed
 * before sending so a failed HWRM call reads back as "all tests failed".
 */
static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
	struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_selftest_exec_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	resp->test_success = 0;
	req.flags = test_mask;
	/* Self-tests may take longer than a normal command */
	rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
	*test_results = resp->test_success;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
3397
/* Tests implemented by the driver itself (beyond the firmware
 * self-tests): MAC loopback, PHY loopback, external loopback, and IRQ.
 * They occupy the last BNXT_DRV_TESTS slots of the results array.
 */
#define BNXT_DRV_TESTS			4
#define BNXT_MACLPBK_TEST_IDX		(bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 1)
#define BNXT_EXTLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
#define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 3)
3403
/* ethtool -t handler.  Runs the firmware self-tests and, in offline
 * mode, the driver loopback tests (MAC, PHY, optionally external) plus
 * the IRQ test.  Offline testing closes the NIC, half-opens it for the
 * loopback packets, then fully reopens it.  Per-test results go in
 * @buf (0 = pass, 1 = fail); overall failure is flagged in etest->flags.
 */
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
			   u64 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	bool do_ext_lpbk = false;
	bool offline = false;
	u8 test_results = 0;
	u8 test_mask = 0;
	int rc = 0, i;

	if (!bp->num_tests || !BNXT_PF(bp))
		return;
	memset(buf, 0, sizeof(u64) * bp->num_tests);
	if (!netif_running(dev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
	    (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
		do_ext_lpbk = true;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		/* Offline tests disrupt traffic; refuse when VFs are active
		 * or the port is shared with another PF
		 */
		if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
			return;
		}
		offline = true;
	}

	/* Select which fw tests to run: online-safe tests always, tests
	 * marked offline-only just when offline was requested
	 */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if (!(bp->test_info->offline_mask & bit_val))
			test_mask |= bit_val;
		else if (offline)
			test_mask |= bit_val;
	}
	if (!offline) {
		bnxt_run_fw_tests(bp, test_mask, &test_results);
	} else {
		rc = bnxt_close_nic(bp, false, false);
		if (rc)
			return;
		bnxt_run_fw_tests(bp, test_mask, &test_results);

		/* Assume failure until the loopback packet round-trips */
		buf[BNXT_MACLPBK_TEST_IDX] = 1;
		bnxt_hwrm_mac_loopback(bp, true);
		msleep(250);
		rc = bnxt_half_open_nic(bp);
		if (rc) {
			bnxt_hwrm_mac_loopback(bp, false);
			etest->flags |= ETH_TEST_FL_FAILED;
			return;
		}
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_MACLPBK_TEST_IDX] = 0;

		bnxt_hwrm_mac_loopback(bp, false);
		bnxt_hwrm_phy_loopback(bp, true, false);
		/* Give the PHY time to settle into loopback mode */
		msleep(1000);
		if (bnxt_run_loopback(bp)) {
			buf[BNXT_PHYLPBK_TEST_IDX] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (do_ext_lpbk) {
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
			bnxt_hwrm_phy_loopback(bp, true, true);
			msleep(1000);
			if (bnxt_run_loopback(bp)) {
				buf[BNXT_EXTLPBK_TEST_IDX] = 1;
				etest->flags |= ETH_TEST_FL_FAILED;
			}
		}
		bnxt_hwrm_phy_loopback(bp, false, false);
		bnxt_half_close_nic(bp);
		rc = bnxt_open_nic(bp, false, true);
	}
	if (rc || bnxt_test_irq(bp)) {
		buf[BNXT_IRQ_TEST_IDX] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* Translate the fw pass bitmap into per-test result slots */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if ((test_mask & bit_val) && !(test_results & bit_val)) {
			buf[i] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}
}
3498
/* ethtool --reset handler.  Supports full chip reset and application
 * processor reset, PF only.  Bits that were honored are cleared from
 * *flags; for a combined request, unsupported parts are left set rather
 * than failing hard.
 */
static int bnxt_reset(struct net_device *dev, u32 *flags)
{
	struct bnxt *bp = netdev_priv(dev);
	bool reload = false;
	u32 req = *flags;

	if (!req)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "Reset is not supported from a VF\n");
		return -EOPNOTSUPP;
	}

	if (pci_vfs_assigned(bp->pdev) &&
	    !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
		netdev_err(dev,
			   "Reset not allowed when VFs are assigned to VMs\n");
		return -EBUSY;
	}

	if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_chip(dev)) {
				netdev_info(dev, "Firmware reset request successful.\n");
				/* Without hot reset capability a driver
				 * reload is needed to finish the reset
				 */
				if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
					reload = true;
				*flags &= ~BNXT_FW_RESET_CHIP;
			}
		} else if (req == BNXT_FW_RESET_CHIP) {
			return -EOPNOTSUPP; /* only request, fail hard */
		}
	}

	if (req & BNXT_FW_RESET_AP) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_ap(dev)) {
				netdev_info(dev, "Reset application processor successful.\n");
				reload = true;
				*flags &= ~BNXT_FW_RESET_AP;
			}
		} else if (req == BNXT_FW_RESET_AP) {
			return -EOPNOTSUPP; /* only request, fail hard */
		}
	}

	if (reload)
		netdev_info(dev, "Reload driver to complete reset\n");

	return 0;
}
3552
/* Drive a sequenced DBG_COREDUMP_* command (@msg) that DMAs data from
 * firmware in chunks of info->dma_len.  Each iteration bumps the
 * sequence number at info->seq_off in the request and copies the chunk
 * into info->dest_buf (allocated here on the first LIST response) until
 * the firmware clears the MORE flag.  Serialized under hwrm_cmd_lock
 * across all iterations.  Returns 0 or a negative errno.
 */
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
				  struct bnxt_hwrm_dbg_dma_info *info)
{
	struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_dbg_cmn_input *cmn_req = msg;
	__le16 *seq_ptr = msg + info->seq_off;
	u16 seq = 0, len, segs_off;
	void *resp = cmn_resp;
	dma_addr_t dma_handle;
	int rc, off = 0;
	void *dma_buf;

	dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
				     GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
			    total_segments);
	cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
	cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
	mutex_lock(&bp->hwrm_cmd_lock);
	while (1) {
		*seq_ptr = cpu_to_le16(seq);
		rc = _hwrm_send_message(bp, msg, msg_len,
					HWRM_COREDUMP_TIMEOUT);
		if (rc)
			break;

		len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
		/* On the first LIST response, size and allocate the
		 * destination buffer from the reported segment count
		 */
		if (!seq &&
		    cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
			info->segs = le16_to_cpu(*((__le16 *)(resp +
							      segs_off)));
			if (!info->segs) {
				rc = -EIO;
				break;
			}

			info->dest_buf_size = info->segs *
					sizeof(struct coredump_segment_record);
			info->dest_buf = kmalloc(info->dest_buf_size,
						 GFP_KERNEL);
			if (!info->dest_buf) {
				rc = -ENOMEM;
				break;
			}
		}

		if (info->dest_buf) {
			/* Bounds-check against the user-supplied buffer,
			 * reserving room for the trailing coredump record
			 */
			if ((info->seg_start + off + len) <=
			    BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
				memcpy(info->dest_buf + off, dma_buf, len);
			} else {
				rc = -ENOBUFS;
				break;
			}
		}

		/* RETRIEVE reports total size incrementally per chunk */
		if (cmn_req->req_type ==
				cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
			info->dest_buf_size += len;

		if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
			break;

		seq++;
		off += len;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
	return rc;
}
3626
/* Fetch the list of coredump segments from firmware.  On success,
 * coredump->data (kmalloc'd by the DMA helper; caller must kfree),
 * data_size and total_segs are filled in.
 */
static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
				       struct bnxt_coredump *coredump)
{
	struct hwrm_dbg_coredump_list_input req = {0};
	struct bnxt_hwrm_dbg_dma_info info = {NULL};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);

	info.dma_len = COREDUMP_LIST_BUF_LEN;
	info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
	info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
				     data_len);

	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
	if (!rc) {
		coredump->data = info.dest_buf;
		coredump->data_size = info.dest_buf_size;
		coredump->total_segs = info.segs;
	}
	return rc;
}
3649
3650static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
3651					   u16 segment_id)
3652{
3653	struct hwrm_dbg_coredump_initiate_input req = {0};
3654
3655	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
3656	req.component_id = cpu_to_le16(component_id);
3657	req.segment_id = cpu_to_le16(segment_id);
3658
3659	return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT);
3660}
3661
/* Retrieve one coredump segment into @buf at @offset (or just measure
 * it when @buf is NULL).  *seg_len is set to the segment's size.
 * Returns 0, -ENOBUFS if @buf_len is too small, or another errno.
 */
static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
					   u16 segment_id, u32 *seg_len,
					   void *buf, u32 buf_len, u32 offset)
{
	struct hwrm_dbg_coredump_retrieve_input req = {0};
	struct bnxt_hwrm_dbg_dma_info info = {NULL};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
	req.component_id = cpu_to_le16(component_id);
	req.segment_id = cpu_to_le16(segment_id);

	info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
	info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
				seq_no);
	info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
				     data_len);
	if (buf) {
		/* Write directly into the caller's buffer at the segment's
		 * final position; seg_start lets the helper bounds-check
		 */
		info.dest_buf = buf + offset;
		info.buf_len = buf_len;
		info.seg_start = offset;
	}

	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
	if (!rc)
		*seg_len = info.dest_buf_size;

	return rc;
}
3691
/* Populate a coredump segment header ("sEgM").  @seg_rec supplies the
 * component/segment identity; when NULL the header describes the
 * hwrm_ver_get pseudo-segment (component 2, segment 0).
 */
static void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
			   struct bnxt_coredump_segment_hdr *seg_hdr,
			   struct coredump_segment_record *seg_rec, u32 seg_len,
			   int status, u32 duration, u32 instance)
{
	memset(seg_hdr, 0, sizeof(*seg_hdr));
	memcpy(seg_hdr->signature, "sEgM", 4);
	if (seg_rec) {
		/* Already little-endian in the record; copy through */
		seg_hdr->component_id = (__force __le32)seg_rec->component_id;
		seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
		seg_hdr->low_version = seg_rec->version_low;
		seg_hdr->high_version = seg_rec->version_hi;
	} else {
		/* For hwrm_ver_get response Component id = 2
		 * and Segment id = 0
		 */
		seg_hdr->component_id = cpu_to_le32(2);
		seg_hdr->segment_id = 0;
	}
	seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
	seg_hdr->length = cpu_to_le32(seg_len);
	seg_hdr->status = cpu_to_le32(status);
	seg_hdr->duration = cpu_to_le32(duration);
	seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
	seg_hdr->instance = cpu_to_le32(instance);
}
3719
/* Fill in the trailing coredump record ("cOrE") that closes the dump:
 * host identity, start/end timestamps, OS version, chip identity, and
 * overall status.
 */
static void
bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
			  time64_t start, s16 start_utc, u16 total_segs,
			  int status)
{
	time64_t end = ktime_get_real_seconds();
	u32 os_ver_major = 0, os_ver_minor = 0;
	struct tm tm;

	time64_to_tm(start, 0, &tm);
	memset(record, 0, sizeof(*record));
	memcpy(record->signature, "cOrE", 4);
	record->flags = 0;
	record->low_version = 0;
	record->high_version = 1;
	record->asic_state = 0;
	strlcpy(record->system_name, utsname()->nodename,
		sizeof(record->system_name));
	record->year = cpu_to_le16(tm.tm_year + 1900);
	record->month = cpu_to_le16(tm.tm_mon + 1);
	record->day = cpu_to_le16(tm.tm_mday);
	record->hour = cpu_to_le16(tm.tm_hour);
	record->minute = cpu_to_le16(tm.tm_min);
	record->second = cpu_to_le16(tm.tm_sec);
	record->utc_bias = cpu_to_le16(start_utc);
	strcpy(record->commandline, "ethtool -w");
	record->total_segments = cpu_to_le32(total_segs);

	/* Kernel version, e.g. "5.x" from utsname release */
	sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
	record->os_ver_major = cpu_to_le32(os_ver_major);
	record->os_ver_minor = cpu_to_le32(os_ver_minor);

	strlcpy(record->os_name, utsname()->sysname, 32);
	time64_to_tm(end, 0, &tm);
	record->end_year = cpu_to_le16(tm.tm_year + 1900);
	record->end_month = cpu_to_le16(tm.tm_mon + 1);
	record->end_day = cpu_to_le16(tm.tm_mday);
	record->end_hour = cpu_to_le16(tm.tm_hour);
	record->end_minute = cpu_to_le16(tm.tm_min);
	record->end_second = cpu_to_le16(tm.tm_sec);
	record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
	record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
				       bp->ver_resp.chip_rev << 8 |
				       bp->ver_resp.chip_metal);
	record->asic_id2 = 0;
	record->coredump_status = cpu_to_le32(status);
	record->ioctl_low_version = 0;
	record->ioctl_high_version = 0;
}
3769
/* Build (or size) a firmware coredump.  When @buf is NULL this only
 * computes the required length into *dump_len; otherwise it writes the
 * full dump: a ver_get pseudo-segment, one header+data per firmware
 * segment, and a trailing coredump record.  Returns 0 or a negative
 * errno (-ENOBUFS when @buf is too small for the firmware's data).
 */
static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
{
	u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
	u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
	struct coredump_segment_record *seg_record = NULL;
	struct bnxt_coredump_segment_hdr seg_hdr;
	struct bnxt_coredump coredump = {NULL};
	time64_t start_time;
	u16 start_utc;
	int rc = 0, i;

	if (buf)
		buf_len = *dump_len;

	start_time = ktime_get_real_seconds();
	start_utc = sys_tz.tz_minuteswest * 60;
	seg_hdr_len = sizeof(seg_hdr);

	/* First segment should be hwrm_ver_get response */
	*dump_len = seg_hdr_len + ver_get_resp_len;
	if (buf) {
		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
					   0, 0, 0);
		memcpy(buf + offset, &seg_hdr, seg_hdr_len);
		offset += seg_hdr_len;
		memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
		offset += ver_get_resp_len;
	}

	rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
	if (rc) {
		netdev_err(bp->dev, "Failed to get coredump segment list\n");
		goto err;
	}

	*dump_len += seg_hdr_len * coredump.total_segs;

	seg_record = (struct coredump_segment_record *)coredump.data;
	seg_record_len = sizeof(*seg_record);

	for (i = 0; i < coredump.total_segs; i++) {
		u16 comp_id = le16_to_cpu(seg_record->component_id);
		u16 seg_id = le16_to_cpu(seg_record->segment_id);
		u32 duration = 0, seg_len = 0;
		unsigned long start, end;

		/* Room for this segment's header (data is bounds-checked
		 * inside the retrieve path)
		 */
		if (buf && ((offset + seg_hdr_len) >
			    BNXT_COREDUMP_BUF_LEN(buf_len))) {
			rc = -ENOBUFS;
			goto err;
		}

		start = jiffies;

		rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
		if (rc) {
			netdev_err(bp->dev,
				   "Failed to initiate coredump for seg = %d\n",
				   seg_record->segment_id);
			goto next_seg;
		}

		/* Write segment data into the buffer */
		rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
						     &seg_len, buf, buf_len,
						     offset + seg_hdr_len);
		if (rc && rc == -ENOBUFS)
			goto err;
		else if (rc)
			netdev_err(bp->dev,
				   "Failed to retrieve coredump for seg = %d\n",
				   seg_record->segment_id);

next_seg:
		/* A failed segment still gets a header recording its rc */
		end = jiffies;
		duration = jiffies_to_msecs(end - start);
		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
					   rc, duration, 0);

		if (buf) {
			/* Write segment header into the buffer */
			memcpy(buf + offset, &seg_hdr, seg_hdr_len);
			offset += seg_hdr_len + seg_len;
		}

		*dump_len += seg_len;
		seg_record =
			(struct coredump_segment_record *)((u8 *)seg_record +
							   seg_record_len);
	}

err:
	/* Always close the dump with the trailing record, even on error */
	if (buf)
		bnxt_fill_coredump_record(bp, buf + offset, start_time,
					  start_utc, coredump.total_segs + 1,
					  rc);
	kfree(coredump.data);
	*dump_len += sizeof(struct bnxt_coredump_record);
	if (rc == -ENOBUFS)
		netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
	return rc;
}
3872
3873static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
3874{
3875	struct bnxt *bp = netdev_priv(dev);
3876
3877	if (dump->flag > BNXT_DUMP_CRASH) {
3878		netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
3879		return -EINVAL;
3880	}
3881
3882	if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
3883		netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
3884		return -EOPNOTSUPP;
3885	}
3886
3887	bp->dump_flag = dump->flag;
3888	return 0;
3889}
3890
3891static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
3892{
3893	struct bnxt *bp = netdev_priv(dev);
3894
3895	if (bp->hwrm_spec_code < 0x10801)
3896		return -EOPNOTSUPP;
3897
3898	dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
3899			bp->ver_resp.hwrm_fw_min_8b << 16 |
3900			bp->ver_resp.hwrm_fw_bld_8b << 8 |
3901			bp->ver_resp.hwrm_fw_rsvd_8b;
3902
3903	dump->flag = bp->dump_flag;
3904	if (bp->dump_flag == BNXT_DUMP_CRASH)
3905		dump->len = BNXT_CRASH_DUMP_LEN;
3906	else
3907		bnxt_get_coredump(bp, NULL, &dump->len);
3908	return 0;
3909}
3910
3911static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
3912			      void *buf)
3913{
3914	struct bnxt *bp = netdev_priv(dev);
3915
3916	if (bp->hwrm_spec_code < 0x10801)
3917		return -EOPNOTSUPP;
3918
3919	memset(buf, 0, dump->len);
3920
3921	dump->flag = bp->dump_flag;
3922	if (dump->flag == BNXT_DUMP_CRASH) {
3923#ifdef CONFIG_TEE_BNXT_FW
3924		return tee_bnxt_copy_coredump(buf, 0, dump->len);
3925#endif
3926	} else {
3927		return bnxt_get_coredump(bp, buf, &dump->len);
3928	}
3929
3930	return 0;
3931}
3932
3933static int bnxt_get_ts_info(struct net_device *dev,
3934			    struct ethtool_ts_info *info)
3935{
3936	struct bnxt *bp = netdev_priv(dev);
3937	struct bnxt_ptp_cfg *ptp;
3938
3939	ptp = bp->ptp_cfg;
3940	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
3941				SOF_TIMESTAMPING_RX_SOFTWARE |
3942				SOF_TIMESTAMPING_SOFTWARE;
3943
3944	info->phc_index = -1;
3945	if (!ptp)
3946		return 0;
3947
3948	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
3949				 SOF_TIMESTAMPING_RX_HARDWARE |
3950				 SOF_TIMESTAMPING_RAW_HARDWARE;
3951	if (ptp->ptp_clock)
3952		info->phc_index = ptp_clock_index(ptp->ptp_clock);
3953
3954	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
3955
3956	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
3957			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
3958			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
 
 
 
3959	return 0;
3960}
3961
3962void bnxt_ethtool_init(struct bnxt *bp)
3963{
3964	struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
3965	struct hwrm_selftest_qlist_input req = {0};
3966	struct bnxt_test_info *test_info;
3967	struct net_device *dev = bp->dev;
3968	int i, rc;
3969
3970	if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
3971		bnxt_get_pkgver(dev);
3972
3973	bp->num_tests = 0;
3974	if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
3975		return;
3976
3977	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
3978	mutex_lock(&bp->hwrm_cmd_lock);
3979	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3980	if (rc)
3981		goto ethtool_init_exit;
3982
3983	test_info = bp->test_info;
3984	if (!test_info)
3985		test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
3986	if (!test_info)
 
 
 
 
 
 
 
 
 
 
3987		goto ethtool_init_exit;
3988
3989	bp->test_info = test_info;
3990	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
3991	if (bp->num_tests > BNXT_MAX_TEST)
3992		bp->num_tests = BNXT_MAX_TEST;
3993
3994	test_info->offline_mask = resp->offline_tests;
3995	test_info->timeout = le16_to_cpu(resp->test_timeout);
3996	if (!test_info->timeout)
3997		test_info->timeout = HWRM_CMD_TIMEOUT;
3998	for (i = 0; i < bp->num_tests; i++) {
3999		char *str = test_info->string[i];
4000		char *fw_str = resp->test0_name + i * 32;
4001
4002		if (i == BNXT_MACLPBK_TEST_IDX) {
4003			strcpy(str, "Mac loopback test (offline)");
4004		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
4005			strcpy(str, "Phy loopback test (offline)");
4006		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
4007			strcpy(str, "Ext loopback test (offline)");
4008		} else if (i == BNXT_IRQ_TEST_IDX) {
4009			strcpy(str, "Interrupt_test (offline)");
4010		} else {
4011			strlcpy(str, fw_str, ETH_GSTRING_LEN);
4012			strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
4013			if (test_info->offline_mask & (1 << i))
4014				strncat(str, " (offline)",
4015					ETH_GSTRING_LEN - strlen(str));
4016			else
4017				strncat(str, " (online)",
4018					ETH_GSTRING_LEN - strlen(str));
4019		}
4020	}
4021
4022ethtool_init_exit:
4023	mutex_unlock(&bp->hwrm_cmd_lock);
4024}
4025
4026static void bnxt_get_eth_phy_stats(struct net_device *dev,
4027				   struct ethtool_eth_phy_stats *phy_stats)
4028{
4029	struct bnxt *bp = netdev_priv(dev);
4030	u64 *rx;
4031
4032	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
4033		return;
4034
4035	rx = bp->rx_port_stats_ext.sw_stats;
4036	phy_stats->SymbolErrorDuringCarrier =
4037		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
4038}
4039
4040static void bnxt_get_eth_mac_stats(struct net_device *dev,
4041				   struct ethtool_eth_mac_stats *mac_stats)
4042{
4043	struct bnxt *bp = netdev_priv(dev);
4044	u64 *rx, *tx;
4045
4046	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4047		return;
4048
4049	rx = bp->port_stats.sw_stats;
4050	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4051
4052	mac_stats->FramesReceivedOK =
4053		BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
4054	mac_stats->FramesTransmittedOK =
4055		BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
4056	mac_stats->FrameCheckSequenceErrors =
4057		BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
4058	mac_stats->AlignmentErrors =
4059		BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
4060	mac_stats->OutOfRangeLengthField =
4061		BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
4062}
4063
4064static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
4065				    struct ethtool_eth_ctrl_stats *ctrl_stats)
4066{
4067	struct bnxt *bp = netdev_priv(dev);
4068	u64 *rx;
4069
4070	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4071		return;
4072
4073	rx = bp->port_stats.sw_stats;
4074	ctrl_stats->MACControlFramesReceived =
4075		BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
4076}
4077
/* RMON frame-size histogram buckets (bytes), matching the hardware's
 * rx/tx_*_frames counters; the zero entry terminates the list.
 */
static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  9216 },
	{ 9217, 16383 },
	{}
};
4091
4092static void bnxt_get_rmon_stats(struct net_device *dev,
4093				struct ethtool_rmon_stats *rmon_stats,
4094				const struct ethtool_rmon_hist_range **ranges)
4095{
4096	struct bnxt *bp = netdev_priv(dev);
4097	u64 *rx, *tx;
4098
4099	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4100		return;
4101
4102	rx = bp->port_stats.sw_stats;
4103	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4104
4105	rmon_stats->jabbers =
4106		BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
4107	rmon_stats->oversize_pkts =
4108		BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
4109	rmon_stats->undersize_pkts =
4110		BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);
4111
4112	rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
4113	rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
4114	rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
4115	rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
4116	rmon_stats->hist[4] =
4117		BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
4118	rmon_stats->hist[5] =
4119		BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
4120	rmon_stats->hist[6] =
4121		BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
4122	rmon_stats->hist[7] =
4123		BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
4124	rmon_stats->hist[8] =
4125		BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
4126	rmon_stats->hist[9] =
4127		BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);
4128
4129	rmon_stats->hist_tx[0] =
4130		BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
4131	rmon_stats->hist_tx[1] =
4132		BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
4133	rmon_stats->hist_tx[2] =
4134		BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
4135	rmon_stats->hist_tx[3] =
4136		BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
4137	rmon_stats->hist_tx[4] =
4138		BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
4139	rmon_stats->hist_tx[5] =
4140		BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
4141	rmon_stats->hist_tx[6] =
4142		BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
4143	rmon_stats->hist_tx[7] =
4144		BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
4145	rmon_stats->hist_tx[8] =
4146		BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
4147	rmon_stats->hist_tx[9] =
4148		BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);
4149
4150	*ranges = bnxt_rmon_ranges;
4151}
4152
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4153void bnxt_ethtool_free(struct bnxt *bp)
4154{
4155	kfree(bp->test_info);
4156	bp->test_info = NULL;
4157}
4158
/* ethtool entry points for bnxt netdevs; registered via dev->ethtool_ops.
 * supported_coalesce_params declares which coalescing knobs the core may
 * pass through to bnxt_set_coalesce().
 */
const struct ethtool_ops bnxt_ethtool_ops = {

	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,

	.get_link_ksettings	= bnxt_get_link_ksettings,
	.set_link_ksettings	= bnxt_set_link_ksettings,
	.get_fec_stats		= bnxt_get_fec_stats,
	.get_fecparam		= bnxt_get_fecparam,
	.set_fecparam		= bnxt_set_fecparam,
	.get_pause_stats	= bnxt_get_pause_stats,
	.get_pauseparam		= bnxt_get_pauseparam,
	.set_pauseparam		= bnxt_set_pauseparam,
	.get_drvinfo		= bnxt_get_drvinfo,
	.get_regs_len		= bnxt_get_regs_len,
	.get_regs		= bnxt_get_regs,
	.get_wol		= bnxt_get_wol,
	.set_wol		= bnxt_set_wol,
	.get_coalesce		= bnxt_get_coalesce,
	.set_coalesce		= bnxt_set_coalesce,
	.get_msglevel		= bnxt_get_msglevel,
	.set_msglevel		= bnxt_set_msglevel,
	.get_sset_count		= bnxt_get_sset_count,
	.get_strings		= bnxt_get_strings,
	.get_ethtool_stats	= bnxt_get_ethtool_stats,
	.set_ringparam		= bnxt_set_ringparam,
	.get_ringparam		= bnxt_get_ringparam,
	.get_channels		= bnxt_get_channels,
	.set_channels		= bnxt_set_channels,
	.get_rxnfc		= bnxt_get_rxnfc,
	.set_rxnfc		= bnxt_set_rxnfc,
	.get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size      = bnxt_get_rxfh_key_size,
	.get_rxfh               = bnxt_get_rxfh,
	.set_rxfh		= bnxt_set_rxfh,
	.flash_device		= bnxt_flash_device,
	.get_eeprom_len         = bnxt_get_eeprom_len,
	.get_eeprom             = bnxt_get_eeprom,
	.set_eeprom		= bnxt_set_eeprom,
	.get_link		= bnxt_get_link,

	.get_eee		= bnxt_get_eee,
	.set_eee		= bnxt_set_eee,
	.get_module_info	= bnxt_get_module_info,
	.get_module_eeprom	= bnxt_get_module_eeprom,

	.nway_reset		= bnxt_nway_reset,
	.set_phys_id		= bnxt_set_phys_id,
	.self_test		= bnxt_self_test,
	.get_ts_info		= bnxt_get_ts_info,
	.reset			= bnxt_reset,
	.set_dump		= bnxt_set_dump,
	.get_dump_flag		= bnxt_get_dump_flag,
	.get_dump_data		= bnxt_get_dump_data,
	.get_eth_phy_stats	= bnxt_get_eth_phy_stats,
	.get_eth_mac_stats	= bnxt_get_eth_mac_stats,
	.get_eth_ctrl_stats	= bnxt_get_eth_ctrl_stats,
	.get_rmon_stats		= bnxt_get_rmon_stats,
};