   1/* Broadcom NetXtreme-C/E network driver.
   2 *
   3 * Copyright (c) 2014-2016 Broadcom Corporation
   4 * Copyright (c) 2016-2017 Broadcom Limited
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 */
  10
  11#include <linux/bitops.h>
  12#include <linux/ctype.h>
  13#include <linux/stringify.h>
  14#include <linux/ethtool.h>
  15#include <linux/ethtool_netlink.h>
  16#include <linux/linkmode.h>
  17#include <linux/interrupt.h>
  18#include <linux/pci.h>
  19#include <linux/etherdevice.h>
  20#include <linux/crc32.h>
  21#include <linux/firmware.h>
  22#include <linux/utsname.h>
  23#include <linux/time.h>
  24#include <linux/ptp_clock_kernel.h>
  25#include <linux/net_tstamp.h>
  26#include <linux/timecounter.h>
  27#include <net/netlink.h>
  28#include "bnxt_hsi.h"
  29#include "bnxt.h"
  30#include "bnxt_hwrm.h"
  31#include "bnxt_ulp.h"
  32#include "bnxt_xdp.h"
  33#include "bnxt_ptp.h"
  34#include "bnxt_ethtool.h"
  35#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
  36#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
  37#include "bnxt_coredump.h"
  38
/* Report an NVM operation error both to userspace via the netlink extack
 * (when the caller supplied one) and unconditionally to the kernel log.
 * do { } while (0) makes the macro safe as a single statement body.
 */
#define BNXT_NVM_ERR_MSG(dev, extack, msg)			\
	do {							\
		if (extack)					\
			NL_SET_ERR_MSG_MOD(extack, msg);	\
		netdev_err(dev, "%s\n", msg);			\
	} while (0)
  45
  46static u32 bnxt_get_msglevel(struct net_device *dev)
  47{
  48	struct bnxt *bp = netdev_priv(dev);
  49
  50	return bp->msg_enable;
  51}
  52
  53static void bnxt_set_msglevel(struct net_device *dev, u32 value)
  54{
  55	struct bnxt *bp = netdev_priv(dev);
  56
  57	bp->msg_enable = value;
  58}
  59
  60static int bnxt_get_coalesce(struct net_device *dev,
  61			     struct ethtool_coalesce *coal,
  62			     struct kernel_ethtool_coalesce *kernel_coal,
  63			     struct netlink_ext_ack *extack)
  64{
  65	struct bnxt *bp = netdev_priv(dev);
  66	struct bnxt_coal *hw_coal;
  67	u16 mult;
  68
  69	memset(coal, 0, sizeof(*coal));
  70
  71	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
  72
  73	hw_coal = &bp->rx_coal;
  74	mult = hw_coal->bufs_per_record;
  75	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
  76	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
  77	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
  78	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
  79	if (hw_coal->flags &
  80	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
  81		kernel_coal->use_cqe_mode_rx = true;
  82
  83	hw_coal = &bp->tx_coal;
  84	mult = hw_coal->bufs_per_record;
  85	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
  86	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
  87	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
  88	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
  89	if (hw_coal->flags &
  90	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
  91		kernel_coal->use_cqe_mode_tx = true;
  92
  93	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
  94
  95	return 0;
  96}
  97
/* ethtool set_coalesce: program new interrupt coalescing parameters.
 * Frame counts from userspace are scaled up by bufs_per_record into HW
 * buffer counts.  Note the control flow: when adaptive RX (DIM) is being
 * turned off, we jump straight to reset_coalesce so that the previously
 * programmed static settings are re-applied without being overwritten by
 * the (possibly stale) values in this request.
 */
static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			/* Disabling DIM: re-apply static settings as-is. */
			goto reset_coalesce;
		}
	}

	/* CQE-based coalescing requires FW support for timer reset. */
	if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
	    !(bp->coal_cap.cmpl_params &
	      RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
		return -EOPNOTSUPP;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_rx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_tx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		/* Stats timer change needs a full close/open cycle below. */
		update_stats = true;
	}

reset_coalesce:
	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
		if (update_stats) {
			/* Stats interval can only change across a restart. */
			bnxt_close_nic(bp, true, false);
			rc = bnxt_open_nic(bp, true, false);
		} else {
			/* Ring coalescing can be updated live via HWRM. */
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}
 177
/* Per-RX-ring HW counter names; order must match the sw_stats layout
 * consumed by bnxt_get_ethtool_stats().
 */
static const char * const bnxt_ring_rx_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_errors",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
};

/* Per-TX-ring HW counter names, following the RX counters in sw_stats. */
static const char * const bnxt_ring_tx_stats_str[] = {
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_errors",
	"tx_discards",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

/* TPA (LRO/GRO-HW) counter names for chips without max_tpa_v2. */
static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

/* TPA counter names for newer (max_tpa_v2) chips. */
static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
	"rx_tpa_events",
};

/* Software-maintained per-RX-ring counters (cpr->sw_stats.rx). */
static const char * const bnxt_rx_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_resets",
	"rx_buf_errors",
};

/* Software-maintained per-completion-ring counters (cpr->sw_stats.cmn). */
static const char * const bnxt_cmn_sw_stats_str[] = {
	"missed_irqs",
};
 225
/* Helpers to build { offset, name } table entries for the port stats
 * arrays below.  The offset is into the FW stats block; the name is the
 * stringified counter field.
 */
#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

/* Per-priority PFC duration/transition counter pairs (priorities 0-7). */
#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

/* Per-CoS-queue byte/packet counter pairs (queues 0-7). */
#define BNXT_RX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),		\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),		\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_TX_STATS_EXT_COS_ENTRIES				\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

/* Per-priority entries reuse the cos0 offset as a base; the actual CoS
 * queue is selected at runtime via bp->pri2cos_idx[] (see
 * bnxt_get_ethtool_stats()).
 */
#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)
 335
 
 
 
/* Indices into the aggregated ring error counters. */
enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
	RX_NETPOLL_DISCARDS,
};

/* Names for bnxt_total_ring_err_stats; order must match the field order
 * walked pointer-wise in bnxt_get_ethtool_stats().
 */
static const char *const bnxt_ring_err_stats_arr[] = {
	"rx_total_l4_csum_errors",
	"rx_total_resets",
	"rx_total_buf_errors",
	"rx_total_oom_discards",
	"rx_total_netpoll_discards",
	"rx_total_ring_discards",
	"tx_total_resets",
	"tx_total_ring_discards",
	"total_missed_irqs",
};

/* Counter counts derived from the string tables above. */
#define NUM_RING_RX_SW_STATS		ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS		ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS		ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS		ARRAY_SIZE(bnxt_ring_tx_stats_str)
 358
/* Port-level MAC counters: offset into the FW port stats block plus the
 * ethtool string reported for it.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};
 444
/* Extended RX port counters (FW rx_port_stats_ext block); only the first
 * bp->fw_rx_stats_ext_size entries are valid on a given device.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};

/* Extended TX port counters (FW tx_port_stats_ext block). */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

/* Per-priority views: base_off is the cos0 offset; the per-priority CoS
 * index is added at read time from bp->pri2cos_idx[].
 */
static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_pkts_pri_arr_placeholder),
};
 501
/* Aggregate counts used when sizing the ethtool stats/strings buffers. */
#define BNXT_NUM_RING_ERR_STATS	ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
/* Total per-priority entries (4 tables x 8 priorities). */
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
 
 509
 510static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
 511{
 512	if (BNXT_SUPPORTS_TPA(bp)) {
 513		if (bp->max_tpa_v2) {
 514			if (BNXT_CHIP_P5(bp))
 515				return BNXT_NUM_TPA_RING_STATS_P5;
 516			return BNXT_NUM_TPA_RING_STATS_P7;
 517		}
 518		return BNXT_NUM_TPA_RING_STATS;
 519	}
 520	return 0;
 521}
 522
 523static int bnxt_get_num_ring_stats(struct bnxt *bp)
 524{
 525	int rx, tx, cmn;
 526
 527	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
 528	     bnxt_get_num_tpa_ring_stats(bp);
 529	tx = NUM_RING_TX_HW_STATS;
 530	cmn = NUM_RING_CMN_SW_STATS;
 531	return rx * bp->rx_nr_rings +
 532	       tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
 533	       cmn * bp->cp_nr_rings;
 534}
 535
 536static int bnxt_get_num_stats(struct bnxt *bp)
 537{
 538	int num_stats = bnxt_get_num_ring_stats(bp);
 539	int len;
 540
 541	num_stats += BNXT_NUM_RING_ERR_STATS;
 542
 543	if (bp->flags & BNXT_FLAG_PORT_STATS)
 544		num_stats += BNXT_NUM_PORT_STATS;
 545
 546	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
 547		len = min_t(int, bp->fw_rx_stats_ext_size,
 548			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
 549		num_stats += len;
 550		len = min_t(int, bp->fw_tx_stats_ext_size,
 551			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
 552		num_stats += len;
 553		if (bp->pri2cos_valid)
 554			num_stats += BNXT_NUM_STATS_PRI;
 555	}
 556
 
 
 
 557	return num_stats;
 558}
 559
 560static int bnxt_get_sset_count(struct net_device *dev, int sset)
 561{
 562	struct bnxt *bp = netdev_priv(dev);
 563
 564	switch (sset) {
 565	case ETH_SS_STATS:
 566		return bnxt_get_num_stats(bp);
 567	case ETH_SS_TEST:
 568		if (!bp->num_tests)
 569			return -EOPNOTSUPP;
 570		return bp->num_tests;
 571	default:
 572		return -EOPNOTSUPP;
 573	}
 574}
 575
 576static bool is_rx_ring(struct bnxt *bp, int ring_num)
 577{
 578	return ring_num < bp->rx_nr_rings;
 579}
 580
 581static bool is_tx_ring(struct bnxt *bp, int ring_num)
 582{
 583	int tx_base = 0;
 584
 585	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
 586		tx_base = bp->rx_nr_rings;
 587
 588	if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
 589		return true;
 590	return false;
 591}
 592
/* ethtool get_ethtool_stats: fill buf[] in exactly the order that
 * bnxt_get_strings() emits names.  j indexes buf throughout; every
 * section below must advance j by the same count that the corresponding
 * section of bnxt_get_strings() emits, or names and values will skew.
 */
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct bnxt_total_ring_err_stats ring_err_stats = {0};
	struct bnxt *bp = netdev_priv(dev);
	u64 *curr, *prev;
	u32 tpa_stats;
	u32 i, j = 0;

	/* Device closed: no rings allocated.  Skip past the per-ring
	 * section (leaving those entries as provided) but still report
	 * the accumulated error and port counters below.
	 */
	if (!bp->bnapi) {
		j += bnxt_get_num_ring_stats(bp);
		goto skip_ring_stats;
	}

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		u64 *sw;
		int k;

		/* HW RX counters occupy the first slots of sw_stats. */
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		}
		/* HW TX counters follow the RX counters in sw_stats. */
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
			       j++, k++)
				buf[j] = sw_stats[k];
		}
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;

		/* TPA counters follow the RX+TX HW counters. */
		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			   tpa_stats; j++, k++)
			buf[j] = sw_stats[k];

skip_tpa_ring_stats:
		/* Driver-maintained per-RX-ring SW counters. */
		sw = (u64 *)&cpr->sw_stats.rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
				buf[j] = sw[k];
		}

		/* Common SW counters exist for every completion ring. */
		sw = (u64 *)&cpr->sw_stats.cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];
	}

	bnxt_get_ring_err_stats(bp, &ring_err_stats);

skip_ring_stats:
	/* Walk bnxt_total_ring_err_stats field-by-field, adding the
	 * snapshot preserved across resets (ring_err_stats_prev).
	 */
	curr = &ring_err_stats.rx_total_l4_csum_errors;
	prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
	for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
		buf[j] = *curr + *prev;

	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
		u32 len;

		/* FW may expose fewer ext counters than we have names. */
		len = min_t(u32, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		}
		len = min_t(u32, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		}
		/* Per-priority counters: index off the cos0 base using the
		 * priority-to-CoS-queue mapping reported by the FW.
		 */
		if (bp->pri2cos_valid) {
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
		}
	}
}
 704
/* ethtool get_strings: emit counter names for ETH_SS_STATS in exactly
 * the order bnxt_get_ethtool_stats() fills values, or the test strings
 * for ETH_SS_TEST.  Per-ring names are prefixed with "[ring]: ".
 */
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	static const char * const *str;
	u32 i, j, num_str;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_rx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			if (is_tx_ring(bp, i)) {
				num_str = NUM_RING_TX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_tx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = bnxt_get_num_tpa_ring_stats(bp);
			if (!num_str || !is_rx_ring(bp, i))
				goto skip_tpa_stats;

			/* Newer chips use the extended TPA name set. */
			if (bp->max_tpa_v2)
				str = bnxt_ring_tpa2_stats_str;
			else
				str = bnxt_ring_tpa_stats_str;

			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i, str[j]);
				buf += ETH_GSTRING_LEN;
			}
skip_tpa_stats:
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_SW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_rx_sw_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = NUM_RING_CMN_SW_STATS;
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_cmn_sw_stats_str[j]);
				buf += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
			strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
			buf += ETH_GSTRING_LEN;
		}

		if (bp->flags & BNXT_FLAG_PORT_STATS) {
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				strcpy(buf, bnxt_port_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			u32 len;

			/* Cap at what the FW actually provides, matching
			 * the counts used in bnxt_get_num_stats().
			 */
			len = min_t(u32, bp->fw_rx_stats_ext_size,
				    ARRAY_SIZE(bnxt_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			len = min_t(u32, bp->fw_tx_stats_ext_size,
				    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				strcpy(buf,
				       bnxt_tx_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			memcpy(buf, bp->test_info->string,
			       bp->num_tests * ETH_GSTRING_LEN);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}
 821
 822static void bnxt_get_ringparam(struct net_device *dev,
 823			       struct ethtool_ringparam *ering,
 824			       struct kernel_ethtool_ringparam *kernel_ering,
 825			       struct netlink_ext_ack *extack)
 826{
 827	struct bnxt *bp = netdev_priv(dev);
 828
 829	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
 830		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
 831		ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
 832		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
 833	} else {
 834		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
 835		ering->rx_jumbo_max_pending = 0;
 836		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
 837	}
 838	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
 839
 840	ering->rx_pending = bp->rx_ring_size;
 841	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
 842	ering->tx_pending = bp->tx_ring_size;
 843}
 844
 845static int bnxt_set_ringparam(struct net_device *dev,
 846			      struct ethtool_ringparam *ering,
 847			      struct kernel_ethtool_ringparam *kernel_ering,
 848			      struct netlink_ext_ack *extack)
 849{
 850	struct bnxt *bp = netdev_priv(dev);
 851
 852	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
 853	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
 854	    (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
 855		return -EINVAL;
 856
 857	if (netif_running(dev))
 858		bnxt_close_nic(bp, false, false);
 859
 860	bp->rx_ring_size = ering->rx_pending;
 861	bp->tx_ring_size = ering->tx_pending;
 862	bnxt_set_ring_params(bp);
 863
 864	if (netif_running(dev))
 865		return bnxt_open_nic(bp, false, false);
 866
 867	return 0;
 868}
 869
/* ethtool get_channels: report current and maximum RX/TX/combined ring
 * counts, constrained by FW TX scheduler inputs and TC configuration.
 */
static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs, tx_grps;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (netif_running(dev) && BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	/* First pass: shared (combined) ring maximums. */
	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	/* Each TC needs its own TX ring group; XDP adds one more. */
	tcs = bp->num_tc;
	tx_grps = max(tcs, 1);
	if (bp->tx_nr_rings_xdp)
		tx_grps++;
	max_tx_rings /= tx_grps;
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	/* Second pass: non-shared maximums; may be unavailable. */
	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		/* Nitro A0 reserves one ring for internal use. */
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}
 918
/* ethtool set_channels: validate the requested ring layout, verify the
 * resources are actually reservable, then close the NIC (if running),
 * reconfigure the ring counts, and reopen.
 */
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;
	int tx_cp;

	if (channel->other_count)
		return -EINVAL;

	/* Either combined, or both rx and tx, must be specified... */
	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	/* ...but not a mix of the two modes. */
	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	/* Nitro A0 supports only combined mode. */
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = bp->num_tc;

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		/* XDP needs one TX ring per RX ring. */
		tx_xdp = req_rx_rings;
	}
	/* Dry-run the reservation before tearing anything down. */
	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	/* A user-configured RSS table may not survive a change in RSS
	 * context count; refuse rather than silently discard it.
	 */
	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    netif_is_rxfh_configured(dev)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		bnxt_close_nic(bp, true, false);
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	/* Completion rings are shared with RX in shared mode. */
	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
			       tx_cp + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to renable
			 */
		}
	} else {
		/* Device closed: just re-reserve resources with the FW. */
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}
1013
1014static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
1015				     int tbl_size, u32 *ids, u32 start,
1016				     u32 id_cnt)
1017{
1018	int i, j = start;
1019
1020	if (j >= id_cnt)
1021		return j;
1022	for (i = 0; i < tbl_size; i++) {
1023		struct hlist_head *head;
1024		struct bnxt_filter_base *fltr;
1025
1026		head = &tbl[i];
1027		hlist_for_each_entry_rcu(fltr, head, hash) {
1028			if (!fltr->flags ||
1029			    test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
1030				continue;
1031			ids[j++] = fltr->sw_id;
1032			if (j == id_cnt)
1033				return j;
1034		}
1035	}
1036	return j;
1037}
1038
1039static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
1040						      struct hlist_head tbl[],
1041						      int tbl_size, u32 id)
1042{
1043	int i;
1044
1045	for (i = 0; i < tbl_size; i++) {
 
1046		struct hlist_head *head;
1047		struct bnxt_filter_base *fltr;
1048
1049		head = &tbl[i];
 
1050		hlist_for_each_entry_rcu(fltr, head, hash) {
1051			if (fltr->flags && fltr->sw_id == id)
1052				return fltr;
 
1053		}
 
 
 
1054	}
1055	return NULL;
1056}
1057
1058static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
1059			    u32 *rule_locs)
1060{
1061	cmd->data = bp->ntp_fltr_count;
1062	rcu_read_lock();
1063	cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
1064						  BNXT_NTP_FLTR_HASH_SIZE,
1065						  rule_locs, 0, cmd->rule_cnt);
1066	rcu_read_unlock();
1067
1068	return 0;
1069}
1070
/* ETHTOOL_GRXCLSRULE: translate the ntuple filter at fs->location back
 * into an ethtool_rx_flow_spec.  Only tuple fields the filter actually
 * matches on (per fltr->ntuple_flags) are reported, each with a full
 * mask.  Returns 0 on success, -EINVAL if the location is out of range,
 * no such filter exists, or the filter's protocol is not TCP/UDP.
 */
static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int rc = -EINVAL;

	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE,
					  fs->location);
	if (!fltr_base) {
		rcu_read_unlock();
		return rc;
	}
	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);

	fkeys = &fltr->fkeys;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V4_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V4_FLOW;
		else
			goto fltr_err;

		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
			fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
			fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
			fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
			fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
			fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
			fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
		}
	} else {
		/* Not IPv4, so this must be IPv6: the insert path only ever
		 * stores ETH_P_IP or ETH_P_IPV6 flow keys.
		 */
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V6_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V6_FLOW;
		else
			goto fltr_err;

		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
			*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
				fkeys->addrs.v6addrs.src;
			bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
			*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
				fkeys->addrs.v6addrs.dst;
			bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
			fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
			fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
		}
	}

	fs->ring_cookie = fltr->base.rxq;
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}
1154
/* Fully-set IPv4 address / L4 port masks; used to recognize exact-match
 * fields in ethtool flow specs.
 */
#define IPV4_ALL_MASK		((__force __be32)~0)
#define L4_PORT_ALL_MASK	((__force __be16)~0)
1157
1158static bool ipv6_mask_is_full(__be32 mask[4])
1159{
1160	return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
1161}
1162
1163static bool ipv6_mask_is_zero(__be32 mask[4])
1164{
1165	return !(mask[0] | mask[1] | mask[2] | mask[3]);
1166}
1167
/* ETHTOOL_SRXCLSRLINS helper: install a user-defined ntuple rule.
 *
 * Only TCP/UDP over IPv4/IPv6 is supported.  Each tuple field must be
 * either fully masked (exact match) or zero (wildcard); partial masks
 * are rejected, as is a rule that matches on no field at all.  On
 * success the assigned filter ID is written back to fs->location.
 * Returns 0 or a negative errno.
 */
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
				    struct ethtool_rx_flow_spec *fs)
{
	u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	struct bnxt_ntuple_filter *new_fltr, *fltr;
	struct bnxt_l2_filter *l2_fltr;
	u32 flow_type = fs->flow_type;
	struct flow_keys *fkeys;
	u32 idx;
	int rc;

	if (!bp->vnic_info)
		return -EAGAIN;

	/* Extended flow fields and VF destinations are not supported. */
	if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
		return -EOPNOTSUPP;

	new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
	if (!new_fltr)
		return -ENOMEM;

	/* Attach the rule to the default L2 filter; the reference taken
	 * here is dropped on every error path below (ntuple_err).
	 */
	l2_fltr = bp->vnic_info[0].l2_filters[0];
	atomic_inc(&l2_fltr->refcnt);
	new_fltr->l2_fltr = l2_fltr;
	fkeys = &new_fltr->fkeys;

	rc = -EOPNOTSUPP;
	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW: {
		struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
		struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V4_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IP);

		/* Per field: full mask -> exact match, zero mask -> ignore,
		 * anything else -> reject.
		 */
		if (ip_mask->ip4src == IPV4_ALL_MASK) {
			fkeys->addrs.v4addrs.src = ip_spec->ip4src;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
		} else if (ip_mask->ip4src) {
			goto ntuple_err;
		}
		if (ip_mask->ip4dst == IPV4_ALL_MASK) {
			fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
		} else if (ip_mask->ip4dst) {
			goto ntuple_err;
		}

		if (ip_mask->psrc == L4_PORT_ALL_MASK) {
			fkeys->ports.src = ip_spec->psrc;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
		} else if (ip_mask->psrc) {
			goto ntuple_err;
		}
		if (ip_mask->pdst == L4_PORT_ALL_MASK) {
			fkeys->ports.dst = ip_spec->pdst;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
		} else if (ip_mask->pdst) {
			goto ntuple_err;
		}
		break;
	}
	case TCP_V6_FLOW:
	case UDP_V6_FLOW: {
		struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
		struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V6_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);

		if (ipv6_mask_is_full(ip_mask->ip6src)) {
			fkeys->addrs.v6addrs.src =
				*(struct in6_addr *)&ip_spec->ip6src;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
		} else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
			goto ntuple_err;
		}
		if (ipv6_mask_is_full(ip_mask->ip6dst)) {
			fkeys->addrs.v6addrs.dst =
				*(struct in6_addr *)&ip_spec->ip6dst;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
		} else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
			goto ntuple_err;
		}

		if (ip_mask->psrc == L4_PORT_ALL_MASK) {
			fkeys->ports.src = ip_spec->psrc;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
		} else if (ip_mask->psrc) {
			goto ntuple_err;
		}
		if (ip_mask->pdst == L4_PORT_ALL_MASK) {
			fkeys->ports.dst = ip_spec->pdst;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
		} else if (ip_mask->pdst) {
			goto ntuple_err;
		}
		break;
	}
	default:
		rc = -EOPNOTSUPP;
		goto ntuple_err;
	}
	/* Must match on at least one tuple field. */
	if (!new_fltr->ntuple_flags)
		goto ntuple_err;

	/* Reject a duplicate of an already-installed filter. */
	idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
	rcu_read_lock();
	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
	if (fltr) {
		rcu_read_unlock();
		rc = -EEXIST;
		goto ntuple_err;
	}
	rcu_read_unlock();

	new_fltr->base.rxq = ring;
	new_fltr->base.flags = BNXT_ACT_NO_AGING;
	__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
	if (!rc) {
		rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
		if (rc) {
			/* HW programming failed: unwind the SW insert. */
			bnxt_del_ntp_filter(bp, new_fltr);
			return rc;
		}
		fs->location = new_fltr->base.sw_id;
		return 0;
	}

ntuple_err:
	atomic_dec(&l2_fltr->refcnt);
	kfree(new_fltr);
	return rc;
}
1309
/* ETHTOOL_SRXCLSRLINS: validate an ethtool flow-spec insert request and
 * hand it to bnxt_add_ntuple_cls_rule().  Requires the device to be up
 * with RFS enabled, and an auto-assigned location (RX_CLS_LOC_ANY).
 */
static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	u32 ring, flow_type;
	int rc;
	u8 vf;

	if (!netif_running(bp->dev))
		return -EAGAIN;
	if (!(bp->flags & BNXT_FLAG_RFS))
		return -EPERM;
	if (fs->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	/* The ring cookie encodes both the destination ring and the VF. */
	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	if (BNXT_VF(bp) && vf)
		return -EINVAL;
	if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
		return -EINVAL;
	if (!vf && ring >= bp->rx_nr_rings)
		return -EINVAL;

	flow_type = fs->flow_type;
	if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
		return -EINVAL;
	flow_type &= ~FLOW_EXT;
	if (flow_type == ETHER_FLOW)
		rc = -EOPNOTSUPP;
	else
		rc = bnxt_add_ntuple_cls_rule(bp, fs);
	return rc;
}
1343
/* ETHTOOL_SRXCLSRLDEL: delete the ntuple rule at fs->location.
 * Only user-created filters (BNXT_ACT_NO_AGING) may be removed this
 * way; other filters are refused with -EINVAL.
 */
static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;

	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE,
					  fs->location);
	if (!fltr_base) {
		rcu_read_unlock();
		return -ENOENT;
	}

	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
	if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();
	/* Free the hardware filter first, then the software bookkeeping. */
	bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
	bnxt_del_ntp_filter(bp, fltr);
	return 0;
}
1369
1370static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
1371{
1372	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
1373		return RXH_IP_SRC | RXH_IP_DST;
1374	return 0;
1375}
1376
1377static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
1378{
1379	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
1380		return RXH_IP_SRC | RXH_IP_DST;
1381	return 0;
1382}
1383
/* ETHTOOL_GRXFH: report which header fields feed the RSS hash for the
 * requested flow type, derived from the firmware hash-type bits.
 */
static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;	/* all IPv4 types also report the IP hash */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;	/* all IPv6 types also report the IP hash */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}
1428
/* The only hash-field combinations accepted by bnxt_srxfh(). */
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
1431
/* ETHTOOL_SRXFH: configure which header fields feed the RSS hash for a
 * flow type.  Only 4-tuple (IP addrs + L4 ports, TCP/UDP flows only),
 * 2-tuple (IP addrs only) or no hashing can be selected.  A change is
 * applied by briefly closing and re-opening the NIC.
 */
static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		/* 4-tuple UDP hashing needs explicit hardware support. */
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		/* 4-tuple hashing is only valid for the TCP/UDP types above. */
		return -EINVAL;
	}

	/* 2-tuple/no hashing maps onto the per-family IP hash-type bit. */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;	/* nothing changed */

	/* Remember which bits changed when the FW supports delta updates. */
	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
		bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}
1510
1511static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1512			  u32 *rule_locs)
1513{
1514	struct bnxt *bp = netdev_priv(dev);
1515	int rc = 0;
1516
1517	switch (cmd->cmd) {
 
1518	case ETHTOOL_GRXRINGS:
1519		cmd->data = bp->rx_nr_rings;
1520		break;
1521
1522	case ETHTOOL_GRXCLSRLCNT:
1523		cmd->rule_cnt = bp->ntp_fltr_count;
1524		cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
1525		break;
1526
1527	case ETHTOOL_GRXCLSRLALL:
1528		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
1529		break;
1530
1531	case ETHTOOL_GRXCLSRULE:
1532		rc = bnxt_grxclsrule(bp, cmd);
1533		break;
 
1534
1535	case ETHTOOL_GRXFH:
1536		rc = bnxt_grxfh(bp, cmd);
1537		break;
1538
1539	default:
1540		rc = -EOPNOTSUPP;
1541		break;
1542	}
1543
1544	return rc;
1545}
1546
1547static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1548{
1549	struct bnxt *bp = netdev_priv(dev);
1550	int rc;
1551
1552	switch (cmd->cmd) {
1553	case ETHTOOL_SRXFH:
1554		rc = bnxt_srxfh(bp, cmd);
1555		break;
1556
1557	case ETHTOOL_SRXCLSRLINS:
1558		rc = bnxt_srxclsrlins(bp, cmd);
1559		break;
1560
1561	case ETHTOOL_SRXCLSRLDEL:
1562		rc = bnxt_srxclsrldel(bp, cmd);
1563		break;
1564
1565	default:
1566		rc = -EOPNOTSUPP;
1567		break;
1568	}
1569	return rc;
1570}
1571
1572u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
1573{
1574	struct bnxt *bp = netdev_priv(dev);
1575
1576	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1577		return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
1578		       BNXT_RSS_TABLE_ENTRIES_P5;
1579	return HW_HASH_INDEX_SIZE;
1580}
1581
/* RSS hash key length in bytes, as reported to ethtool. */
static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}
1586
1587static int bnxt_get_rxfh(struct net_device *dev,
1588			 struct ethtool_rxfh_param *rxfh)
1589{
1590	struct bnxt *bp = netdev_priv(dev);
1591	struct bnxt_vnic_info *vnic;
1592	u32 i, tbl_size;
1593
1594	rxfh->hfunc = ETH_RSS_HASH_TOP;
 
1595
1596	if (!bp->vnic_info)
1597		return 0;
1598
1599	vnic = &bp->vnic_info[0];
1600	if (rxfh->indir && bp->rss_indir_tbl) {
1601		tbl_size = bnxt_get_rxfh_indir_size(dev);
1602		for (i = 0; i < tbl_size; i++)
1603			rxfh->indir[i] = bp->rss_indir_tbl[i];
1604	}
1605
1606	if (rxfh->key && vnic->rss_hash_key)
1607		memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
1608
1609	return 0;
1610}
1611
1612static int bnxt_set_rxfh(struct net_device *dev,
1613			 struct ethtool_rxfh_param *rxfh,
1614			 struct netlink_ext_ack *extack)
1615{
1616	struct bnxt *bp = netdev_priv(dev);
1617	int rc = 0;
1618
1619	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
1620		return -EOPNOTSUPP;
1621
1622	if (rxfh->key)
1623		return -EOPNOTSUPP;
1624
1625	if (rxfh->indir) {
1626		u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
1627
1628		for (i = 0; i < tbl_size; i++)
1629			bp->rss_indir_tbl[i] = rxfh->indir[i];
1630		pad = bp->rss_indir_tbl_entries - tbl_size;
1631		if (pad)
1632			memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
1633	}
1634
1635	if (netif_running(bp->dev)) {
1636		bnxt_close_nic(bp, false, false);
1637		rc = bnxt_open_nic(bp, false, false);
1638	}
1639	return rc;
1640}
1641
/* ethtool ->get_drvinfo: report driver name, firmware version string,
 * PCI bus address, and stats/self-test counts.
 */
static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);

	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = bnxt_get_num_stats(bp);
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
}
1657
1658static int bnxt_get_regs_len(struct net_device *dev)
1659{
1660	struct bnxt *bp = netdev_priv(dev);
1661	int reg_len;
1662
1663	if (!BNXT_PF(bp))
1664		return -EOPNOTSUPP;
1665
1666	reg_len = BNXT_PXP_REG_LEN;
1667
1668	if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
1669		reg_len += sizeof(struct pcie_ctx_hw_stats);
1670
1671	return reg_len;
1672}
1673
/* ethtool ->get_regs: dump the PXP register block into @_p and, when
 * the firmware supports it, append PCIe statistics fetched via
 * HWRM_PCIE_QSTATS.  regs->version is 1 when the PCIe stats are
 * included, 0 otherwise.
 */
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *_p)
{
	struct pcie_ctx_hw_stats *hw_pcie_stats;
	struct hwrm_pcie_qstats_input *req;
	struct bnxt *bp = netdev_priv(dev);
	dma_addr_t hw_pcie_stats_addr;
	int rc;

	regs->version = 0;
	bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);

	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
		return;

	if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
		return;

	/* DMA buffer the firmware writes the stats into. */
	hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
					   &hw_pcie_stats_addr);
	if (!hw_pcie_stats) {
		hwrm_req_drop(bp, req);
		return;
	}

	regs->version = 1;
	hwrm_req_hold(bp, req); /* hold on to slice */
	req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		__le64 *src = (__le64 *)hw_pcie_stats;
		u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
		int i;

		/* Convert each 64-bit counter to host byte order. */
		for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
			dst[i] = le64_to_cpu(src[i]);
	}
	hwrm_req_drop(bp, req);
}
1714
1715static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1716{
1717	struct bnxt *bp = netdev_priv(dev);
1718
1719	wol->supported = 0;
1720	wol->wolopts = 0;
1721	memset(&wol->sopass, 0, sizeof(wol->sopass));
1722	if (bp->flags & BNXT_FLAG_WOL_CAP) {
1723		wol->supported = WAKE_MAGIC;
1724		if (bp->wol)
1725			wol->wolopts = WAKE_MAGIC;
1726	}
1727}
1728
1729static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1730{
1731	struct bnxt *bp = netdev_priv(dev);
1732
1733	if (wol->wolopts & ~WAKE_MAGIC)
1734		return -EINVAL;
1735
1736	if (wol->wolopts & WAKE_MAGIC) {
1737		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
1738			return -EINVAL;
1739		if (!bp->wol) {
1740			if (bnxt_hwrm_alloc_wol_fltr(bp))
1741				return -EBUSY;
1742			bp->wol = 1;
1743		}
1744	} else {
1745		if (bp->wol) {
1746			if (bnxt_hwrm_free_wol_fltr(bp))
1747				return -EBUSY;
1748			bp->wol = 0;
1749		}
1750	}
1751	return 0;
1752}
1753
1754u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
1755{
1756	u32 speed_mask = 0;
1757
1758	/* TODO: support 25GB, 40GB, 50GB with different cable type */
1759	/* set the advertised speeds */
1760	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
1761		speed_mask |= ADVERTISED_100baseT_Full;
1762	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
1763		speed_mask |= ADVERTISED_1000baseT_Full;
1764	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
1765		speed_mask |= ADVERTISED_2500baseX_Full;
1766	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
1767		speed_mask |= ADVERTISED_10000baseT_Full;
1768	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
1769		speed_mask |= ADVERTISED_40000baseCR4_Full;
1770
1771	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
1772		speed_mask |= ADVERTISED_Pause;
1773	else if (fw_pause & BNXT_LINK_PAUSE_TX)
1774		speed_mask |= ADVERTISED_Asym_Pause;
1775	else if (fw_pause & BNXT_LINK_PAUSE_RX)
1776		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1777
1778	return speed_mask;
1779}
1780
/* Coarse media classes derived from the FW PHY type (see bnxt_phy_types
 * and bnxt_get_media); used as an index into bnxt_link_modes[].
 */
enum bnxt_media_type {
	BNXT_MEDIA_UNKNOWN = 0,
	BNXT_MEDIA_TP,		/* BASE-T twisted pair */
	BNXT_MEDIA_CR,		/* BASE-CR copper / DAC */
	BNXT_MEDIA_SR,		/* BASE-SR short-reach optics */
	BNXT_MEDIA_LR_ER_FR,	/* BASE-LR/ER(/FR) long-reach optics */
	BNXT_MEDIA_KR,		/* BASE-KR backplane */
	BNXT_MEDIA_KX,		/* BASE-KX backplane */
	BNXT_MEDIA_X,		/* BASE-SX/CX (1G BASE-X) */
	__BNXT_MEDIA_END,
};
1792
/* Map firmware PORT_PHY_QCFG phy_type values to a coarse media class.
 * Unlisted phy_type values default to BNXT_MEDIA_UNKNOWN (0).
 */
static const enum bnxt_media_type bnxt_phy_types[] = {
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] =  BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
};
1849
1850static enum bnxt_media_type
1851bnxt_get_media(struct bnxt_link_info *link_info)
1852{
1853	switch (link_info->media_type) {
1854	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
1855		return BNXT_MEDIA_TP;
1856	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
1857		return BNXT_MEDIA_CR;
1858	default:
1859		if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
1860			return bnxt_phy_types[link_info->phy_type];
1861		return BNXT_MEDIA_UNKNOWN;
1862	}
1863}
1864
/* Compact per-speed indices for the first dimension of bnxt_link_modes[];
 * multiple FW speed encodings of the same rate share one index
 * (see bnxt_fw_speed_idx).
 */
enum bnxt_link_speed_indices {
	BNXT_LINK_SPEED_UNKNOWN = 0,
	BNXT_LINK_SPEED_100MB_IDX,
	BNXT_LINK_SPEED_1GB_IDX,
	BNXT_LINK_SPEED_10GB_IDX,
	BNXT_LINK_SPEED_25GB_IDX,
	BNXT_LINK_SPEED_40GB_IDX,
	BNXT_LINK_SPEED_50GB_IDX,
	BNXT_LINK_SPEED_100GB_IDX,
	BNXT_LINK_SPEED_200GB_IDX,
	BNXT_LINK_SPEED_400GB_IDX,
	__BNXT_LINK_SPEED_END
};
1878
1879static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
1880{
1881	switch (speed) {
1882	case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
1883	case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
1884	case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
1885	case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
1886	case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
1887	case BNXT_LINK_SPEED_50GB:
1888	case BNXT_LINK_SPEED_50GB_PAM4:
1889		return BNXT_LINK_SPEED_50GB_IDX;
1890	case BNXT_LINK_SPEED_100GB:
1891	case BNXT_LINK_SPEED_100GB_PAM4:
1892	case BNXT_LINK_SPEED_100GB_PAM4_112:
1893		return BNXT_LINK_SPEED_100GB_IDX;
1894	case BNXT_LINK_SPEED_200GB:
1895	case BNXT_LINK_SPEED_200GB_PAM4:
1896	case BNXT_LINK_SPEED_200GB_PAM4_112:
1897		return BNXT_LINK_SPEED_200GB_IDX;
1898	case BNXT_LINK_SPEED_400GB:
1899	case BNXT_LINK_SPEED_400GB_PAM4:
1900	case BNXT_LINK_SPEED_400GB_PAM4_112:
1901		return BNXT_LINK_SPEED_400GB_IDX;
1902	default: return BNXT_LINK_SPEED_UNKNOWN;
1903	}
1904}
1905
1906static const enum ethtool_link_mode_bit_indices
1907bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
1908	[BNXT_LINK_SPEED_100MB_IDX] = {
1909		{
1910			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1911		},
1912	},
1913	[BNXT_LINK_SPEED_1GB_IDX] = {
1914		{
1915			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1916			/* historically baseT, but DAC is more correctly baseX */
1917			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1918			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1919			[BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1920			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1921		},
1922	},
1923	[BNXT_LINK_SPEED_10GB_IDX] = {
1924		{
1925			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1926			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1927			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1928			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1929			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1930			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
1931		},
1932	},
1933	[BNXT_LINK_SPEED_25GB_IDX] = {
1934		{
1935			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1936			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1937			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1938		},
1939	},
1940	[BNXT_LINK_SPEED_40GB_IDX] = {
1941		{
1942			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1943			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1944			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1945			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1946		},
1947	},
1948	[BNXT_LINK_SPEED_50GB_IDX] = {
1949		[BNXT_SIG_MODE_NRZ] = {
1950			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1951			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1952			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1953		},
1954		[BNXT_SIG_MODE_PAM4] = {
1955			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
1956			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
1957			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1958			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
1959		},
1960	},
1961	[BNXT_LINK_SPEED_100GB_IDX] = {
1962		[BNXT_SIG_MODE_NRZ] = {
1963			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1964			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1965			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1966			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1967		},
1968		[BNXT_SIG_MODE_PAM4] = {
1969			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
1970			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
1971			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
1972			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
1973		},
1974		[BNXT_SIG_MODE_PAM4_112] = {
1975			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
1976			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
1977			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
1978			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
1979		},
1980	},
1981	[BNXT_LINK_SPEED_200GB_IDX] = {
1982		[BNXT_SIG_MODE_PAM4] = {
1983			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1984			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1985			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1986			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1987		},
1988		[BNXT_SIG_MODE_PAM4_112] = {
1989			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
1990			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
1991			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
1992			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
1993		},
1994	},
1995	[BNXT_LINK_SPEED_400GB_IDX] = {
1996		[BNXT_SIG_MODE_PAM4] = {
1997			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
1998			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
1999			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
2000			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
2001		},
2002		[BNXT_SIG_MODE_PAM4_112] = {
2003			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
2004			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
2005			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
2006			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
2007		},
2008	},
2009};
2010
2011#define BNXT_LINK_MODE_UNKNOWN -1
2012
2013static enum ethtool_link_mode_bit_indices
2014bnxt_get_link_mode(struct bnxt_link_info *link_info)
2015{
2016	enum ethtool_link_mode_bit_indices link_mode;
2017	enum bnxt_link_speed_indices speed;
2018	enum bnxt_media_type media;
2019	u8 sig_mode;
2020
2021	if (link_info->phy_link_status != BNXT_LINK_LINK)
2022		return BNXT_LINK_MODE_UNKNOWN;
2023
2024	media = bnxt_get_media(link_info);
2025	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
2026		speed = bnxt_fw_speed_idx(link_info->link_speed);
2027		sig_mode = link_info->active_fec_sig_mode &
2028			PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
2029	} else {
2030		speed = bnxt_fw_speed_idx(link_info->req_link_speed);
2031		sig_mode = link_info->req_signal_mode;
2032	}
2033	if (sig_mode >= BNXT_SIG_MODE_MAX)
2034		return BNXT_LINK_MODE_UNKNOWN;
2035
2036	/* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
2037	 * link mode, but since no such devices exist, the zeroes in the
2038	 * map can be conveniently used to represent unknown link modes.
2039	 */
2040	link_mode = bnxt_link_modes[speed][sig_mode][media];
2041	if (!link_mode)
2042		return BNXT_LINK_MODE_UNKNOWN;
2043
2044	switch (link_mode) {
2045	case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
2046		if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2047			link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT;
2048		break;
2049	case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
2050		if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2051			link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT;
2052		break;
2053	default:
2054		break;
2055	}
2056
2057	return link_mode;
2058}
2059
2060static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
2061				   struct ethtool_link_ksettings *lk_ksettings)
2062{
2063	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2064
2065	if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
2066		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2067				 lk_ksettings->link_modes.supported);
2068		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2069				 lk_ksettings->link_modes.supported);
2070	}
2071
2072	if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
2073	    link_info->support_pam4_auto_speeds)
2074		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2075				 lk_ksettings->link_modes.supported);
2076
2077	if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
2078		return;
2079
2080	if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX)
2081		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2082				 lk_ksettings->link_modes.advertising);
2083	if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1)
2084		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2085				 lk_ksettings->link_modes.advertising);
2086	if (link_info->lp_pause & BNXT_LINK_PAUSE_RX)
2087		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2088				 lk_ksettings->link_modes.lp_advertising);
2089	if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1)
2090		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2091				 lk_ksettings->link_modes.lp_advertising);
2092}
2093
/* FW support_speeds mask bit for each driver speed index, NRZ signalling,
 * legacy (pre-SPEEDS2) devices.
 */
static const u16 bnxt_nrz_speed_masks[] = {
	[BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB,
	[BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB,
	[BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB,
	[BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB,
	[BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB,
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};
2104
/* FW support_pam4_speeds mask bit per driver speed index, PAM4-56
 * signalling, legacy (pre-SPEEDS2) devices.
 */
static const u16 bnxt_pam4_speed_masks[] = {
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};
2111
/* FW support_speeds2 mask bit per driver speed index, NRZ signalling,
 * SPEEDS2-capable devices.
 */
static const u16 bnxt_nrz_speeds2_masks[] = {
	[BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
	[BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
	[BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
	[BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};
2121
/* FW support_speeds2 mask bit per driver speed index, PAM4-56 signalling,
 * SPEEDS2-capable devices.  The 400GB entry is the highest index, so the
 * array already covers every legal speed index.
 */
static const u16 bnxt_pam4_speeds2_masks[] = {
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
	[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
};
2128
/* FW support_speeds2 mask bit per driver speed index, PAM4-112 signalling
 * (SPEEDS2-capable devices only).
 */
static const u16 bnxt_pam4_112_speeds2_masks[] = {
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
	[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
};
2134
2135static enum bnxt_link_speed_indices
2136bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
2137{
2138	const u16 *speeds;
2139	int idx, len;
2140
2141	switch (sig_mode) {
2142	case BNXT_SIG_MODE_NRZ:
2143		if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2144			speeds = bnxt_nrz_speeds2_masks;
2145			len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
2146		} else {
2147			speeds = bnxt_nrz_speed_masks;
2148			len = ARRAY_SIZE(bnxt_nrz_speed_masks);
2149		}
2150		break;
2151	case BNXT_SIG_MODE_PAM4:
2152		if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2153			speeds = bnxt_pam4_speeds2_masks;
2154			len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
2155		} else {
2156			speeds = bnxt_pam4_speed_masks;
2157			len = ARRAY_SIZE(bnxt_pam4_speed_masks);
2158		}
2159		break;
2160	case BNXT_SIG_MODE_PAM4_112:
2161		speeds = bnxt_pam4_112_speeds2_masks;
2162		len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
2163		break;
2164	default:
2165		return BNXT_LINK_SPEED_UNKNOWN;
2166	}
2167
2168	for (idx = 0; idx < len; idx++) {
2169		if (speeds[idx] == speed_msk)
2170			return idx;
2171	}
2172
2173	return BNXT_LINK_SPEED_UNKNOWN;
2174}
2175
2176#define BNXT_FW_SPEED_MSK_BITS 16
2177
2178static void
2179__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
2180			  u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
2181{
2182	enum ethtool_link_mode_bit_indices link_mode;
2183	enum bnxt_link_speed_indices speed;
2184	u8 bit;
2185
2186	for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
2187		speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
2188		if (!speed)
2189			continue;
2190
2191		link_mode = bnxt_link_modes[speed][sig_mode][media];
2192		if (!link_mode)
2193			continue;
2194
2195		linkmode_set_bit(link_mode, et_mask);
2196	}
2197}
2198
2199static void
2200bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
2201			u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
2202{
2203	if (media) {
2204		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
2205					  et_mask);
2206		return;
2207	}
2208
2209	/* list speeds for all media if unknown */
2210	for (media = 1; media < __BNXT_MEDIA_END; media++)
2211		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
2212					  et_mask);
2213}
2214
2215static void
2216bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
2217				    enum bnxt_media_type media,
2218				    struct ethtool_link_ksettings *lk_ksettings)
2219{
2220	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2221	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
2222	u16 phy_flags = bp->phy_flags;
2223
2224	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2225		sp_nrz = link_info->support_speeds2;
2226		sp_pam4 = link_info->support_speeds2;
2227		sp_pam4_112 = link_info->support_speeds2;
2228	} else {
2229		sp_nrz = link_info->support_speeds;
2230		sp_pam4 = link_info->support_pam4_speeds;
2231	}
2232	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2233				lk_ksettings->link_modes.supported);
2234	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2235				lk_ksettings->link_modes.supported);
2236	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2237				phy_flags, lk_ksettings->link_modes.supported);
2238}
2239
2240static void
2241bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
2242				enum bnxt_media_type media,
2243				struct ethtool_link_ksettings *lk_ksettings)
2244{
2245	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2246	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
2247	u16 phy_flags = bp->phy_flags;
2248
2249	sp_nrz = link_info->advertising;
2250	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2251		sp_pam4 = link_info->advertising;
2252		sp_pam4_112 = link_info->advertising;
2253	} else {
2254		sp_pam4 = link_info->advertising_pam4;
2255	}
2256	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2257				lk_ksettings->link_modes.advertising);
2258	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2259				lk_ksettings->link_modes.advertising);
2260	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2261				phy_flags, lk_ksettings->link_modes.advertising);
2262}
2263
2264static void
2265bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
2266			       enum bnxt_media_type media,
2267			       struct ethtool_link_ksettings *lk_ksettings)
2268{
2269	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2270	u16 phy_flags = bp->phy_flags;
2271
2272	bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
2273				BNXT_SIG_MODE_NRZ, phy_flags,
2274				lk_ksettings->link_modes.lp_advertising);
2275	bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
2276				BNXT_SIG_MODE_PAM4, phy_flags,
2277				lk_ksettings->link_modes.lp_advertising);
2278}
2279
2280static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
2281			      u16 speed_msk, const unsigned long *et_mask,
2282			      enum ethtool_link_mode_bit_indices mode)
2283{
2284	bool mode_desired = linkmode_test_bit(mode, et_mask);
2285
2286	if (!mode)
2287		return;
2288
2289	/* enabled speeds for installed media should override */
2290	if (installed_media && mode_desired) {
2291		*speeds |= speed_msk;
2292		*delta |= speed_msk;
2293		return;
2294	}
2295
2296	/* many to one mapping, only allow one change per fw_speed bit */
2297	if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
2298		*speeds ^= speed_msk;
2299		*delta |= speed_msk;
2300	}
2301}
2302
/* Convert a user-supplied ethtool advertising mask into the FW autoneg
 * speed masks in @link_info, accepting any legal media type but letting
 * the installed media's modes win on conflicts.
 *
 * On SPEEDS2-capable devices all three signalling modes share a single
 * FW mask, so adv, adv_pam4 and adv_pam4_112 deliberately alias
 * link_info->advertising; the separate delta accumulators keep each
 * signalling pass from undoing another's change.
 */
static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
				    const unsigned long *et_mask)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
	enum bnxt_media_type media = bnxt_get_media(link_info);
	u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
	u32 delta_pam4_112 = 0;
	u32 delta_pam4 = 0;
	u32 delta_nrz = 0;
	int i, m;

	adv = &link_info->advertising;
	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
		/* one shared FW mask for NRZ/PAM4/PAM4-112 on SPEEDS2 */
		adv_pam4 = &link_info->advertising;
		adv_pam4_112 = &link_info->advertising;
		sp_msks = bnxt_nrz_speeds2_masks;
		sp_pam4_msks = bnxt_pam4_speeds2_masks;
		sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
	} else {
		/* legacy devices: PAM4 has its own mask, no PAM4-112 */
		adv_pam4 = &link_info->advertising_pam4;
		sp_msks = bnxt_nrz_speed_masks;
		sp_pam4_msks = bnxt_pam4_speed_masks;
	}
	for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
		/* accept any legal media from user */
		for (m = 1; m < __BNXT_MEDIA_END; m++) {
			bnxt_update_speed(&delta_nrz, m == media,
					  adv, sp_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
			bnxt_update_speed(&delta_pam4, m == media,
					  adv_pam4, sp_pam4_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
			if (!adv_pam4_112)
				continue;

			bnxt_update_speed(&delta_pam4_112, m == media,
					  adv_pam4_112, sp_pam4_112_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
		}
	}
}
2345
2346static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
2347				struct ethtool_link_ksettings *lk_ksettings)
2348{
2349	u16 fec_cfg = link_info->fec_cfg;
2350
2351	if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
2352		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2353				 lk_ksettings->link_modes.advertising);
2354		return;
2355	}
2356	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
2357		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2358				 lk_ksettings->link_modes.advertising);
2359	if (fec_cfg & BNXT_FEC_ENC_RS)
2360		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2361				 lk_ksettings->link_modes.advertising);
2362	if (fec_cfg & BNXT_FEC_ENC_LLRS)
2363		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2364				 lk_ksettings->link_modes.advertising);
2365}
2366
2367static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
2368				struct ethtool_link_ksettings *lk_ksettings)
2369{
2370	u16 fec_cfg = link_info->fec_cfg;
2371
2372	if (fec_cfg & BNXT_FEC_NONE) {
2373		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2374				 lk_ksettings->link_modes.supported);
2375		return;
2376	}
2377	if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
2378		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2379				 lk_ksettings->link_modes.supported);
2380	if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
2381		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2382				 lk_ksettings->link_modes.supported);
2383	if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
2384		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2385				 lk_ksettings->link_modes.supported);
2386}
2387
2388u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
2389{
2390	switch (fw_link_speed) {
2391	case BNXT_LINK_SPEED_100MB:
2392		return SPEED_100;
2393	case BNXT_LINK_SPEED_1GB:
2394		return SPEED_1000;
2395	case BNXT_LINK_SPEED_2_5GB:
2396		return SPEED_2500;
2397	case BNXT_LINK_SPEED_10GB:
2398		return SPEED_10000;
2399	case BNXT_LINK_SPEED_20GB:
2400		return SPEED_20000;
2401	case BNXT_LINK_SPEED_25GB:
2402		return SPEED_25000;
2403	case BNXT_LINK_SPEED_40GB:
2404		return SPEED_40000;
2405	case BNXT_LINK_SPEED_50GB:
2406	case BNXT_LINK_SPEED_50GB_PAM4:
2407		return SPEED_50000;
2408	case BNXT_LINK_SPEED_100GB:
2409	case BNXT_LINK_SPEED_100GB_PAM4:
2410	case BNXT_LINK_SPEED_100GB_PAM4_112:
2411		return SPEED_100000;
2412	case BNXT_LINK_SPEED_200GB:
2413	case BNXT_LINK_SPEED_200GB_PAM4:
2414	case BNXT_LINK_SPEED_200GB_PAM4_112:
2415		return SPEED_200000;
2416	case BNXT_LINK_SPEED_400GB:
2417	case BNXT_LINK_SPEED_400GB_PAM4:
2418	case BNXT_LINK_SPEED_400GB_PAM4_112:
2419		return SPEED_400000;
2420	default:
2421		return SPEED_UNKNOWN;
2422	}
2423}
2424
2425static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
2426				    struct bnxt_link_info *link_info)
2427{
2428	struct ethtool_link_settings *base = &lk_ksettings->base;
2429
2430	if (link_info->link_state == BNXT_LINK_STATE_UP) {
2431		base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
2432		base->duplex = DUPLEX_HALF;
2433		if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2434			base->duplex = DUPLEX_FULL;
2435		lk_ksettings->lanes = link_info->active_lanes;
2436	} else if (!link_info->autoneg) {
2437		base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
2438		base->duplex = DUPLEX_HALF;
2439		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
2440			base->duplex = DUPLEX_FULL;
2441	}
2442}
2443
/* ethtool ->get_link_ksettings() handler.
 *
 * Builds the supported/advertising/lp_advertising link mode masks and
 * the base speed/duplex/port settings from the cached PHY state, all
 * under bp->link_lock so the snapshot is consistent with concurrent
 * link updates.  Always returns 0.
 */
static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct ethtool_link_settings *base = &lk_ksettings->base;
	enum ethtool_link_mode_bit_indices link_mode;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	enum bnxt_media_type media;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	base->duplex = DUPLEX_UNKNOWN;
	base->speed = SPEED_UNKNOWN;
	link_info = &bp->link_info;

	mutex_lock(&bp->link_lock);
	bnxt_get_ethtool_modes(link_info, lk_ksettings);
	media = bnxt_get_media(link_info);
	bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
	bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
	link_mode = bnxt_get_link_mode(link_info);
	/* derive speed/duplex/lanes from the exact link mode if known,
	 * otherwise fall back to the raw FW speed values
	 */
	if (link_mode != BNXT_LINK_MODE_UNKNOWN)
		ethtool_params_from_link_mode(lk_ksettings, link_mode);
	else
		bnxt_get_default_speeds(lk_ksettings, link_info);

	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				 lk_ksettings->link_modes.advertising);
		base->autoneg = AUTONEG_ENABLE;
		bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
		/* link partner modes are only meaningful with link up */
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			bnxt_get_all_ethtool_lp_speeds(link_info, media,
						       lk_ksettings);
	} else {
		base->autoneg = AUTONEG_DISABLE;
	}

	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.advertising);
	} else {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.advertising);

		/* direct-attach copper is reported as PORT_DA */
		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}
2507
/* Validate and record a forced (autoneg-off) link speed request.
 *
 * Translates the requested ethtool speed — and optional lane count —
 * into a FW forced-speed code plus signalling mode, checked against the
 * speed masks the FW reported as supported.  For rates achievable with
 * several encodings (50G/100G/200G/400G) the NRZ or lower-lane-count
 * option is preferred unless the caller's @lanes rules it out.  On
 * success only the cached request in @link_info is updated; the caller
 * sends the HWRM message.
 *
 * Returns 0 on success, -EINVAL for an unsupported speed or lane count,
 * or -EALREADY if the identical force request is already configured.
 */
static int
bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_pam4_spds = link_info->support_pam4_speeds;
	u16 support_spds2 = link_info->support_speeds2;
	u16 support_spds = link_info->support_speeds;
	u8 sig_mode = BNXT_SIG_MODE_NRZ;
	u32 lanes_needed = 1;
	u16 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
		break;
	case SPEED_10000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		if (support_spds & BNXT_LINK_SPEED_MSK_20GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
			lanes_needed = 2;
		}
		break;
	case SPEED_25000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
			lanes_needed = 4;
		}
		break;
	case SPEED_50000:
		/* 50G: 2-lane NRZ unless the user asked for 1 lane,
		 * otherwise 1-lane PAM4 (legacy or SPEEDS2 encoding)
		 */
		if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) ||
		     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) &&
		    lanes != 1) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
			lanes_needed = 2;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) {
			fw_speed = BNXT_LINK_SPEED_50GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	case SPEED_100000:
		/* 100G: prefer 4-lane NRZ, then 2-lane PAM4-56, then
		 * 1-lane PAM4-112, as far as @lanes allows
		 */
		if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) ||
		     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) &&
		    lanes != 2 && lanes != 1) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
			lanes_needed = 4;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 2;
		} else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) &&
			   lanes != 1) {
			fw_speed = BNXT_LINK_SPEED_100GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 2;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) {
			fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112;
			sig_mode = BNXT_SIG_MODE_PAM4_112;
		}
		break;
	case SPEED_200000:
		/* 200G: 4-lane PAM4-56 before 2-lane PAM4-112 */
		if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 4;
		} else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) &&
			   lanes != 2) {
			fw_speed = BNXT_LINK_SPEED_200GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 4;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) {
			fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112;
			sig_mode = BNXT_SIG_MODE_PAM4_112;
			lanes_needed = 2;
		}
		break;
	case SPEED_400000:
		/* 400G: 8-lane PAM4-56 before 4-lane PAM4-112 */
		if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) &&
		    lanes != 4) {
			fw_speed = BNXT_LINK_SPEED_400GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 8;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) {
			fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112;
			sig_mode = BNXT_SIG_MODE_PAM4_112;
			lanes_needed = 4;
		}
		break;
	}

	if (!fw_speed) {
		netdev_err(dev, "unsupported speed!\n");
		return -EINVAL;
	}

	/* lanes == 0 means "driver's choice" and is always accepted */
	if (lanes && lanes != lanes_needed) {
		netdev_err(dev, "unsupported number of lanes for speed\n");
		return -EINVAL;
	}

	if (link_info->req_link_speed == fw_speed &&
	    link_info->req_signal_mode == sig_mode &&
	    link_info->autoneg == 0)
		return -EALREADY;

	link_info->req_link_speed = fw_speed;
	link_info->req_signal_mode = sig_mode;
	link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
	link_info->autoneg = 0;
	link_info->advertising = 0;
	link_info->advertising_pam4 = 0;

	return 0;
}
2645
2646u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
2647{
2648	u16 fw_speed_mask = 0;
2649
2650	/* only support autoneg at speed 100, 1000, and 10000 */
2651	if (advertising & (ADVERTISED_100baseT_Full |
2652			   ADVERTISED_100baseT_Half)) {
2653		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
2654	}
2655	if (advertising & (ADVERTISED_1000baseT_Full |
2656			   ADVERTISED_1000baseT_Half)) {
2657		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
2658	}
2659	if (advertising & ADVERTISED_10000baseT_Full)
2660		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
2661
2662	if (advertising & ADVERTISED_40000baseCR4_Full)
2663		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
2664
2665	return fw_speed_mask;
2666}
2667
/* ethtool ->set_link_ksettings() handler.
 *
 * With autoneg enabled, folds the requested advertising mask into the
 * FW speed masks (falling back to all supported speeds when nothing in
 * the request applied).  With autoneg disabled, validates and caches a
 * forced full-duplex speed.  The new configuration is pushed to FW only
 * while the interface is running; otherwise it takes effect on open.
 */
static int bnxt_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u32 speed, lanes = 0;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		bnxt_set_ethtool_speeds(link_info,
					lk_ksettings->link_modes.advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		/* nothing requested mapped: advertise everything we can */
		if (!link_info->advertising && !link_info->advertising_pam4) {
			link_info->advertising = link_info->support_auto_speeds;
			link_info->advertising_pam4 =
				link_info->support_pam4_auto_speeds;
		}
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
			set_pause = true;
	} else {
		u8 phy_type = link_info->phy_type;

		/* twisted-pair PHYs cannot run with autoneg disabled */
		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		lanes = lk_ksettings->lanes;
		rc = bnxt_force_link_speed(dev, speed, lanes);
		if (rc) {
			/* -EALREADY: same force request already active */
			if (rc == -EALREADY)
				rc = 0;
			goto set_setting_exit;
		}
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
2728
2729static int bnxt_get_fecparam(struct net_device *dev,
2730			     struct ethtool_fecparam *fec)
2731{
2732	struct bnxt *bp = netdev_priv(dev);
2733	struct bnxt_link_info *link_info;
2734	u8 active_fec;
2735	u16 fec_cfg;
2736
2737	link_info = &bp->link_info;
2738	fec_cfg = link_info->fec_cfg;
2739	active_fec = link_info->active_fec_sig_mode &
2740		     PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
2741	if (fec_cfg & BNXT_FEC_NONE) {
2742		fec->fec = ETHTOOL_FEC_NONE;
2743		fec->active_fec = ETHTOOL_FEC_NONE;
2744		return 0;
2745	}
2746	if (fec_cfg & BNXT_FEC_AUTONEG)
2747		fec->fec |= ETHTOOL_FEC_AUTO;
2748	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
2749		fec->fec |= ETHTOOL_FEC_BASER;
2750	if (fec_cfg & BNXT_FEC_ENC_RS)
2751		fec->fec |= ETHTOOL_FEC_RS;
2752	if (fec_cfg & BNXT_FEC_ENC_LLRS)
2753		fec->fec |= ETHTOOL_FEC_LLRS;
2754
2755	switch (active_fec) {
2756	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
2757		fec->active_fec |= ETHTOOL_FEC_BASER;
2758		break;
2759	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
2760	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
2761	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
2762		fec->active_fec |= ETHTOOL_FEC_RS;
2763		break;
2764	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
2765	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
2766		fec->active_fec |= ETHTOOL_FEC_LLRS;
2767		break;
2768	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
2769		fec->active_fec |= ETHTOOL_FEC_OFF;
2770		break;
2771	}
2772	return 0;
2773}
2774
2775static void bnxt_get_fec_stats(struct net_device *dev,
2776			       struct ethtool_fec_stats *fec_stats)
2777{
2778	struct bnxt *bp = netdev_priv(dev);
2779	u64 *rx;
2780
2781	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
2782		return;
2783
2784	rx = bp->rx_port_stats_ext.sw_stats;
2785	fec_stats->corrected_bits.total =
2786		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
2787
2788	if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
2789		return;
2790
2791	fec_stats->corrected_blocks.total =
2792		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
2793	fec_stats->uncorrectable_blocks.total =
2794		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
2795}
2796
2797static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
2798					 u32 fec)
2799{
2800	u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
2801
2802	if (fec & ETHTOOL_FEC_BASER)
2803		fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
2804	else if (fec & ETHTOOL_FEC_RS)
2805		fw_fec |= BNXT_FEC_RS_ON(link_info);
2806	else if (fec & ETHTOOL_FEC_LLRS)
2807		fw_fec |= BNXT_FEC_LLRS_ON;
2808	return fw_fec;
2809}
2810
/* ethtool ->set_fecparam() handler.
 *
 * Validates the requested FEC modes against the capability bits cached
 * in link_info->fec_cfg, builds the HWRM_PORT_PHY_CFG flags and sends
 * the request with a PHY reset so the new encoding takes effect.  On
 * success the cached link state is refreshed under bp->link_lock.
 */
static int bnxt_set_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fecparam)
{
	struct hwrm_port_phy_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u32 new_cfg, fec = fecparam->fec;
	u16 fec_cfg;
	int rc;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	/* device has no FEC capability */
	if (fec_cfg & BNXT_FEC_NONE)
		return -EOPNOTSUPP;

	/* OFF overrides any other requested mode */
	if (fec & ETHTOOL_FEC_OFF) {
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
			  BNXT_FEC_ALL_OFF(link_info);
		goto apply_fec;
	}
	/* reject any requested mode the hardware cannot do */
	if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
	    ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
	    ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
	    ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
		return -EINVAL;

	if (fec & ETHTOOL_FEC_AUTO) {
		/* FEC autoneg requires link autoneg to be on */
		if (!link_info->autoneg)
			return -EINVAL;
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
	} else {
		new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
	}

apply_fec:
	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;
	req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	/* update current settings */
	if (!rc) {
		mutex_lock(&bp->link_lock);
		bnxt_update_link(bp, false);
		mutex_unlock(&bp->link_lock);
	}
	return rc;
}
2859
2860static void bnxt_get_pauseparam(struct net_device *dev,
2861				struct ethtool_pauseparam *epause)
2862{
2863	struct bnxt *bp = netdev_priv(dev);
2864	struct bnxt_link_info *link_info = &bp->link_info;
2865
2866	if (BNXT_VF(bp))
2867		return;
2868	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
2869	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
2870	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
2871}
2872
2873static void bnxt_get_pause_stats(struct net_device *dev,
2874				 struct ethtool_pause_stats *epstat)
2875{
2876	struct bnxt *bp = netdev_priv(dev);
2877	u64 *rx, *tx;
2878
2879	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
2880		return;
2881
2882	rx = bp->port_stats.sw_stats;
2883	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
2884
2885	epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
2886	epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
2887}
2888
/* ethtool -A handler.  Updates the requested flow-control state under
 * bp->link_lock and pushes it to firmware when the interface is up.
 */
static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (epause->autoneg) {
		/* Pause autoneg requires speed autoneg to be enabled too */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
			rc = -EINVAL;
			goto pause_exit;
		}

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	/* Rebuild the requested RX/TX pause bits from the user's request */
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);

pause_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
2930
2931static u32 bnxt_get_link(struct net_device *dev)
2932{
2933	struct bnxt *bp = netdev_priv(dev);
2934
2935	/* TODO: handle MF, VF, driver close case */
2936	return BNXT_LINK_IS_UP(bp);
2937}
2938
/* Retrieve NVM device information from firmware and copy the whole HWRM
 * response into @nvm_dev_info on success.  Not available on VFs, which
 * have no NVM access.
 */
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
			       struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
{
	struct hwrm_nvm_get_dev_info_output *resp;
	struct hwrm_nvm_get_dev_info_input *req;
	int rc;

	if (BNXT_VF(bp))
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
	if (rc)
		return rc;

	/* hold keeps the response buffer valid until the drop below */
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		memcpy(nvm_dev_info, resp, sizeof(*resp));
	hwrm_req_drop(bp, req);
	return rc;
}
2960
/* Common log message when firmware rejects a flash/reset with -EACCES. */
static void bnxt_print_admin_err(struct bnxt *bp)
{
	netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
2965
2966int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
2967			 u16 ext, u16 *index, u32 *item_length,
2968			 u32 *data_length);
2969
/* Write (or create/resize) an NVM directory entry via HWRM_NVM_WRITE.
 *
 * @dir_type/@dir_ordinal/@dir_ext/@dir_attr identify the entry.
 * @dir_item_len: requested item size; 0 keeps the existing size.
 * @data/@data_len: payload to write; may be NULL/0 to only (re)create
 * the entry without transferring data.
 */
int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
		     u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
		     u32 dir_item_len, const u8 *data,
		     size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_write_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
	if (rc)
		return rc;

	if (data_len && data) {
		dma_addr_t dma_handle;
		u8 *kmem;

		/* DMA slice is owned by the request; freed by hwrm_req_drop */
		kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
		if (!kmem) {
			hwrm_req_drop(bp, req);
			return -ENOMEM;
		}

		req->dir_data_length = cpu_to_le32(data_len);

		memcpy(kmem, data, data_len);
		req->host_src_addr = cpu_to_le64(dma_handle);
	}

	/* NVM writes can be slow; allow the maximum HWRM timeout */
	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
	req->dir_type = cpu_to_le16(dir_type);
	req->dir_ordinal = cpu_to_le16(dir_ordinal);
	req->dir_ext = cpu_to_le16(dir_ext);
	req->dir_attr = cpu_to_le16(dir_attr);
	req->dir_item_length = cpu_to_le32(dir_item_len);
	rc = hwrm_req_send(bp, req);

	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
3011
/* Ask firmware to reset an embedded processor.
 * @proc_type: FW_RESET_REQ_EMBEDDED_PROC_TYPE_* selector.
 * @self_reset: FW_RESET_REQ_SELFRST_STATUS_* self-reset policy.
 * @flags: FW_RESET_REQ_FLAGS_* modifiers (e.g. graceful reset).
 * AP resets are sent silently (no HWRM error logging).
 */
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
			     u8 self_reset, u8 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_fw_reset_input *req;
	int rc;

	/* Another agent (e.g. a remote driver) may have vetoed resets */
	if (!bnxt_hwrm_reset_permitted(bp)) {
		netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
		return -EPERM;
	}

	rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
	if (rc)
		return rc;

	req->embedded_proc_type = proc_type;
	req->selfrst_status = self_reset;
	req->flags = flags;

	if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
		rc = hwrm_req_send_silent(bp, req);
	} else {
		rc = hwrm_req_send(bp, req);
		if (rc == -EACCES)
			bnxt_print_admin_err(bp);
	}
	return rc;
}
3041
3042static int bnxt_firmware_reset(struct net_device *dev,
3043			       enum bnxt_nvm_directory_type dir_type)
3044{
3045	u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
3046	u8 proc_type, flags = 0;
3047
3048	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
3049	/*       (e.g. when firmware isn't already running) */
3050	switch (dir_type) {
3051	case BNX_DIR_TYPE_CHIMP_PATCH:
3052	case BNX_DIR_TYPE_BOOTCODE:
3053	case BNX_DIR_TYPE_BOOTCODE_2:
3054		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
3055		/* Self-reset ChiMP upon next PCIe reset: */
3056		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
3057		break;
3058	case BNX_DIR_TYPE_APE_FW:
3059	case BNX_DIR_TYPE_APE_PATCH:
3060		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
3061		/* Self-reset APE upon next PCIe reset: */
3062		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
3063		break;
3064	case BNX_DIR_TYPE_KONG_FW:
3065	case BNX_DIR_TYPE_KONG_PATCH:
3066		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
 
3067		break;
3068	case BNX_DIR_TYPE_BONO_FW:
3069	case BNX_DIR_TYPE_BONO_PATCH:
3070		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
 
 
 
 
 
 
 
3071		break;
3072	default:
3073		return -EINVAL;
3074	}
3075
3076	return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
3077}
3078
3079static int bnxt_firmware_reset_chip(struct net_device *dev)
3080{
3081	struct bnxt *bp = netdev_priv(dev);
3082	u8 flags = 0;
3083
3084	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
3085		flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
3086
3087	return bnxt_hwrm_firmware_reset(dev,
3088					FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
3089					FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
3090					flags);
3091}
3092
/* Reset only the AP processor; no self-reset policy, no extra flags. */
static int bnxt_firmware_reset_ap(struct net_device *dev)
{
	return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
					0);
}
3099
/* Validate a bnxt_fw_header framed firmware image, write it into the NVM
 * entry @dir_type, then reset the matching embedded processor so the new
 * image takes effect.
 * Validation: known directory type, size covers the header, and the
 * signature, code type, device family and trailing CRC32 all match.
 */
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int	rc = 0;
	u16	code_type;
	u32	stored_crc;
	u32	calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	/* Map the NVM directory type to the expected header code type */
	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	/* NOTE(review): dereferences a possibly unaligned __le32 at the tail
	 * of the blob; may need get_unaligned_le32() on strict-alignment
	 * architectures -- confirm.
	 */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}
3179
/* Validate and flash a microcode image framed by a bnxt_ucode_trailer at
 * the end of the blob.  Unlike bnxt_flash_firmware(), no processor reset
 * is issued afterwards.
 */
static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	/* The trailer occupies the last sizeof(*trailer) bytes of the file */
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
		sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	/* NOTE(review): possibly unaligned __le32 read, as in
	 * bnxt_flash_firmware() -- confirm on strict-alignment arches.
	 */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);

	return rc;
}
3230
3231static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
3232{
3233	switch (dir_type) {
3234	case BNX_DIR_TYPE_CHIMP_PATCH:
3235	case BNX_DIR_TYPE_BOOTCODE:
3236	case BNX_DIR_TYPE_BOOTCODE_2:
3237	case BNX_DIR_TYPE_APE_FW:
3238	case BNX_DIR_TYPE_APE_PATCH:
3239	case BNX_DIR_TYPE_KONG_FW:
3240	case BNX_DIR_TYPE_KONG_PATCH:
3241	case BNX_DIR_TYPE_BONO_FW:
3242	case BNX_DIR_TYPE_BONO_PATCH:
3243		return true;
3244	}
3245
3246	return false;
3247}
3248
3249static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
3250{
3251	switch (dir_type) {
3252	case BNX_DIR_TYPE_AVS:
3253	case BNX_DIR_TYPE_EXP_ROM_MBA:
3254	case BNX_DIR_TYPE_PCIE:
3255	case BNX_DIR_TYPE_TSCF_UCODE:
3256	case BNX_DIR_TYPE_EXT_PHY:
3257	case BNX_DIR_TYPE_CCM:
3258	case BNX_DIR_TYPE_ISCSI_BOOT:
3259	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
3260	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
3261		return true;
3262	}
3263
3264	return false;
3265}
3266
3267static bool bnxt_dir_type_is_executable(u16 dir_type)
3268{
3269	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
3270		bnxt_dir_type_is_other_exec_format(dir_type);
3271}
3272
3273static int bnxt_flash_firmware_from_file(struct net_device *dev,
3274					 u16 dir_type,
3275					 const char *filename)
3276{
3277	const struct firmware  *fw;
3278	int			rc;
3279
3280	rc = request_firmware(&fw, filename, &dev->dev);
3281	if (rc != 0) {
3282		netdev_err(dev, "Error %d requesting firmware file: %s\n",
3283			   rc, filename);
3284		return rc;
3285	}
3286	if (bnxt_dir_type_is_ape_bin_format(dir_type))
3287		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
3288	else if (bnxt_dir_type_is_other_exec_format(dir_type))
3289		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
3290	else
3291		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
3292				      0, 0, 0, fw->data, fw->size);
3293	release_firmware(fw);
3294	return rc;
3295}
3296
3297#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
3298#define MSG_INVALID_PKG "PKG install error : Invalid package"
3299#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
3300#define MSG_INVALID_DEV "PKG install error : Invalid device"
3301#define MSG_INTERNAL_ERR "PKG install error : Internal error"
3302#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
3303#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
3304#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
3305#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
3306#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
3307
/* Map an HWRM_NVM_INSTALL_UPDATE result code to a standard errno and
 * report a human-readable reason via extack (when given) and the log.
 */
static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
				    struct netlink_ext_ack *extack)
{
	switch (result) {
	/* NVM data-integrity class failures */
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
		return -EINVAL;
	/* Malformed or inconsistent package contents */
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
		return -ENOPKG;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
		return -EPERM;
	/* Package does not match this chip/board */
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
		return -EOPNOTSUPP;
	default:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
		return -EIO;
	}
}
3350
3351#define BNXT_PKG_DMA_SIZE	0x40000
3352#define BNXT_NVM_MORE_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
3353#define BNXT_NVM_LAST_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
3354
3355static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
3356				    struct netlink_ext_ack *extack)
3357{
3358	u32 item_len;
3359	int rc;
3360
3361	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
3362				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
3363				  &item_len, NULL);
3364	if (rc) {
3365		BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
3366		return rc;
3367	}
3368
3369	if (fw_size > item_len) {
3370		rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
3371				      BNX_DIR_ORDINAL_FIRST, 0, 1,
3372				      round_up(fw_size, 4096), NULL, 0);
3373		if (rc) {
3374			BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
3375			return rc;
3376		}
3377	}
3378	return 0;
3379}
3380
/* Install a firmware package from @fw.
 *
 * Flow: ensure the UPDATE scratch area is big enough, stream the package
 * into it with batched HWRM_NVM_MODIFY writes, then ask firmware to
 * install it via HWRM_NVM_INSTALL_UPDATE.  On a fragmentation error the
 * install is retried with defragmentation allowed; if firmware then
 * reports NO_SPACE, the UPDATE entry is recreated and the whole flash is
 * attempted one more time.
 * Returns 0 on success or a negative errno (translated from the
 * firmware-reported result code where applicable).
 */
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
				   u32 install_type, struct netlink_ext_ack *extack)
{
	struct hwrm_nvm_install_update_input *install;
	struct hwrm_nvm_install_update_output *resp;
	struct hwrm_nvm_modify_input *modify;
	struct bnxt *bp = netdev_priv(dev);
	bool defrag_attempted = false;
	dma_addr_t dma_handle;
	u8 *kmem = NULL;
	u32 modify_len;
	u32 item_len;
	u8 cmd_err;
	u16 index;
	int rc;

	/* resize before flashing larger image than available space */
	rc = bnxt_resize_update_entry(dev, fw->size, extack);
	if (rc)
		return rc;

	bnxt_hwrm_fw_set_time(bp);

	rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
	if (rc)
		return rc;

	/* Try allocating a large DMA buffer first.  Older fw will
	 * cause excessive NVRAM erases when using small blocks.
	 */
	modify_len = roundup_pow_of_two(fw->size);
	modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
	while (1) {
		kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
		if (!kmem && modify_len > PAGE_SIZE)
			modify_len /= 2;	/* halve and retry on failure */
		else
			break;
	}
	if (!kmem) {
		hwrm_req_drop(bp, modify);
		return -ENOMEM;
	}

	rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
	if (rc) {
		hwrm_req_drop(bp, modify);
		return rc;
	}

	hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
	hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);

	/* Both requests stay held across the retry loop; dropped at the end */
	hwrm_req_hold(bp, modify);
	modify->host_src_addr = cpu_to_le64(dma_handle);

	resp = hwrm_req_hold(bp, install);
	/* install_type may be packed into the upper 16 bits */
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	install->install_type = cpu_to_le32(install_type);

	do {
		u32 copied = 0, len = modify_len;

		rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
					  BNX_DIR_ORDINAL_FIRST,
					  BNX_DIR_EXT_NONE,
					  &index, &item_len, NULL);
		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
			break;
		}
		if (fw->size > item_len) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
			rc = -EFBIG;
			break;
		}

		modify->dir_idx = cpu_to_le16(index);

		/* Batch mode: flag all-but-first chunks; mark the last one */
		if (fw->size > modify_len)
			modify->flags = BNXT_NVM_MORE_FLAG;
		while (copied < fw->size) {
			u32 balance = fw->size - copied;

			if (balance <= modify_len) {
				len = balance;
				if (copied)
					modify->flags |= BNXT_NVM_LAST_FLAG;
			}
			memcpy(kmem, fw->data + copied, len);
			modify->len = cpu_to_le32(len);
			modify->offset = cpu_to_le32(copied);
			rc = hwrm_req_send(bp, modify);
			if (rc)
				goto pkg_abort;
			copied += len;
		}

		rc = hwrm_req_send_silent(bp, install);
		if (!rc)
			break;

		if (defrag_attempted) {
			/* We have tried to defragment already in the previous
			 * iteration. Return with the result for INSTALL_UPDATE
			 */
			break;
		}

		cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

		switch (cmd_err) {
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
			rc = -EALREADY;
			break;
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
			/* Retry the install allowing firmware to defragment */
			install->flags =
				cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);

			rc = hwrm_req_send_silent(bp, install);
			if (!rc)
				break;

			cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

			if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
				/* FW has cleared NVM area, driver will create
				 * UPDATE directory and try the flash again
				 */
				defrag_attempted = true;
				install->flags = 0;
				rc = bnxt_flash_nvram(bp->dev,
						      BNX_DIR_TYPE_UPDATE,
						      BNX_DIR_ORDINAL_FIRST,
						      0, 0, item_len, NULL, 0);
				if (!rc)
					break;
			}
			fallthrough;
		default:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
		}
	} while (defrag_attempted && !rc);

pkg_abort:
	hwrm_req_drop(bp, modify);
	hwrm_req_drop(bp, install);

	/* Translate a firmware-reported install result into an errno */
	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = nvm_update_err_to_stderr(dev, resp->result, extack);
	}
	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
3540
3541static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
3542					u32 install_type, struct netlink_ext_ack *extack)
3543{
3544	const struct firmware *fw;
3545	int rc;
3546
3547	rc = request_firmware(&fw, filename, &dev->dev);
3548	if (rc != 0) {
3549		netdev_err(dev, "PKG error %d requesting file: %s\n",
3550			   rc, filename);
3551		return rc;
3552	}
3553
3554	rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);
3555
3556	release_firmware(fw);
3557
3558	return rc;
3559}
3560
3561static int bnxt_flash_device(struct net_device *dev,
3562			     struct ethtool_flash *flash)
3563{
3564	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
3565		netdev_err(dev, "flashdev not supported from a virtual function\n");
3566		return -EINVAL;
3567	}
3568
3569	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
3570	    flash->region > 0xffff)
3571		return bnxt_flash_package_from_file(dev, flash->data,
3572						    flash->region, NULL);
3573
3574	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
3575}
3576
3577static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
3578{
3579	struct hwrm_nvm_get_dir_info_output *output;
3580	struct hwrm_nvm_get_dir_info_input *req;
3581	struct bnxt *bp = netdev_priv(dev);
3582	int rc;
 
 
3583
3584	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
3585	if (rc)
3586		return rc;
3587
3588	output = hwrm_req_hold(bp, req);
3589	rc = hwrm_req_send(bp, req);
3590	if (!rc) {
3591		*entries = le32_to_cpu(output->entries);
3592		*length = le32_to_cpu(output->entry_length);
3593	}
3594	hwrm_req_drop(bp, req);
3595	return rc;
3596}
3597
/* ethtool EEPROM size.  VFs have no NVM access; for PFs return -1 so
 * the entire 32-bit range of offsets can be passed via the ethtool
 * command-line utility.
 */
static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return BNXT_VF(bp) ? 0 : -1;
}
3610
/* Dump the NVM directory into @data (@len bytes) for ethtool -e with
 * offset 0.  The first two output bytes hold the entry count and entry
 * size (each truncated to 8 bits); the raw directory entries follow,
 * clamped to whichever of @len and the full directory size is smaller.
 */
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input *req;

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	if (!dir_entries || !entry_length)
		return -EIO;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;	/* u32 deliberately truncated to u8 */
	*data++ = entry_length;	/* u32 deliberately truncated to u8 */
	len -= 2;
	memset(data, 0xff, len);	/* pre-fill bytes beyond a short copy */

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
	if (rc)
		return rc;

	/* mul_u32_u32 avoids 32-bit overflow of entries * entry_length */
	buflen = mul_u32_u32(dir_entries, entry_length);
	buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}
	req->host_dest_addr = cpu_to_le64(dma_handle);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	hwrm_req_drop(bp, req);
	return rc;
}
3657
3658int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
3659			u32 length, u8 *data)
3660{
3661	struct bnxt *bp = netdev_priv(dev);
3662	int rc;
3663	u8 *buf;
3664	dma_addr_t dma_handle;
3665	struct hwrm_nvm_read_input *req;
3666
3667	if (!length)
3668		return -EINVAL;
3669
3670	rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
3671	if (rc)
3672		return rc;
3673
3674	buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
3675	if (!buf) {
3676		hwrm_req_drop(bp, req);
 
3677		return -ENOMEM;
3678	}
 
 
 
 
 
3679
3680	req->host_dest_addr = cpu_to_le64(dma_handle);
3681	req->dir_idx = cpu_to_le16(index);
3682	req->offset = cpu_to_le32(offset);
3683	req->len = cpu_to_le32(length);
3684
3685	hwrm_req_hold(bp, req); /* hold the slice */
3686	rc = hwrm_req_send(bp, req);
3687	if (rc == 0)
3688		memcpy(data, buf, length);
3689	hwrm_req_drop(bp, req);
3690	return rc;
3691}
3692
3693int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
3694			 u16 ext, u16 *index, u32 *item_length,
3695			 u32 *data_length)
3696{
3697	struct hwrm_nvm_find_dir_entry_output *output;
3698	struct hwrm_nvm_find_dir_entry_input *req;
3699	struct bnxt *bp = netdev_priv(dev);
3700	int rc;
 
 
3701
3702	rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
3703	if (rc)
3704		return rc;
3705
3706	req->enables = 0;
3707	req->dir_idx = 0;
3708	req->dir_type = cpu_to_le16(type);
3709	req->dir_ordinal = cpu_to_le16(ordinal);
3710	req->dir_ext = cpu_to_le16(ext);
3711	req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
3712	output = hwrm_req_hold(bp, req);
3713	rc = hwrm_req_send_silent(bp, req);
3714	if (rc == 0) {
3715		if (index)
3716			*index = le16_to_cpu(output->dir_idx);
3717		if (item_length)
3718			*item_length = le32_to_cpu(output->dir_item_length);
3719		if (data_length)
3720			*data_length = le32_to_cpu(output->dir_data_length);
3721	}
3722	hwrm_req_drop(bp, req);
3723	return rc;
3724}
3725
/* Scan the tab/newline separated package log in @data (mutated in place:
 * separators are replaced with NULs) and return a pointer into @data to
 * field number @desired_field of the *last* log line, or NULL if absent.
 */
static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char	*retval = NULL;
	char	*p;
	char	*value;
	int	field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		field = 0;
		retval = NULL;	/* a later line overrides earlier matches */
		while (*p != 0 && *p != '\n') {
			value = p;
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			if (*p != '\t')
				break;
			*p = 0;	/* terminate the field just scanned */
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;	/* terminate the line ('\n' -> NUL) */
	}
	return retval;
}
3758
3759int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
3760{
3761	struct bnxt *bp = netdev_priv(dev);
3762	u16 index = 0;
3763	char *pkgver;
3764	u32 pkglen;
3765	u8 *pkgbuf;
3766	int rc;
3767
3768	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
3769				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
3770				  &index, NULL, &pkglen);
3771	if (rc)
3772		return rc;
3773
3774	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
3775	if (!pkgbuf) {
3776		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
3777			pkglen);
3778		return -ENOMEM;
3779	}
3780
3781	rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
3782	if (rc)
3783		goto err;
3784
3785	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
3786				   pkglen);
3787	if (pkgver && *pkgver != 0 && isdigit(*pkgver))
3788		strscpy(ver, pkgver, size);
3789	else
3790		rc = -ENOENT;
3791
3792err:
3793	kfree(pkgbuf);
3794
3795	return rc;
3796}
3797
3798static void bnxt_get_pkgver(struct net_device *dev)
3799{
3800	struct bnxt *bp = netdev_priv(dev);
3801	char buf[FW_VER_STR_LEN];
3802	int len;
3803
3804	if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
3805		len = strlen(bp->fw_ver_str);
3806		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
3807			 "/pkg %s", buf);
3808	}
 
 
3809}
3810
3811static int bnxt_get_eeprom(struct net_device *dev,
3812			   struct ethtool_eeprom *eeprom,
3813			   u8 *data)
3814{
3815	u32 index;
3816	u32 offset;
3817
3818	if (eeprom->offset == 0) /* special offset value to get directory */
3819		return bnxt_get_nvram_directory(dev, eeprom->len, data);
3820
3821	index = eeprom->offset >> 24;
3822	offset = eeprom->offset & 0xffffff;
3823
3824	if (index == 0) {
3825		netdev_err(dev, "unsupported index value: %d\n", index);
3826		return -EINVAL;
3827	}
3828
3829	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
3830}
3831
3832static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
3833{
3834	struct hwrm_nvm_erase_dir_entry_input *req;
3835	struct bnxt *bp = netdev_priv(dev);
3836	int rc;
3837
3838	rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
3839	if (rc)
3840		return rc;
3841
3842	req->dir_idx = cpu_to_le16(index);
3843	return hwrm_req_send(bp, req);
 
3844}
3845
3846static int bnxt_set_eeprom(struct net_device *dev,
3847			   struct ethtool_eeprom *eeprom,
3848			   u8 *data)
3849{
3850	struct bnxt *bp = netdev_priv(dev);
3851	u8 index, dir_op;
3852	u16 type, ext, ordinal, attr;
3853
3854	if (!BNXT_PF(bp)) {
3855		netdev_err(dev, "NVM write not supported from a virtual function\n");
3856		return -EINVAL;
3857	}
3858
3859	type = eeprom->magic >> 16;
3860
3861	if (type == 0xffff) { /* special value for directory operations */
3862		index = eeprom->magic & 0xff;
3863		dir_op = eeprom->magic >> 8;
3864		if (index == 0)
3865			return -EINVAL;
3866		switch (dir_op) {
3867		case 0x0e: /* erase */
3868			if (eeprom->offset != ~eeprom->magic)
3869				return -EINVAL;
3870			return bnxt_erase_nvram_directory(dev, index - 1);
3871		default:
3872			return -EINVAL;
3873		}
3874	}
3875
3876	/* Create or re-write an NVM item: */
3877	if (bnxt_dir_type_is_executable(type))
3878		return -EOPNOTSUPP;
3879	ext = eeprom->magic & 0xffff;
3880	ordinal = eeprom->offset >> 16;
3881	attr = eeprom->offset & 0xffff;
3882
3883	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
3884				eeprom->len);
3885}
3886
/* ethtool --set-eee handler.  Validates the request against autoneg
 * state, LPI timer limits and advertised speeds, caches it in bp->eee,
 * and (when the device is up) pushes the new link settings to firmware.
 */
static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	u32 advertising;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
	if (!edata->eee_enabled)
		goto eee_ok;

	/* EEE requires speed autoneg to be enabled */
	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		rc = -EINVAL;
		goto eee_exit;
	}
	if (edata->tx_lpi_enabled) {
		/* Range-check the LPI timer when fw advertises limits */
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			rc = -EINVAL;
			goto eee_exit;
		} else if (!bp->lpi_tmr_hi) {
			/* No fw limits: keep the previously cached timer */
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	if (!edata->advertised) {
		/* Default to all speeds both autoneg'd and EEE-capable */
		edata->advertised = advertising & eee->supported;
	} else if (edata->advertised & ~advertising) {
		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
			    edata->advertised, advertising);
		rc = -EINVAL;
		goto eee_exit;
	}

	eee->advertised = edata->advertised;
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

eee_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
3944
3945static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
3946{
3947	struct bnxt *bp = netdev_priv(dev);
3948
3949	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
3950		return -EOPNOTSUPP;
3951
3952	*edata = bp->eee;
3953	if (!bp->eee.eee_enabled) {
3954		/* Preserve tx_lpi_timer so that the last value will be used
3955		 * by default when it is re-enabled.
3956		 */
3957		edata->advertised = 0;
3958		edata->tx_lpi_enabled = 0;
3959	}
3960
3961	if (!bp->eee.eee_active)
3962		edata->lp_advertised = 0;
3963
3964	return 0;
3965}
3966
/* Read transceiver module EEPROM bytes over the firmware's I2C channel.
 * Transfers are chunked to BNXT_MAX_PHY_I2C_RESP_SIZE because the HWRM
 * response buffer is limited.  hwrm_req_hold() keeps the request alive so
 * it can be re-sent for each chunk; dropped once at the end.
 * Returns 0 on success or a negative errno from the first failed chunk.
 */
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
					    u16 page_number, u8 bank,
					    u16 start_addr, u16 data_length,
					    u8 *buf)
{
	struct hwrm_port_phy_i2c_read_output *output;
	struct hwrm_port_phy_i2c_read_input *req;
	int rc, byte_offset = 0;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
	if (rc)
		return rc;

	output = hwrm_req_hold(bp, req);
	req->i2c_slave_addr = i2c_addr;
	req->page_number = cpu_to_le16(page_number);
	req->port_id = cpu_to_le16(bp->pf.port_id);
	do {
		u16 xfer_size;

		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
		data_length -= xfer_size;
		req->page_offset = cpu_to_le16(start_addr + byte_offset);
		req->data_length = xfer_size;
		/* Only set the optional fields when they carry a non-zero
		 * value; older firmware may not accept the enables bits.
		 */
		req->enables =
			cpu_to_le32((start_addr + byte_offset ?
				     PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
				     0) |
				    (bank ?
				     PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
				     0));
		rc = hwrm_req_send(bp, req);
		if (!rc)
			memcpy(buf + byte_offset, output->data, xfer_size);

		byte_offset += xfer_size;
	} while (!rc && data_length > 0);
	hwrm_req_drop(bp, req);

	return rc;
}
4007
/* ethtool get_module_info handler: identify the plugged transceiver type
 * by reading its SFF module ID byte from EEPROM page A0 and map it to the
 * corresponding ethtool EEPROM layout/length.
 */
static int bnxt_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
	if (bp->link_info.module_status >
		PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return -EOPNOTSUPP;

	/* This feature is not supported in older firmware versions */
	if (bp->hwrm_spec_code < 0x10202)
		return -EOPNOTSUPP;

	/* Read module ID (byte 0) through the diag-support byte in one go. */
	rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
					      SFF_DIAG_SUPPORT_OFFSET + 1,
					      data);
	if (!rc) {
		u8 module_id = data[0];
		u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];

		switch (module_id) {
		case SFF_MODULE_ID_SFP:
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			/* Without diagnostics there is no A2 page; report
			 * only the A0 (8436-sized) region.
			 */
			if (!diag_supported)
				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP:
		case SFF_MODULE_ID_QSFP_PLUS:
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP28:
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
			break;
		default:
			rc = -EOPNOTSUPP;
			break;
		}
	}
	return rc;
}
4057
/* ethtool get_module_eeprom handler: the flat ethtool offset space maps
 * the first ETH_MODULE_SFF_8436_LEN bytes to I2C address A0 and the
 * remainder to address A2, so a request may be split across both.
 */
static int bnxt_get_module_eeprom(struct net_device *dev,
				  struct ethtool_eeprom *eeprom,
				  u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u16  start = eeprom->offset, length = eeprom->len;
	int rc = 0;

	memset(data, 0, eeprom->len);

	/* Read A0 portion of the EEPROM */
	if (start < ETH_MODULE_SFF_8436_LEN) {
		/* Clamp this read to the end of the A0 region. */
		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
			length = ETH_MODULE_SFF_8436_LEN - start;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
						      start, length, data);
		if (rc)
			return rc;
		start += length;
		data += length;
		length = eeprom->len - length;
	}

	/* Read A2 portion of the EEPROM */
	if (length) {
		/* A2 offsets are relative to the start of the A2 region. */
		start -= ETH_MODULE_SFF_8436_LEN;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
						      start, length, data);
	}
	return rc;
}
4089
/* Check whether the transceiver module is in a readable state.
 * Returns 0 when the module is usable (status at or below WARNINGMSG),
 * otherwise sets a descriptive extack message and returns -EINVAL.
 */
static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
{
	if (bp->link_info.module_status <=
	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return 0;

	switch (bp->link_info.module_status) {
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
		NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
		break;
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
		NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
		break;
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
		NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown error");
		break;
	}
	return -EINVAL;
}
4112
/* ethtool get_module_eeprom_by_page handler: read an arbitrary module
 * EEPROM page/bank via firmware I2C.  Returns the number of bytes read
 * on success (per the ethtool contract) or a negative errno with an
 * extack message on failure.
 */
static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
					  const struct ethtool_module_eeprom *page_data,
					  struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = bnxt_get_module_status(bp, extack);
	if (rc)
		return rc;

	if (bp->hwrm_spec_code < 0x10202) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
		return -EINVAL;
	}

	/* Bank selection is an optional firmware capability. */
	if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
		return -EINVAL;
	}

	/* FW expects the 8-bit I2C address form, hence the << 1. */
	rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
					      page_data->page, page_data->bank,
					      page_data->offset,
					      page_data->length,
					      page_data->data);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed");
		return rc;
	}
	return page_data->length;
}
4145
4146static int bnxt_nway_reset(struct net_device *dev)
4147{
4148	int rc = 0;
4149
4150	struct bnxt *bp = netdev_priv(dev);
4151	struct bnxt_link_info *link_info = &bp->link_info;
4152
4153	if (!BNXT_PHY_CFG_ABLE(bp))
4154		return -EOPNOTSUPP;
4155
4156	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
4157		return -EINVAL;
4158
4159	if (netif_running(dev))
4160		rc = bnxt_hwrm_set_link_setting(bp, true, false);
4161
4162	return rc;
4163}
4164
/* ethtool set_phys_id handler: blink (or restore) the port LEDs so the
 * physical port can be identified.  Programs every LED reported by
 * firmware in a single HWRM_PORT_LED_CFG request.
 */
static int bnxt_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct hwrm_port_led_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_pf_info *pf = &bp->pf;
	struct bnxt_led_cfg *led_cfg;
	u8 led_state;
	__le16 duration;
	int rc, i;

	/* LED control is PF-only and requires firmware-reported LEDs. */
	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	if (state == ETHTOOL_ID_ACTIVE) {
		/* Alternate blink with 500 ms on/off periods. */
		led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
		duration = cpu_to_le16(500);
	} else if (state == ETHTOOL_ID_INACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
		duration = cpu_to_le16(0);
	} else {
		return -EINVAL;
	}
	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
	if (rc)
		return rc;

	req->port_id = cpu_to_le16(pf->port_id);
	req->num_leds = bp->num_leds;
	/* The per-LED config fields form an array starting at led0_id. */
	led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req->enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}
	return hwrm_req_send(bp, req);

}
4205
/* Ask firmware to fire a test interrupt on the given completion ring.
 * Returns 0 on success or a negative errno from the HWRM layer.
 */
static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
{
	struct hwrm_selftest_irq_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
	if (rc)
		return rc;

	req->cmpl_ring = cpu_to_le16(cmpl_ring);
	return hwrm_req_send(bp, req);
}
4218
4219static int bnxt_test_irq(struct bnxt *bp)
4220{
4221	int i;
4222
4223	for (i = 0; i < bp->cp_nr_rings; i++) {
4224		u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
4225		int rc;
4226
4227		rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
4228		if (rc)
4229			return rc;
4230	}
4231	return 0;
4232}
4233
4234static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
4235{
4236	struct hwrm_port_mac_cfg_input *req;
4237	int rc;
4238
4239	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
4240	if (rc)
4241		return rc;
4242
4243	req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
4244	if (enable)
4245		req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
4246	else
4247		req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
4248	return hwrm_req_send(bp, req);
4249}
4250
/* Query the bitmap of link speeds that can be forced (non-autoneg).
 * On success *force_speeds holds the firmware-reported mask.
 */
static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
{
	struct hwrm_port_phy_qcaps_output *resp;
	struct hwrm_port_phy_qcaps_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
	if (rc)
		return rc;

	/* Hold the request so resp stays valid until we copy the field. */
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);

	hwrm_req_drop(bp, req);
	return rc;
}
4269
/* Temporarily force a fixed link speed so PHY loopback can run on PHYs
 * that do not support loopback with autoneg enabled.  Reuses the caller's
 * held PHY-cfg request and clears the force fields afterwards so the
 * caller can send its own configuration with the same request.
 */
static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
				    struct hwrm_port_phy_cfg_input *req)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 fw_advertising;
	u16 fw_speed;
	int rc;

	/* Nothing to do if autoneg is off or the PHY can loop back with
	 * autoneg enabled.
	 */
	if (!link_info->autoneg ||
	    (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
		return 0;

	rc = bnxt_query_force_speeds(bp, &fw_advertising);
	if (rc)
		return rc;

	/* Pick the current link speed if up, else the fastest common
	 * forceable speed, falling back to 1Gb.
	 */
	fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
	if (BNXT_LINK_IS_UP(bp))
		fw_speed = bp->link_info.link_speed;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;

	req->force_link_speed = cpu_to_le16(fw_speed);
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	/* Restore the request fields for the caller's subsequent send. */
	req->flags = 0;
	req->force_link_speed = cpu_to_le16(0);
	return rc;
}
4306
/* Enable or disable PHY loopback (local or external) via HWRM.
 * When enabling, autoneg may first be disabled with a forced speed
 * because some PHYs cannot loop back while autonegotiating.
 */
static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
	struct hwrm_port_phy_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;

	/* prevent bnxt_disable_an_for_lpbk() from consuming the request */
	hwrm_req_hold(bp, req);

	if (enable) {
		bnxt_disable_an_for_lpbk(bp, req);
		if (ext)
			req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
		else
			req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
	} else {
		req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
	}
	req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
	rc = hwrm_req_send(bp, req);
	hwrm_req_drop(bp, req);
	return rc;
}
4333
/* Validate one looped-back RX packet against the pattern transmitted by
 * bnxt_run_loopback(): destination MAC, source MAC, then an incrementing
 * byte pattern.  Returns 0 on match, -EIO on any mismatch.
 */
static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    u32 raw_cons, int pkt_size)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_cmp *rxcmp;
	u16 cp_cons, cons;
	u8 *data;
	u32 len;
	int i;

	rxr = bnapi->rx_ring;
	cp_cons = RING_CMP(raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	/* opaque holds the RX ring index of the buffer for this packet. */
	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data_ptr;
	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	if (len != pkt_size)
		return -EIO;
	/* Skip the destination MAC; verify the source MAC. */
	i = ETH_ALEN;
	if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
		return -EIO;
	i += ETH_ALEN;
	/* Remaining payload must be the incrementing byte pattern. */
	for (  ; i < pkt_size; i++) {
		if (data[i] != (u8)(i & 0xff))
			return -EIO;
	}
	return 0;
}
4366
/* Busy-poll the completion ring for the looped-back packet's RX
 * completion (up to ~1ms) and verify its contents.  Returns 0 when the
 * packet is received and matches, -EIO on timeout or mismatch.
 */
static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      int pkt_size)
{
	struct tx_cmp *txcmp;
	int rc = -EIO;
	u32 raw_cons;
	u32 cons;
	int i;

	raw_cons = cpr->cp_raw_cons;
	for (i = 0; i < 200; i++) {
		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons)) {
			udelay(5);
			continue;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
		    TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
			rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
			/* RX L2 completions occupy two ring entries. */
			raw_cons = NEXT_RAW_CMP(raw_cons);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			break;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}
	/* Record consumed entries so normal processing resumes correctly. */
	cpr->cp_raw_cons = raw_cons;
	return rc;
}
4402
/* Run one loopback iteration: build a patterned test frame, transmit it
 * on ring 0, and poll for it to come back on the RX side.  The caller
 * must have a loopback mode (MAC/PHY/external) already configured.
 * Returns 0 on success, -ENOMEM/-EIO on allocation/DMA/verify failure.
 */
static int bnxt_run_loopback(struct bnxt *bp)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct bnxt_cp_ring_info *cpr;
	int pkt_size, i = 0;
	struct sk_buff *skb;
	dma_addr_t map;
	u8 *data;
	int rc;

	/* On P5+ chips RX completions arrive on a dedicated RX cpr. */
	cpr = &rxr->bnapi->cp_ring;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		cpr = rxr->rx_cpr;
	/* Keep the frame within the copy threshold so the RX buffer holds
	 * the complete packet for verification.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	data = skb_put(skb, pkt_size);
	/* Frame layout: dst MAC, src MAC (both our own), then an
	 * incrementing byte pattern checked by bnxt_rx_loopback().
	 */
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++)
		data[i] = (u8)(i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);

	/* Sync BD data before updating doorbell */
	wmb();

	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	rc = bnxt_poll_loopback(bp, cpr, pkt_size);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
	dev_kfree_skb(skb);
	return rc;
}
4447
/* Execute the firmware-defined self tests selected by @test_mask.
 * *test_results receives a bitmap of tests that passed.  Uses the
 * firmware-advertised selftest timeout instead of the default.
 */
static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
	struct hwrm_selftest_exec_output *resp;
	struct hwrm_selftest_exec_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
	if (rc)
		return rc;

	hwrm_req_timeout(bp, req, bp->test_info->timeout);
	req->flags = test_mask;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	*test_results = resp->test_success;
	hwrm_req_drop(bp, req);
	return rc;
}
4467
/* Driver-implemented self tests appended after the firmware-reported
 * ones: MAC loopback, PHY loopback, external loopback, and IRQ.
 * Their indices are relative to bp->num_tests.
 */
#define BNXT_DRV_TESTS			4
#define BNXT_MACLPBK_TEST_IDX		(bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 1)
#define BNXT_EXTLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
#define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 3)
4473
/* ethtool self_test handler.  Online tests run without disturbing
 * traffic; offline tests close the NIC, run the firmware tests and the
 * MAC/PHY/external loopback tests in half-open state, then fully reopen.
 * Each buf[] slot is 0 on pass, 1 on fail; failures also set
 * ETH_TEST_FL_FAILED in etest->flags.
 */
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
			   u64 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	bool do_ext_lpbk = false;
	bool offline = false;
	u8 test_results = 0;
	u8 test_mask = 0;
	int rc = 0, i;

	if (!bp->num_tests || !BNXT_PF(bp))
		return;
	memset(buf, 0, sizeof(u64) * bp->num_tests);
	if (!netif_running(dev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	/* External loopback needs both a user request and PHY support. */
	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
	    (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
		do_ext_lpbk = true;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
			return;
		}
		offline = true;
	}

	/* Build the firmware test mask: online-capable tests always run;
	 * offline-only tests run only when offline mode was requested.
	 */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if (!(bp->test_info->offline_mask & bit_val))
			test_mask |= bit_val;
		else if (offline)
			test_mask |= bit_val;
	}
	if (!offline) {
		bnxt_run_fw_tests(bp, test_mask, &test_results);
	} else {
		bnxt_ulp_stop(bp);
		bnxt_close_nic(bp, true, false);

		bnxt_run_fw_tests(bp, test_mask, &test_results);

		/* Assume MAC loopback failed until it passes. */
		buf[BNXT_MACLPBK_TEST_IDX] = 1;
		bnxt_hwrm_mac_loopback(bp, true);
		msleep(250);
		rc = bnxt_half_open_nic(bp);
		if (rc) {
			bnxt_hwrm_mac_loopback(bp, false);
			etest->flags |= ETH_TEST_FL_FAILED;
			bnxt_ulp_start(bp, rc);
			return;
		}
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_MACLPBK_TEST_IDX] = 0;

		bnxt_hwrm_mac_loopback(bp, false);
		/* PHY loopback needs extra settle time for link. */
		bnxt_hwrm_phy_loopback(bp, true, false);
		msleep(1000);
		if (bnxt_run_loopback(bp)) {
			buf[BNXT_PHYLPBK_TEST_IDX] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (do_ext_lpbk) {
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
			bnxt_hwrm_phy_loopback(bp, true, true);
			msleep(1000);
			if (bnxt_run_loopback(bp)) {
				buf[BNXT_EXTLPBK_TEST_IDX] = 1;
				etest->flags |= ETH_TEST_FL_FAILED;
			}
		}
		bnxt_hwrm_phy_loopback(bp, false, false);
		bnxt_half_close_nic(bp);
		rc = bnxt_open_nic(bp, true, true);
		bnxt_ulp_start(bp, rc);
	}
	if (rc || bnxt_test_irq(bp)) {
		buf[BNXT_IRQ_TEST_IDX] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* Translate the firmware pass bitmap into per-test results. */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if ((test_mask & bit_val) && !(test_results & bit_val)) {
			buf[i] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}
}
4569
4570static int bnxt_reset(struct net_device *dev, u32 *flags)
4571{
4572	struct bnxt *bp = netdev_priv(dev);
4573	bool reload = false;
4574	u32 req = *flags;
4575
4576	if (!req)
4577		return -EINVAL;
4578
4579	if (!BNXT_PF(bp)) {
4580		netdev_err(dev, "Reset is not supported from a VF\n");
4581		return -EOPNOTSUPP;
4582	}
4583
4584	if (pci_vfs_assigned(bp->pdev) &&
4585	    !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
4586		netdev_err(dev,
4587			   "Reset not allowed when VFs are assigned to VMs\n");
4588		return -EBUSY;
4589	}
4590
4591	if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
4592		/* This feature is not supported in older firmware versions */
4593		if (bp->hwrm_spec_code >= 0x10803) {
4594			if (!bnxt_firmware_reset_chip(dev)) {
4595				netdev_info(dev, "Firmware reset request successful.\n");
4596				if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
4597					reload = true;
4598				*flags &= ~BNXT_FW_RESET_CHIP;
4599			}
4600		} else if (req == BNXT_FW_RESET_CHIP) {
4601			return -EOPNOTSUPP; /* only request, fail hard */
4602		}
 
 
 
 
 
 
 
 
 
 
 
 
4603	}
4604
4605	if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
4606		/* This feature is not supported in older firmware versions */
4607		if (bp->hwrm_spec_code >= 0x10803) {
4608			if (!bnxt_firmware_reset_ap(dev)) {
4609				netdev_info(dev, "Reset application processor successful.\n");
4610				reload = true;
4611				*flags &= ~BNXT_FW_RESET_AP;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4612			}
4613		} else if (req == BNXT_FW_RESET_AP) {
4614			return -EOPNOTSUPP; /* only request, fail hard */
4615		}
 
 
 
 
 
 
 
 
 
 
 
 
 
4616	}
 
 
 
 
4617
4618	if (reload)
4619		netdev_info(dev, "Reload driver to complete reset\n");
 
 
 
 
4620
4621	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
4622}
4623
4624static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
 
4625{
4626	struct bnxt *bp = netdev_priv(dev);
4627
4628	if (dump->flag > BNXT_DUMP_CRASH) {
4629		netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
4630		return -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4631	}
 
 
 
 
 
 
 
4632
4633	if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
4634		netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
4635		return -EOPNOTSUPP;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4636	}
4637
4638	bp->dump_flag = dump->flag;
4639	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4640}
4641
4642static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
4643{
4644	struct bnxt *bp = netdev_priv(dev);
4645
4646	if (bp->hwrm_spec_code < 0x10801)
4647		return -EOPNOTSUPP;
4648
4649	dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
4650			bp->ver_resp.hwrm_fw_min_8b << 16 |
4651			bp->ver_resp.hwrm_fw_bld_8b << 8 |
4652			bp->ver_resp.hwrm_fw_rsvd_8b;
4653
4654	dump->flag = bp->dump_flag;
4655	dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
4656	return 0;
4657}
4658
/* ethtool get_dump_data handler: collect the coredump of the currently
 * selected type into @buf.  dump->len is updated with the actual length.
 */
static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
			      void *buf)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	memset(buf, 0, dump->len);

	dump->flag = bp->dump_flag;
	return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
}
4672
/* ethtool get_ts_info handler: advertise software timestamping always,
 * and hardware timestamping plus the PHC index when PTP is configured.
 */
static int bnxt_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ptp_cfg *ptp;

	ptp = bp->ptp_cfg;
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	/* -1 means no PTP hardware clock is available. */
	info->phc_index = -1;
	if (!ptp)
		return 0;

	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
				 SOF_TIMESTAMPING_RX_HARDWARE |
				 SOF_TIMESTAMPING_RAW_HARDWARE;
	if (ptp->ptp_clock)
		info->phc_index = ptp_clock_index(ptp->ptp_clock);

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	/* Some firmware can timestamp all received packets. */
	if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
		info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}
4704
/* One-time ethtool setup: read the NVM package version if firmware does
 * not report it, then query the firmware self-test list and build the
 * test name strings (firmware tests followed by the driver's own
 * loopback/IRQ tests).  bp->test_info is freed by bnxt_ethtool_free().
 */
void bnxt_ethtool_init(struct bnxt *bp)
{
	struct hwrm_selftest_qlist_output *resp;
	struct hwrm_selftest_qlist_input *req;
	struct bnxt_test_info *test_info;
	struct net_device *dev = bp->dev;
	int i, rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
		bnxt_get_pkgver(dev);

	bp->num_tests = 0;
	/* Self tests are PF-only and need a recent HWRM spec. */
	if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
		return;







	test_info = bp->test_info;
	if (!test_info) {
		test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
		if (!test_info)
			return;
		bp->test_info = test_info;
	}

	if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
		return;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc)
		goto ethtool_init_exit;


	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
	if (bp->num_tests > BNXT_MAX_TEST)
		bp->num_tests = BNXT_MAX_TEST;

	test_info->offline_mask = resp->offline_tests;
	test_info->timeout = le16_to_cpu(resp->test_timeout);
	if (!test_info->timeout)
		test_info->timeout = HWRM_CMD_TIMEOUT;
	for (i = 0; i < bp->num_tests; i++) {
		char *str = test_info->string[i];
		char *fw_str = resp->test_name[i];

		/* Driver tests get fixed names; firmware tests use the
		 * firmware-supplied name plus offline/online tagging.
		 */
		if (i == BNXT_MACLPBK_TEST_IDX) {
			strcpy(str, "Mac loopback test (offline)");
		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
			strcpy(str, "Phy loopback test (offline)");
		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
			strcpy(str, "Ext loopback test (offline)");
		} else if (i == BNXT_IRQ_TEST_IDX) {
			strcpy(str, "Interrupt_test (offline)");
		} else {
			snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
				 fw_str, test_info->offline_mask & (1 << i) ?
					"offline" : "online");





		}
	}

ethtool_init_exit:
	hwrm_req_drop(bp, req);
}
4766
4767static void bnxt_get_eth_phy_stats(struct net_device *dev,
4768				   struct ethtool_eth_phy_stats *phy_stats)
4769{
4770	struct bnxt *bp = netdev_priv(dev);
4771	u64 *rx;
4772
4773	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
4774		return;
4775
4776	rx = bp->rx_port_stats_ext.sw_stats;
4777	phy_stats->SymbolErrorDuringCarrier =
4778		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
4779}
4780
4781static void bnxt_get_eth_mac_stats(struct net_device *dev,
4782				   struct ethtool_eth_mac_stats *mac_stats)
4783{
4784	struct bnxt *bp = netdev_priv(dev);
4785	u64 *rx, *tx;
4786
4787	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4788		return;
4789
4790	rx = bp->port_stats.sw_stats;
4791	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4792
4793	mac_stats->FramesReceivedOK =
4794		BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
4795	mac_stats->FramesTransmittedOK =
4796		BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
4797	mac_stats->FrameCheckSequenceErrors =
4798		BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
4799	mac_stats->AlignmentErrors =
4800		BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
4801	mac_stats->OutOfRangeLengthField =
4802		BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
4803}
4804
4805static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
4806				    struct ethtool_eth_ctrl_stats *ctrl_stats)
4807{
4808	struct bnxt *bp = netdev_priv(dev);
4809	u64 *rx;
4810
4811	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4812		return;
4813
4814	rx = bp->port_stats.sw_stats;
4815	ctrl_stats->MACControlFramesReceived =
4816		BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
4817}
4818
/* Frame-size buckets for the RMON histogram, matching the hardware's
 * rx/tx _Nb_Mb_frames counters; terminated by an all-zero entry.
 */
static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  9216 },
	{ 9217, 16383 },
	{}
};
4832
/* Standard ethtool RMON statistics: error counters plus RX/TX frame-size
 * histograms whose buckets correspond to bnxt_rmon_ranges.  Left
 * untouched on VFs or when port stats are not collected.
 */
static void bnxt_get_rmon_stats(struct net_device *dev,
				struct ethtool_rmon_stats *rmon_stats,
				const struct ethtool_rmon_hist_range **ranges)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	rmon_stats->jabbers =
		BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
	rmon_stats->oversize_pkts =
		BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
	rmon_stats->undersize_pkts =
		BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);

	rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
	rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
	rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
	rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
	rmon_stats->hist[4] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
	rmon_stats->hist[5] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
	rmon_stats->hist[6] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
	rmon_stats->hist[7] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
	rmon_stats->hist[8] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
	rmon_stats->hist[9] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);

	rmon_stats->hist_tx[0] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
	rmon_stats->hist_tx[1] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
	rmon_stats->hist_tx[2] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
	rmon_stats->hist_tx[3] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
	rmon_stats->hist_tx[4] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
	rmon_stats->hist_tx[5] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
	rmon_stats->hist_tx[6] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
	rmon_stats->hist_tx[7] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
	rmon_stats->hist_tx[8] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
	rmon_stats->hist_tx[9] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);

	*ranges = bnxt_rmon_ranges;
}
4893
4894static void bnxt_get_link_ext_stats(struct net_device *dev,
4895				    struct ethtool_link_ext_stats *stats)
4896{
4897	struct bnxt *bp = netdev_priv(dev);
4898	u64 *rx;
4899
4900	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
4901		return;
4902
4903	rx = bp->rx_port_stats_ext.sw_stats;
4904	stats->link_down_events =
4905		*(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
4906}
4907
/* Release the self-test info allocated by bnxt_ethtool_init().
 * kfree(NULL) is a no-op, so this is safe if it was never allocated;
 * the pointer is cleared to guard against double free.
 */
void bnxt_ethtool_free(struct bnxt *bp)
{
	kfree(bp->test_info);
	bp->test_info = NULL;
}
4913
4914const struct ethtool_ops bnxt_ethtool_ops = {
4915	.cap_link_lanes_supported	= 1,
4916	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
4917				     ETHTOOL_COALESCE_MAX_FRAMES |
4918				     ETHTOOL_COALESCE_USECS_IRQ |
4919				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
4920				     ETHTOOL_COALESCE_STATS_BLOCK_USECS |
4921				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
4922				     ETHTOOL_COALESCE_USE_CQE,
4923	.get_link_ksettings	= bnxt_get_link_ksettings,
4924	.set_link_ksettings	= bnxt_set_link_ksettings,
4925	.get_fec_stats		= bnxt_get_fec_stats,
4926	.get_fecparam		= bnxt_get_fecparam,
4927	.set_fecparam		= bnxt_set_fecparam,
4928	.get_pause_stats	= bnxt_get_pause_stats,
4929	.get_pauseparam		= bnxt_get_pauseparam,
4930	.set_pauseparam		= bnxt_set_pauseparam,
4931	.get_drvinfo		= bnxt_get_drvinfo,
4932	.get_regs_len		= bnxt_get_regs_len,
4933	.get_regs		= bnxt_get_regs,
4934	.get_wol		= bnxt_get_wol,
4935	.set_wol		= bnxt_set_wol,
4936	.get_coalesce		= bnxt_get_coalesce,
4937	.set_coalesce		= bnxt_set_coalesce,
4938	.get_msglevel		= bnxt_get_msglevel,
4939	.set_msglevel		= bnxt_set_msglevel,
4940	.get_sset_count		= bnxt_get_sset_count,
4941	.get_strings		= bnxt_get_strings,
4942	.get_ethtool_stats	= bnxt_get_ethtool_stats,
4943	.set_ringparam		= bnxt_set_ringparam,
4944	.get_ringparam		= bnxt_get_ringparam,
4945	.get_channels		= bnxt_get_channels,
4946	.set_channels		= bnxt_set_channels,
4947	.get_rxnfc		= bnxt_get_rxnfc,
4948	.set_rxnfc		= bnxt_set_rxnfc,
4949	.get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
4950	.get_rxfh_key_size      = bnxt_get_rxfh_key_size,
4951	.get_rxfh               = bnxt_get_rxfh,
4952	.set_rxfh		= bnxt_set_rxfh,
4953	.flash_device		= bnxt_flash_device,
4954	.get_eeprom_len         = bnxt_get_eeprom_len,
4955	.get_eeprom             = bnxt_get_eeprom,
4956	.set_eeprom		= bnxt_set_eeprom,
4957	.get_link		= bnxt_get_link,
4958	.get_link_ext_stats	= bnxt_get_link_ext_stats,
4959	.get_eee		= bnxt_get_eee,
4960	.set_eee		= bnxt_set_eee,
4961	.get_module_info	= bnxt_get_module_info,
4962	.get_module_eeprom	= bnxt_get_module_eeprom,
4963	.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
4964	.nway_reset		= bnxt_nway_reset,
4965	.set_phys_id		= bnxt_set_phys_id,
4966	.self_test		= bnxt_self_test,
4967	.get_ts_info		= bnxt_get_ts_info,
4968	.reset			= bnxt_reset,
4969	.set_dump		= bnxt_set_dump,
4970	.get_dump_flag		= bnxt_get_dump_flag,
4971	.get_dump_data		= bnxt_get_dump_data,
4972	.get_eth_phy_stats	= bnxt_get_eth_phy_stats,
4973	.get_eth_mac_stats	= bnxt_get_eth_mac_stats,
4974	.get_eth_ctrl_stats	= bnxt_get_eth_ctrl_stats,
4975	.get_rmon_stats		= bnxt_get_rmon_stats,
4976};
v5.4
   1/* Broadcom NetXtreme-C/E network driver.
   2 *
   3 * Copyright (c) 2014-2016 Broadcom Corporation
   4 * Copyright (c) 2016-2017 Broadcom Limited
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 */
  10
 
  11#include <linux/ctype.h>
  12#include <linux/stringify.h>
  13#include <linux/ethtool.h>
 
 
  14#include <linux/interrupt.h>
  15#include <linux/pci.h>
  16#include <linux/etherdevice.h>
  17#include <linux/crc32.h>
  18#include <linux/firmware.h>
  19#include <linux/utsname.h>
  20#include <linux/time.h>
 
 
 
 
  21#include "bnxt_hsi.h"
  22#include "bnxt.h"
 
 
  23#include "bnxt_xdp.h"
 
  24#include "bnxt_ethtool.h"
  25#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
  26#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
  27#include "bnxt_coredump.h"
  28#define FLASH_NVRAM_TIMEOUT	((HWRM_CMD_TIMEOUT) * 100)
  29#define FLASH_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)
  30#define INSTALL_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)
 
 
 
 
  31
  32static u32 bnxt_get_msglevel(struct net_device *dev)
  33{
  34	struct bnxt *bp = netdev_priv(dev);
  35
  36	return bp->msg_enable;
  37}
  38
  39static void bnxt_set_msglevel(struct net_device *dev, u32 value)
  40{
  41	struct bnxt *bp = netdev_priv(dev);
  42
  43	bp->msg_enable = value;
  44}
  45
  46static int bnxt_get_coalesce(struct net_device *dev,
  47			     struct ethtool_coalesce *coal)
 
 
  48{
  49	struct bnxt *bp = netdev_priv(dev);
  50	struct bnxt_coal *hw_coal;
  51	u16 mult;
  52
  53	memset(coal, 0, sizeof(*coal));
  54
  55	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
  56
  57	hw_coal = &bp->rx_coal;
  58	mult = hw_coal->bufs_per_record;
  59	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
  60	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
  61	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
  62	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
 
 
 
  63
  64	hw_coal = &bp->tx_coal;
  65	mult = hw_coal->bufs_per_record;
  66	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
  67	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
  68	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
  69	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
 
 
 
  70
  71	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
  72
  73	return 0;
  74}
  75
  76static int bnxt_set_coalesce(struct net_device *dev,
  77			     struct ethtool_coalesce *coal)
 
 
  78{
  79	struct bnxt *bp = netdev_priv(dev);
  80	bool update_stats = false;
  81	struct bnxt_coal *hw_coal;
  82	int rc = 0;
  83	u16 mult;
  84
  85	if (coal->use_adaptive_rx_coalesce) {
  86		bp->flags |= BNXT_FLAG_DIM;
  87	} else {
  88		if (bp->flags & BNXT_FLAG_DIM) {
  89			bp->flags &= ~(BNXT_FLAG_DIM);
  90			goto reset_coalesce;
  91		}
  92	}
  93
 
 
 
 
 
  94	hw_coal = &bp->rx_coal;
  95	mult = hw_coal->bufs_per_record;
  96	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
  97	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
  98	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
  99	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
 
 
 
 
 
 100
 101	hw_coal = &bp->tx_coal;
 102	mult = hw_coal->bufs_per_record;
 103	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
 104	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
 105	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
 106	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
 
 
 
 
 
 107
 108	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
 109		u32 stats_ticks = coal->stats_block_coalesce_usecs;
 110
 111		/* Allow 0, which means disable. */
 112		if (stats_ticks)
 113			stats_ticks = clamp_t(u32, stats_ticks,
 114					      BNXT_MIN_STATS_COAL_TICKS,
 115					      BNXT_MAX_STATS_COAL_TICKS);
 116		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
 117		bp->stats_coal_ticks = stats_ticks;
 118		if (bp->stats_coal_ticks)
 119			bp->current_interval =
 120				bp->stats_coal_ticks * HZ / 1000000;
 121		else
 122			bp->current_interval = BNXT_TIMER_INTERVAL;
 123		update_stats = true;
 124	}
 125
 126reset_coalesce:
 127	if (netif_running(dev)) {
 128		if (update_stats) {
 129			rc = bnxt_close_nic(bp, true, false);
 130			if (!rc)
 131				rc = bnxt_open_nic(bp, true, false);
 132		} else {
 133			rc = bnxt_hwrm_set_coal(bp);
 134		}
 135	}
 136
 137	return rc;
 138}
 139
 140static const char * const bnxt_ring_stats_str[] = {
 141	"rx_ucast_packets",
 142	"rx_mcast_packets",
 143	"rx_bcast_packets",
 144	"rx_discards",
 145	"rx_drops",
 146	"rx_ucast_bytes",
 147	"rx_mcast_bytes",
 148	"rx_bcast_bytes",
 
 
 
 149	"tx_ucast_packets",
 150	"tx_mcast_packets",
 151	"tx_bcast_packets",
 
 152	"tx_discards",
 153	"tx_drops",
 154	"tx_ucast_bytes",
 155	"tx_mcast_bytes",
 156	"tx_bcast_bytes",
 157};
 158
 159static const char * const bnxt_ring_tpa_stats_str[] = {
 160	"tpa_packets",
 161	"tpa_bytes",
 162	"tpa_events",
 163	"tpa_aborts",
 164};
 165
 166static const char * const bnxt_ring_tpa2_stats_str[] = {
 167	"rx_tpa_eligible_pkt",
 168	"rx_tpa_eligible_bytes",
 169	"rx_tpa_pkt",
 170	"rx_tpa_bytes",
 171	"rx_tpa_errors",
 
 172};
 173
 174static const char * const bnxt_ring_sw_stats_str[] = {
 175	"rx_l4_csum_errors",
 
 
 
 
 
 176	"missed_irqs",
 177};
 178
 179#define BNXT_RX_STATS_ENTRY(counter)	\
 180	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
 181
 182#define BNXT_TX_STATS_ENTRY(counter)	\
 183	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
 184
 185#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
 186	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
 187
 188#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
 189	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }
 190
 191#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
 192	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
 193	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)
 194
 195#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
 196	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
 197	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)
 198
 199#define BNXT_RX_STATS_EXT_PFC_ENTRIES				\
 200	BNXT_RX_STATS_EXT_PFC_ENTRY(0),				\
 201	BNXT_RX_STATS_EXT_PFC_ENTRY(1),				\
 202	BNXT_RX_STATS_EXT_PFC_ENTRY(2),				\
 203	BNXT_RX_STATS_EXT_PFC_ENTRY(3),				\
 204	BNXT_RX_STATS_EXT_PFC_ENTRY(4),				\
 205	BNXT_RX_STATS_EXT_PFC_ENTRY(5),				\
 206	BNXT_RX_STATS_EXT_PFC_ENTRY(6),				\
 207	BNXT_RX_STATS_EXT_PFC_ENTRY(7)
 208
 209#define BNXT_TX_STATS_EXT_PFC_ENTRIES				\
 210	BNXT_TX_STATS_EXT_PFC_ENTRY(0),				\
 211	BNXT_TX_STATS_EXT_PFC_ENTRY(1),				\
 212	BNXT_TX_STATS_EXT_PFC_ENTRY(2),				\
 213	BNXT_TX_STATS_EXT_PFC_ENTRY(3),				\
 214	BNXT_TX_STATS_EXT_PFC_ENTRY(4),				\
 215	BNXT_TX_STATS_EXT_PFC_ENTRY(5),				\
 216	BNXT_TX_STATS_EXT_PFC_ENTRY(6),				\
 217	BNXT_TX_STATS_EXT_PFC_ENTRY(7)
 218
 219#define BNXT_RX_STATS_EXT_COS_ENTRY(n)				\
 220	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),		\
 221	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)
 222
 223#define BNXT_TX_STATS_EXT_COS_ENTRY(n)				\
 224	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),		\
 225	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)
 226
 227#define BNXT_RX_STATS_EXT_COS_ENTRIES				\
 228	BNXT_RX_STATS_EXT_COS_ENTRY(0),				\
 229	BNXT_RX_STATS_EXT_COS_ENTRY(1),				\
 230	BNXT_RX_STATS_EXT_COS_ENTRY(2),				\
 231	BNXT_RX_STATS_EXT_COS_ENTRY(3),				\
 232	BNXT_RX_STATS_EXT_COS_ENTRY(4),				\
 233	BNXT_RX_STATS_EXT_COS_ENTRY(5),				\
 234	BNXT_RX_STATS_EXT_COS_ENTRY(6),				\
 235	BNXT_RX_STATS_EXT_COS_ENTRY(7)				\
 236
 237#define BNXT_TX_STATS_EXT_COS_ENTRIES				\
 238	BNXT_TX_STATS_EXT_COS_ENTRY(0),				\
 239	BNXT_TX_STATS_EXT_COS_ENTRY(1),				\
 240	BNXT_TX_STATS_EXT_COS_ENTRY(2),				\
 241	BNXT_TX_STATS_EXT_COS_ENTRY(3),				\
 242	BNXT_TX_STATS_EXT_COS_ENTRY(4),				\
 243	BNXT_TX_STATS_EXT_COS_ENTRY(5),				\
 244	BNXT_TX_STATS_EXT_COS_ENTRY(6),				\
 245	BNXT_TX_STATS_EXT_COS_ENTRY(7)				\
 246
 247#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
 248	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
 249	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)
 250
 251#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES				\
 252	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),				\
 253	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),				\
 254	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),				\
 255	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),				\
 256	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),				\
 257	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),				\
 258	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),				\
 259	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)
 260
 261#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
 262	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
 263	  __stringify(counter##_pri##n) }
 264
 265#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
 266	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
 267	  __stringify(counter##_pri##n) }
 268
 269#define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
 270	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
 271	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
 272	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
 273	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
 274	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
 275	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
 276	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
 277	BNXT_RX_STATS_PRI_ENTRY(counter, 7)
 278
 279#define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
 280	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
 281	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
 282	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
 283	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
 284	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
 285	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
 286	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
 287	BNXT_TX_STATS_PRI_ENTRY(counter, 7)
 288
 289#define BNXT_PCIE_STATS_ENTRY(counter)	\
 290	{ BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
 291
 292enum {
 293	RX_TOTAL_DISCARDS,
 294	TX_TOTAL_DISCARDS,
 
 295};
 296
 297static struct {
 298	u64			counter;
 299	char			string[ETH_GSTRING_LEN];
 300} bnxt_sw_func_stats[] = {
 301	{0, "rx_total_discard_pkts"},
 302	{0, "tx_total_discard_pkts"},
 
 
 
 
 303};
 304
 
 
 
 
 
 305static const struct {
 306	long offset;
 307	char string[ETH_GSTRING_LEN];
 308} bnxt_port_stats_arr[] = {
 309	BNXT_RX_STATS_ENTRY(rx_64b_frames),
 310	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
 311	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
 312	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
 313	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
 314	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
 315	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
 316	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
 317	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
 318	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
 319	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
 320	BNXT_RX_STATS_ENTRY(rx_total_frames),
 321	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
 322	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
 323	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
 324	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
 325	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
 326	BNXT_RX_STATS_ENTRY(rx_pause_frames),
 327	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
 328	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
 329	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
 330	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
 331	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
 332	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
 333	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
 334	BNXT_RX_STATS_ENTRY(rx_good_frames),
 335	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
 336	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
 337	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
 338	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
 339	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
 340	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
 341	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
 342	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
 343	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
 344	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
 345	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
 346	BNXT_RX_STATS_ENTRY(rx_bytes),
 347	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
 348	BNXT_RX_STATS_ENTRY(rx_runt_frames),
 349	BNXT_RX_STATS_ENTRY(rx_stat_discard),
 350	BNXT_RX_STATS_ENTRY(rx_stat_err),
 351
 352	BNXT_TX_STATS_ENTRY(tx_64b_frames),
 353	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
 354	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
 355	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
 356	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
 357	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
 358	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
 359	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
 360	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
 361	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
 362	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
 363	BNXT_TX_STATS_ENTRY(tx_good_frames),
 364	BNXT_TX_STATS_ENTRY(tx_total_frames),
 365	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
 366	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
 367	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
 368	BNXT_TX_STATS_ENTRY(tx_pause_frames),
 369	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
 370	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
 371	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
 372	BNXT_TX_STATS_ENTRY(tx_err),
 373	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
 374	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
 375	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
 376	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
 377	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
 378	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
 379	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
 380	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
 381	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
 382	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
 383	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
 384	BNXT_TX_STATS_ENTRY(tx_total_collisions),
 385	BNXT_TX_STATS_ENTRY(tx_bytes),
 386	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
 387	BNXT_TX_STATS_ENTRY(tx_stat_discard),
 388	BNXT_TX_STATS_ENTRY(tx_stat_error),
 389};
 390
 391static const struct {
 392	long offset;
 393	char string[ETH_GSTRING_LEN];
 394} bnxt_port_stats_ext_arr[] = {
 395	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
 396	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
 397	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
 398	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
 399	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
 400	BNXT_RX_STATS_EXT_COS_ENTRIES,
 401	BNXT_RX_STATS_EXT_PFC_ENTRIES,
 402	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
 403	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
 404	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
 405	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
 406	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
 
 
 
 407};
 408
 409static const struct {
 410	long offset;
 411	char string[ETH_GSTRING_LEN];
 412} bnxt_tx_port_stats_ext_arr[] = {
 413	BNXT_TX_STATS_EXT_COS_ENTRIES,
 414	BNXT_TX_STATS_EXT_PFC_ENTRIES,
 415};
 416
 417static const struct {
 418	long base_off;
 419	char string[ETH_GSTRING_LEN];
 420} bnxt_rx_bytes_pri_arr[] = {
 421	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
 422};
 423
 424static const struct {
 425	long base_off;
 426	char string[ETH_GSTRING_LEN];
 427} bnxt_rx_pkts_pri_arr[] = {
 428	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
 429};
 430
 431static const struct {
 432	long base_off;
 433	char string[ETH_GSTRING_LEN];
 434} bnxt_tx_bytes_pri_arr[] = {
 435	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
 436};
 437
 438static const struct {
 439	long base_off;
 440	char string[ETH_GSTRING_LEN];
 441} bnxt_tx_pkts_pri_arr[] = {
 442	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
 443};
 444
 445static const struct {
 446	long offset;
 447	char string[ETH_GSTRING_LEN];
 448} bnxt_pcie_stats_arr[] = {
 449	BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
 450	BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
 451	BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
 452	BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
 453	BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
 454	BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
 455	BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
 456	BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
 457	BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
 458	BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
 459	BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
 460	BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
 461};
 462
 463#define BNXT_NUM_SW_FUNC_STATS	ARRAY_SIZE(bnxt_sw_func_stats)
 464#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
 465#define BNXT_NUM_STATS_PRI			\
 466	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
 467	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
 468	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
 469	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
 470#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
 471
 472static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
 473{
 474	if (BNXT_SUPPORTS_TPA(bp)) {
 475		if (bp->max_tpa_v2)
 476			return ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
 477		return ARRAY_SIZE(bnxt_ring_tpa_stats_str);
 
 
 
 478	}
 479	return 0;
 480}
 481
 482static int bnxt_get_num_ring_stats(struct bnxt *bp)
 483{
 484	int num_stats;
 485
 486	num_stats = ARRAY_SIZE(bnxt_ring_stats_str) +
 487		    ARRAY_SIZE(bnxt_ring_sw_stats_str) +
 488		    bnxt_get_num_tpa_ring_stats(bp);
 489	return num_stats * bp->cp_nr_rings;
 
 
 
 490}
 491
 492static int bnxt_get_num_stats(struct bnxt *bp)
 493{
 494	int num_stats = bnxt_get_num_ring_stats(bp);
 
 495
 496	num_stats += BNXT_NUM_SW_FUNC_STATS;
 497
 498	if (bp->flags & BNXT_FLAG_PORT_STATS)
 499		num_stats += BNXT_NUM_PORT_STATS;
 500
 501	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
 502		num_stats += bp->fw_rx_stats_ext_size +
 503			     bp->fw_tx_stats_ext_size;
 
 
 
 
 504		if (bp->pri2cos_valid)
 505			num_stats += BNXT_NUM_STATS_PRI;
 506	}
 507
 508	if (bp->flags & BNXT_FLAG_PCIE_STATS)
 509		num_stats += BNXT_NUM_PCIE_STATS;
 510
 511	return num_stats;
 512}
 513
 514static int bnxt_get_sset_count(struct net_device *dev, int sset)
 515{
 516	struct bnxt *bp = netdev_priv(dev);
 517
 518	switch (sset) {
 519	case ETH_SS_STATS:
 520		return bnxt_get_num_stats(bp);
 521	case ETH_SS_TEST:
 522		if (!bp->num_tests)
 523			return -EOPNOTSUPP;
 524		return bp->num_tests;
 525	default:
 526		return -EOPNOTSUPP;
 527	}
 528}
 529
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 530static void bnxt_get_ethtool_stats(struct net_device *dev,
 531				   struct ethtool_stats *stats, u64 *buf)
 532{
 
 
 
 
 533	u32 i, j = 0;
 534	struct bnxt *bp = netdev_priv(dev);
 535	u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) +
 536			  bnxt_get_num_tpa_ring_stats(bp);
 537
 538	if (!bp->bnapi) {
 539		j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
 540		goto skip_ring_stats;
 541	}
 542
 543	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
 544		bnxt_sw_func_stats[i].counter = 0;
 545
 546	for (i = 0; i < bp->cp_nr_rings; i++) {
 547		struct bnxt_napi *bnapi = bp->bnapi[i];
 548		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 549		__le64 *hw_stats = (__le64 *)cpr->hw_stats;
 
 550		int k;
 551
 552		for (k = 0; k < stat_fields; j++, k++)
 553			buf[j] = le64_to_cpu(hw_stats[k]);
 554		buf[j++] = cpr->rx_l4_csum_errors;
 555		buf[j++] = cpr->missed_irqs;
 556
 557		bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
 558			le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
 559		bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
 560			le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 561	}
 562
 563	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
 564		buf[j] = bnxt_sw_func_stats[i].counter;
 565
 566skip_ring_stats:
 
 
 
 
 
 567	if (bp->flags & BNXT_FLAG_PORT_STATS) {
 568		__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
 569
 570		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
 571			buf[j] = le64_to_cpu(*(port_stats +
 572					       bnxt_port_stats_arr[i].offset));
 573		}
 574	}
 575	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
 576		__le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
 577		__le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;
 578
 579		for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
 580			buf[j] = le64_to_cpu(*(rx_port_stats_ext +
 581					    bnxt_port_stats_ext_arr[i].offset));
 582		}
 583		for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
 584			buf[j] = le64_to_cpu(*(tx_port_stats_ext +
 585					bnxt_tx_port_stats_ext_arr[i].offset));
 
 
 
 
 
 586		}
 587		if (bp->pri2cos_valid) {
 588			for (i = 0; i < 8; i++, j++) {
 589				long n = bnxt_rx_bytes_pri_arr[i].base_off +
 590					 bp->pri2cos[i];
 591
 592				buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
 593			}
 594			for (i = 0; i < 8; i++, j++) {
 595				long n = bnxt_rx_pkts_pri_arr[i].base_off +
 596					 bp->pri2cos[i];
 597
 598				buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
 599			}
 600			for (i = 0; i < 8; i++, j++) {
 601				long n = bnxt_tx_bytes_pri_arr[i].base_off +
 602					 bp->pri2cos[i];
 603
 604				buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
 605			}
 606			for (i = 0; i < 8; i++, j++) {
 607				long n = bnxt_tx_pkts_pri_arr[i].base_off +
 608					 bp->pri2cos[i];
 609
 610				buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
 611			}
 612		}
 613	}
 614	if (bp->flags & BNXT_FLAG_PCIE_STATS) {
 615		__le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
 616
 617		for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
 618			buf[j] = le64_to_cpu(*(pcie_stats +
 619					       bnxt_pcie_stats_arr[i].offset));
 620		}
 621	}
 622}
 623
 624static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 625{
 626	struct bnxt *bp = netdev_priv(dev);
 627	static const char * const *str;
 628	u32 i, j, num_str;
 629
 630	switch (stringset) {
 631	case ETH_SS_STATS:
 632		for (i = 0; i < bp->cp_nr_rings; i++) {
 633			num_str = ARRAY_SIZE(bnxt_ring_stats_str);
 634			for (j = 0; j < num_str; j++) {
 635				sprintf(buf, "[%d]: %s", i,
 636					bnxt_ring_stats_str[j]);
 637				buf += ETH_GSTRING_LEN;
 
 
 
 
 
 
 
 
 
 
 638			}
 639			if (!BNXT_SUPPORTS_TPA(bp))
 
 640				goto skip_tpa_stats;
 641
 642			if (bp->max_tpa_v2) {
 643				num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
 644				str = bnxt_ring_tpa2_stats_str;
 645			} else {
 646				num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str);
 647				str = bnxt_ring_tpa_stats_str;
 648			}
 649			for (j = 0; j < num_str; j++) {
 650				sprintf(buf, "[%d]: %s", i, str[j]);
 651				buf += ETH_GSTRING_LEN;
 652			}
 653skip_tpa_stats:
 654			num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str);
 
 
 
 
 
 
 
 
 655			for (j = 0; j < num_str; j++) {
 656				sprintf(buf, "[%d]: %s", i,
 657					bnxt_ring_sw_stats_str[j]);
 658				buf += ETH_GSTRING_LEN;
 659			}
 660		}
 661		for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
 662			strcpy(buf, bnxt_sw_func_stats[i].string);
 663			buf += ETH_GSTRING_LEN;
 664		}
 665
 666		if (bp->flags & BNXT_FLAG_PORT_STATS) {
 667			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
 668				strcpy(buf, bnxt_port_stats_arr[i].string);
 669				buf += ETH_GSTRING_LEN;
 670			}
 671		}
 672		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
 673			for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
 
 
 
 
 674				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
 675				buf += ETH_GSTRING_LEN;
 676			}
 677			for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
 
 
 678				strcpy(buf,
 679				       bnxt_tx_port_stats_ext_arr[i].string);
 680				buf += ETH_GSTRING_LEN;
 681			}
 682			if (bp->pri2cos_valid) {
 683				for (i = 0; i < 8; i++) {
 684					strcpy(buf,
 685					       bnxt_rx_bytes_pri_arr[i].string);
 686					buf += ETH_GSTRING_LEN;
 687				}
 688				for (i = 0; i < 8; i++) {
 689					strcpy(buf,
 690					       bnxt_rx_pkts_pri_arr[i].string);
 691					buf += ETH_GSTRING_LEN;
 692				}
 693				for (i = 0; i < 8; i++) {
 694					strcpy(buf,
 695					       bnxt_tx_bytes_pri_arr[i].string);
 696					buf += ETH_GSTRING_LEN;
 697				}
 698				for (i = 0; i < 8; i++) {
 699					strcpy(buf,
 700					       bnxt_tx_pkts_pri_arr[i].string);
 701					buf += ETH_GSTRING_LEN;
 702				}
 703			}
 704		}
 705		if (bp->flags & BNXT_FLAG_PCIE_STATS) {
 706			for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
 707				strcpy(buf, bnxt_pcie_stats_arr[i].string);
 708				buf += ETH_GSTRING_LEN;
 709			}
 710		}
 711		break;
 712	case ETH_SS_TEST:
 713		if (bp->num_tests)
 714			memcpy(buf, bp->test_info->string,
 715			       bp->num_tests * ETH_GSTRING_LEN);
 716		break;
 717	default:
 718		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
 719			   stringset);
 720		break;
 721	}
 722}
 723
 724static void bnxt_get_ringparam(struct net_device *dev,
 725			       struct ethtool_ringparam *ering)
 
 
 726{
 727	struct bnxt *bp = netdev_priv(dev);
 728
 729	ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
 730	ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
 
 
 
 
 
 
 
 731	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
 732
 733	ering->rx_pending = bp->rx_ring_size;
 734	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
 735	ering->tx_pending = bp->tx_ring_size;
 736}
 737
 738static int bnxt_set_ringparam(struct net_device *dev,
 739			      struct ethtool_ringparam *ering)
 
 
 740{
 741	struct bnxt *bp = netdev_priv(dev);
 742
 743	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
 744	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
 745	    (ering->tx_pending <= MAX_SKB_FRAGS))
 746		return -EINVAL;
 747
 748	if (netif_running(dev))
 749		bnxt_close_nic(bp, false, false);
 750
 751	bp->rx_ring_size = ering->rx_pending;
 752	bp->tx_ring_size = ering->tx_pending;
 753	bnxt_set_ring_params(bp);
 754
 755	if (netif_running(dev))
 756		return bnxt_open_nic(bp, false, false);
 757
 758	return 0;
 759}
 760
 761static void bnxt_get_channels(struct net_device *dev,
 762			      struct ethtool_channels *channel)
 763{
 764	struct bnxt *bp = netdev_priv(dev);
 765	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 766	int max_rx_rings, max_tx_rings, tcs;
 767	int max_tx_sch_inputs;
 768
 769	/* Get the most up-to-date max_tx_sch_inputs. */
 770	if (BNXT_NEW_RM(bp))
 771		bnxt_hwrm_func_resc_qcaps(bp, false);
 772	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
 773
 774	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
 775	if (max_tx_sch_inputs)
 776		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
 
 
 
 
 
 
 777	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
 778
 779	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
 780		max_rx_rings = 0;
 781		max_tx_rings = 0;
 782	}
 783	if (max_tx_sch_inputs)
 784		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
 785
 786	tcs = netdev_get_num_tc(dev);
 787	if (tcs > 1)
 788		max_tx_rings /= tcs;
 789
 790	channel->max_rx = max_rx_rings;
 791	channel->max_tx = max_tx_rings;
 792	channel->max_other = 0;
 793	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
 794		channel->combined_count = bp->rx_nr_rings;
 795		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
 796			channel->combined_count--;
 797	} else {
 798		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
 799			channel->rx_count = bp->rx_nr_rings;
 800			channel->tx_count = bp->tx_nr_rings_per_tc;
 801		}
 802	}
 803}
 804
 805static int bnxt_set_channels(struct net_device *dev,
 806			     struct ethtool_channels *channel)
 807{
 808	struct bnxt *bp = netdev_priv(dev);
 809	int req_tx_rings, req_rx_rings, tcs;
 810	bool sh = false;
 811	int tx_xdp = 0;
 812	int rc = 0;
 
 813
 814	if (channel->other_count)
 815		return -EINVAL;
 816
 817	if (!channel->combined_count &&
 818	    (!channel->rx_count || !channel->tx_count))
 819		return -EINVAL;
 820
 821	if (channel->combined_count &&
 822	    (channel->rx_count || channel->tx_count))
 823		return -EINVAL;
 824
 825	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
 826					    channel->tx_count))
 827		return -EINVAL;
 828
 829	if (channel->combined_count)
 830		sh = true;
 831
 832	tcs = netdev_get_num_tc(dev);
 833
 834	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
 835	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
 836	if (bp->tx_nr_rings_xdp) {
 837		if (!sh) {
 838			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
 839			return -EINVAL;
 840		}
 841		tx_xdp = req_rx_rings;
 842	}
 843	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
 844	if (rc) {
 845		netdev_warn(dev, "Unable to allocate the requested rings\n");
 846		return rc;
 847	}
 848
 
 
 
 
 
 
 
 849	if (netif_running(dev)) {
 850		if (BNXT_PF(bp)) {
 851			/* TODO CHIMP_FW: Send message to all VF's
 852			 * before PF unload
 853			 */
 854		}
 855		rc = bnxt_close_nic(bp, true, false);
 856		if (rc) {
 857			netdev_err(bp->dev, "Set channel failure rc :%x\n",
 858				   rc);
 859			return rc;
 860		}
 861	}
 862
 863	if (sh) {
 864		bp->flags |= BNXT_FLAG_SHARED_RINGS;
 865		bp->rx_nr_rings = channel->combined_count;
 866		bp->tx_nr_rings_per_tc = channel->combined_count;
 867	} else {
 868		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
 869		bp->rx_nr_rings = channel->rx_count;
 870		bp->tx_nr_rings_per_tc = channel->tx_count;
 871	}
 872	bp->tx_nr_rings_xdp = tx_xdp;
 873	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
 874	if (tcs > 1)
 875		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
 876
 877	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
 878			       bp->tx_nr_rings + bp->rx_nr_rings;
 
 879
 880	/* After changing number of rx channels, update NTUPLE feature. */
 881	netdev_update_features(dev);
 882	if (netif_running(dev)) {
 883		rc = bnxt_open_nic(bp, true, false);
 884		if ((!rc) && BNXT_PF(bp)) {
 885			/* TODO CHIMP_FW: Send message to all VF's
 886			 * to renable
 887			 */
 888		}
 889	} else {
 890		rc = bnxt_reserve_rings(bp, true);
 891	}
 892
 893	return rc;
 894}
 895
 896#ifdef CONFIG_RFS_ACCEL
/* ETHTOOL_GRXCLSRLALL: report the sw_id of every installed ntuple filter.
 * Fills at most cmd->rule_cnt entries of rule_locs and updates
 * cmd->rule_cnt to the number actually written; cmd->data is set to the
 * current filter count.  Always returns 0.
 */
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int i, j = 0;

	cmd->data = bp->ntp_fltr_count;
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct bnxt_ntuple_filter *fltr;

		/* Filters are kept in an RCU-protected hash table; take the
		 * RCU read lock per bucket while walking its chain.
		 */
		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (j == cmd->rule_cnt)
				break;
			rule_locs[j++] = fltr->sw_id;
		}
		rcu_read_unlock();
		/* Stop early once the caller-provided array is full. */
		if (j == cmd->rule_cnt)
			break;
	}
	cmd->rule_cnt = j;
	return 0;
}
 921
/* ETHTOOL_GRXCLSRULE: report the flow spec of the ntuple filter whose
 * sw_id equals fs->location.  Returns -EINVAL when the location is out of
 * range, no such filter exists, or the filter's IP protocol is neither
 * TCP nor UDP.
 */
static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int i, rc = -EINVAL;

	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	/* Walk all hash buckets looking for the requested sw_id.  On a
	 * match we jump out with the RCU read lock still held; it is
	 * released on the common exit path below (fltr_err).
	 */
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->sw_id == fs->location)
				goto fltr_found;
		}
		rcu_read_unlock();
	}
	return rc;

fltr_found:
	fkeys = &fltr->fkeys;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V4_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V4_FLOW;
		else
			goto fltr_err;

		/* ntuple filters always match on the full 4-tuple, so the
		 * masks reported for addresses and ports are all ones.
		 */
		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
	} else {
		int i;

		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V6_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V6_FLOW;
		else
			goto fltr_err;

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
			fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
		}
		fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
	}

	fs->ring_cookie = fltr->rxq;
	rc = 0;

fltr_err:
	/* Drops the RCU read lock taken in the bucket that matched. */
	rcu_read_unlock();

	return rc;
}
1000#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1001
1002static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
1003{
1004	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
1005		return RXH_IP_SRC | RXH_IP_DST;
1006	return 0;
1007}
1008
1009static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
1010{
1011	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
1012		return RXH_IP_SRC | RXH_IP_DST;
1013	return 0;
1014}
1015
/* ETHTOOL_GRXFH: report which packet fields feed the RSS hash for the
 * requested flow type.  TCP (and UDP, when the device enables it) flows
 * hash on the full 4-tuple; all other IPv4/IPv6 flow types hash on
 * addresses only.
 */
static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through - UDP also hashes on addresses */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through - UDP also hashes on addresses */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}
1060
/* The only RSS hash-field combinations the chip supports:
 * 4-tuple (addresses + L4 ports) and 2-tuple (addresses only).
 */
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
1063
/* ETHTOOL_SRXFH: set the RSS hash fields for one flow type.  Only
 * exact 4-tuple, 2-tuple or empty field sets are accepted.  The new
 * configuration is built in a local copy first; the NIC is only
 * reconfigured (close/open cycle) if the config actually changed.
 */
static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	/* Only the three supported field combinations are valid. */
	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	/* 4-tuple hashing is only configurable per L4 protocol; UDP
	 * additionally requires device capability (BNXT_FLAG_UDP_RSS_CAP).
	 */
	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	/* 2-tuple / disabled hashing applies per address family. */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	/* Apply: a lightweight (irq-preserving) close/open pushes the new
	 * hash config to the firmware.
	 */
	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}
1140
1141static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1142			  u32 *rule_locs)
1143{
1144	struct bnxt *bp = netdev_priv(dev);
1145	int rc = 0;
1146
1147	switch (cmd->cmd) {
1148#ifdef CONFIG_RFS_ACCEL
1149	case ETHTOOL_GRXRINGS:
1150		cmd->data = bp->rx_nr_rings;
1151		break;
1152
1153	case ETHTOOL_GRXCLSRLCNT:
1154		cmd->rule_cnt = bp->ntp_fltr_count;
1155		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
1156		break;
1157
1158	case ETHTOOL_GRXCLSRLALL:
1159		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
1160		break;
1161
1162	case ETHTOOL_GRXCLSRULE:
1163		rc = bnxt_grxclsrule(bp, cmd);
1164		break;
1165#endif
1166
1167	case ETHTOOL_GRXFH:
1168		rc = bnxt_grxfh(bp, cmd);
1169		break;
1170
1171	default:
1172		rc = -EOPNOTSUPP;
1173		break;
1174	}
1175
1176	return rc;
1177}
1178
1179static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1180{
1181	struct bnxt *bp = netdev_priv(dev);
1182	int rc;
1183
1184	switch (cmd->cmd) {
1185	case ETHTOOL_SRXFH:
1186		rc = bnxt_srxfh(bp, cmd);
1187		break;
1188
 
 
 
 
 
 
 
 
1189	default:
1190		rc = -EOPNOTSUPP;
1191		break;
1192	}
1193	return rc;
1194}
1195
/* Report the size (in entries) of the RSS indirection table. */
static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
	return HW_HASH_INDEX_SIZE;
}
1200
/* Report the size (in bytes) of the RSS hash key. */
static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}
1205
1206static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
1207			 u8 *hfunc)
1208{
1209	struct bnxt *bp = netdev_priv(dev);
1210	struct bnxt_vnic_info *vnic;
1211	int i = 0;
1212
1213	if (hfunc)
1214		*hfunc = ETH_RSS_HASH_TOP;
1215
1216	if (!bp->vnic_info)
1217		return 0;
1218
1219	vnic = &bp->vnic_info[0];
1220	if (indir && vnic->rss_table) {
1221		for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
1222			indir[i] = le16_to_cpu(vnic->rss_table[i]);
 
1223	}
1224
1225	if (key && vnic->rss_hash_key)
1226		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
1227
1228	return 0;
1229}
1230
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1231static void bnxt_get_drvinfo(struct net_device *dev,
1232			     struct ethtool_drvinfo *info)
1233{
1234	struct bnxt *bp = netdev_priv(dev);
1235
1236	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1237	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1238	strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
1239	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
1240	info->n_stats = bnxt_get_num_stats(bp);
1241	info->testinfo_len = bp->num_tests;
1242	/* TODO CHIMP_FW: eeprom dump details */
1243	info->eedump_len = 0;
1244	/* TODO CHIMP FW: reg dump details */
1245	info->regdump_len = 0;
1246}
1247
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1248static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1249{
1250	struct bnxt *bp = netdev_priv(dev);
1251
1252	wol->supported = 0;
1253	wol->wolopts = 0;
1254	memset(&wol->sopass, 0, sizeof(wol->sopass));
1255	if (bp->flags & BNXT_FLAG_WOL_CAP) {
1256		wol->supported = WAKE_MAGIC;
1257		if (bp->wol)
1258			wol->wolopts = WAKE_MAGIC;
1259	}
1260}
1261
1262static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1263{
1264	struct bnxt *bp = netdev_priv(dev);
1265
1266	if (wol->wolopts & ~WAKE_MAGIC)
1267		return -EINVAL;
1268
1269	if (wol->wolopts & WAKE_MAGIC) {
1270		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
1271			return -EINVAL;
1272		if (!bp->wol) {
1273			if (bnxt_hwrm_alloc_wol_fltr(bp))
1274				return -EBUSY;
1275			bp->wol = 1;
1276		}
1277	} else {
1278		if (bp->wol) {
1279			if (bnxt_hwrm_free_wol_fltr(bp))
1280				return -EBUSY;
1281			bp->wol = 0;
1282		}
1283	}
1284	return 0;
1285}
1286
1287u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
1288{
1289	u32 speed_mask = 0;
1290
1291	/* TODO: support 25GB, 40GB, 50GB with different cable type */
1292	/* set the advertised speeds */
1293	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
1294		speed_mask |= ADVERTISED_100baseT_Full;
1295	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
1296		speed_mask |= ADVERTISED_1000baseT_Full;
1297	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
1298		speed_mask |= ADVERTISED_2500baseX_Full;
1299	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
1300		speed_mask |= ADVERTISED_10000baseT_Full;
1301	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
1302		speed_mask |= ADVERTISED_40000baseCR4_Full;
1303
1304	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
1305		speed_mask |= ADVERTISED_Pause;
1306	else if (fw_pause & BNXT_LINK_PAUSE_TX)
1307		speed_mask |= ADVERTISED_Asym_Pause;
1308	else if (fw_pause & BNXT_LINK_PAUSE_RX)
1309		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1310
1311	return speed_mask;
1312}
1313
/* Add the link modes described by the firmware speed mask "fw_speeds"
 * and pause bits "fw_pause" to the lk_ksettings->link_modes bitmap
 * selected by "name" (supported, advertising or lp_advertising).
 */
#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
{									\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     1000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     10000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     25000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     40000baseCR4_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR2_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100000baseCR4_Full);\
	if ((fw_pause) & BNXT_LINK_PAUSE_RX) {				\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Pause);		\
		if (!((fw_pause) & BNXT_LINK_PAUSE_TX))			\
			ethtool_link_ksettings_add_link_mode(		\
					lk_ksettings, name, Asym_Pause);\
	} else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Asym_Pause);	\
	}								\
}
1348
/* Convert the ethtool link-mode bitmap lk_ksettings->link_modes.name
 * into the firmware speed mask "fw_speeds".  Half- and full-duplex
 * 100M/1G modes map to the same firmware speed bit.
 */
#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)		\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;			\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  10000baseT_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  25000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  40000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB;		\
}
1377
1378static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1379				struct ethtool_link_ksettings *lk_ksettings)
1380{
1381	u16 fw_speeds = link_info->advertising;
1382	u8 fw_pause = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1383
1384	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1385		fw_pause = link_info->auto_pause_setting;
 
 
 
 
 
1386
1387	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
 
 
 
 
 
1388}
1389
1390static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
1391				struct ethtool_link_ksettings *lk_ksettings)
 
1392{
1393	u16 fw_speeds = link_info->lp_auto_link_speeds;
1394	u8 fw_pause = 0;
 
 
1395
1396	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1397		fw_pause = link_info->lp_pause;
 
 
 
 
1398
1399	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
1400				lp_advertising);
 
 
 
1401}
1402
1403static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1404				struct ethtool_link_ksettings *lk_ksettings)
1405{
1406	u16 fw_speeds = link_info->support_speeds;
1407
1408	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1409
1410	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
1411	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1412					     Asym_Pause);
 
1413
1414	if (link_info->support_auto_speeds)
1415		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1416						     Autoneg);
 
 
 
 
 
 
 
 
 
 
 
1417}
1418
/* Map a firmware link-speed constant to the corresponding ethtool
 * SPEED_* value, or SPEED_UNKNOWN if it is not recognized.
 */
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
	switch (fw_link_speed) {
	case BNXT_LINK_SPEED_100MB:
		return SPEED_100;
	case BNXT_LINK_SPEED_1GB:
		return SPEED_1000;
	case BNXT_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case BNXT_LINK_SPEED_10GB:
		return SPEED_10000;
	case BNXT_LINK_SPEED_20GB:
		return SPEED_20000;
	case BNXT_LINK_SPEED_25GB:
		return SPEED_25000;
	case BNXT_LINK_SPEED_40GB:
		return SPEED_40000;
	case BNXT_LINK_SPEED_50GB:
		return SPEED_50000;
	case BNXT_LINK_SPEED_100GB:
		return SPEED_100000;
	default:
		return SPEED_UNKNOWN;
	}
}
1444
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* ethtool get_link_ksettings handler: report supported/advertised link
 * modes, autoneg state, speed, duplex and port type from the cached
 * bnxt_link_info.  bp->link_lock keeps the snapshot consistent with
 * concurrent link updates.
 */
static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	struct ethtool_link_settings *base = &lk_ksettings->base;
	u32 ethtool_speed;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	mutex_lock(&bp->link_lock);
	bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
		ethtool_link_ksettings_add_link_mode(lk_ksettings,
						     advertising, Autoneg);
		base->autoneg = AUTONEG_ENABLE;
		/* Link-partner modes are only valid while the link is up. */
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
		ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
		if (!netif_carrier_ok(dev))
			base->duplex = DUPLEX_UNKNOWN;
		else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
		else
			base->duplex = DUPLEX_HALF;
	} else {
		/* Forced speed: report the requested settings. */
		base->autoneg = AUTONEG_DISABLE;
		ethtool_speed =
			bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
	base->speed = ethtool_speed;

	/* Derive the port type from the reported media type. */
	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     TP);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     TP);
	} else {
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     FIBRE);

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else if (link_info->media_type ==
			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}
1506
/* Map an ethtool SPEED_* value to the firmware forced-speed constant,
 * but only if the PHY reports that speed as supported.  Returns 0 when
 * the speed is unsupported or unrecognized.
 */
static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_spds = link_info->support_speeds;
	u32 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
		break;
	case SPEED_10000:
		if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
		break;
	case SPEED_25000:
		if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
		break;
	case SPEED_50000:
		if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
		break;
	case SPEED_100000:
		if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
		break;
	default:
		netdev_err(dev, "unsupported speed!\n");
		break;
	}
	return fw_speed;
}
1557
1558u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
1559{
1560	u16 fw_speed_mask = 0;
1561
1562	/* only support autoneg at speed 100, 1000, and 10000 */
1563	if (advertising & (ADVERTISED_100baseT_Full |
1564			   ADVERTISED_100baseT_Half)) {
1565		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
1566	}
1567	if (advertising & (ADVERTISED_1000baseT_Full |
1568			   ADVERTISED_1000baseT_Half)) {
1569		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
1570	}
1571	if (advertising & ADVERTISED_10000baseT_Full)
1572		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
1573
1574	if (advertising & ADVERTISED_40000baseCR4_Full)
1575		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
1576
1577	return fw_speed_mask;
1578}
1579
/* ethtool set_link_ksettings handler: configure autoneg advertisement
 * or a forced speed.  Only allowed on a single-function PF; forced
 * speed is rejected on BASE-T media.  Serialized by bp->link_lock.
 */
static int bnxt_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u16 fw_advertising = 0;
	u32 speed;
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
					advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		/* An empty advertisement means "advertise everything". */
		if (!fw_advertising)
			link_info->advertising = link_info->support_auto_speeds;
		else
			link_info->advertising = fw_advertising;
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		set_pause = true;
	} else {
		u16 fw_speed;
		u8 phy_type = link_info->phy_type;

		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		/* bnxt_get_fw_speed() returns 0 for unsupported speeds. */
		fw_speed = bnxt_get_fw_speed(dev, speed);
		if (!fw_speed) {
			rc = -EINVAL;
			goto set_setting_exit;
		}
		link_info->req_link_speed = fw_speed;
		link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
		link_info->autoneg = 0;
		link_info->advertising = 0;
	}

	/* Only push the config to firmware while the device is up. */
	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
1642
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1643static void bnxt_get_pauseparam(struct net_device *dev,
1644				struct ethtool_pauseparam *epause)
1645{
1646	struct bnxt *bp = netdev_priv(dev);
1647	struct bnxt_link_info *link_info = &bp->link_info;
1648
1649	if (BNXT_VF(bp))
1650		return;
1651	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
1652	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
1653	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
1654}
1655
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* ethtool set_pauseparam handler: configure pause autoneg or forced
 * RX/TX pause.  Pause autoneg requires speed autoneg to be enabled.
 * Only allowed on a single-function PF.
 */
static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (epause->autoneg) {
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
			return -EINVAL;

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		/* Newer firmware can autonegotiate pause by itself. */
		if (bp->hwrm_spec_code >= 0x10201)
			link_info->req_flow_ctrl =
				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	/* The RX/TX bits are OR'ed on top of whatever the branch above
	 * left in req_flow_ctrl.
	 */
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);
	return rc;
}
1693
/* ethtool get_link handler: report the cached link state (1 = up). */
static u32 bnxt_get_link(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return bp->link_info.link_up;
}
1701
/* Log that this function lacks the privilege to flash or reset the NIC
 * (firmware returned an access-denied error).
 */
static void bnxt_print_admin_err(struct bnxt *bp)
{
	netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
1706
1707static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1708				u16 ext, u16 *index, u32 *item_length,
1709				u32 *data_length);
1710
/* Write one NVRAM directory entry via the HWRM_NVM_WRITE firmware
 * command.  The payload is copied into a DMA-coherent bounce buffer
 * because the firmware reads it directly over PCIe.
 * Returns 0 on success or a negative errno.
 */
static int bnxt_flash_nvram(struct net_device *dev,
			    u16 dir_type,
			    u16 dir_ordinal,
			    u16 dir_ext,
			    u16 dir_attr,
			    const u8 *data,
			    size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_write_input req = {0};
	dma_addr_t dma_handle;
	u8 *kmem;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);

	req.dir_type = cpu_to_le16(dir_type);
	req.dir_ordinal = cpu_to_le16(dir_ordinal);
	req.dir_ext = cpu_to_le16(dir_ext);
	req.dir_attr = cpu_to_le16(dir_attr);
	req.dir_data_length = cpu_to_le32(data_len);

	kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
				  GFP_KERNEL);
	if (!kmem) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)data_len);
		return -ENOMEM;
	}
	memcpy(kmem, data, data_len);
	req.host_src_addr = cpu_to_le64(dma_handle);

	/* Flash programming is slow; use the extended timeout. */
	rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
	dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);

	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
1750
/* Ask the firmware (HWRM_FW_RESET) to reset the embedded processor
 * associated with the given NVRAM directory type, or the whole chip /
 * application processor for the special BNXT_FW_RESET_* values.
 * Returns 0 on success or a negative errno.
 */
static int bnxt_firmware_reset(struct net_device *dev,
			       u16 dir_type)
{
	struct hwrm_fw_reset_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);

	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/*       (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		req.embedded_proc_type =
			FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	case BNXT_FW_RESET_CHIP:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
		break;
	case BNXT_FW_RESET_AP:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
		break;
	default:
		return -EINVAL;
	}

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
1801
/* Validate and flash a firmware image for the given NVRAM directory
 * type, then trigger the matching processor reset on success.  The
 * image must carry the expected header (signature, code type, device
 * family) and a trailing CRC32 over the rest of the file.
 * Returns 0 on success or a negative errno.
 */
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int	rc = 0;
	u16	code_type;
	u32	stored_crc;
	u32	calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	/* Each directory type corresponds to one expected code type in
	 * the image header.
	 */
	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}
1881
/* Validate a microcode image and flash it to NVRAM.
 *
 * Unlike bnxt_flash_firmware(), microcode images carry their metadata in a
 * struct bnxt_ucode_trailer at the END of the file, followed by a
 * little-endian CRC32 over everything before it.  The trailer's signature,
 * directory type and length are all checked before flashing.
 *
 * Returns 0 on success, -EINVAL on any validation failure, or the error
 * from bnxt_flash_nvram().
 */
static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	/* The trailer sits at the very end of the image. */
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
		sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);

	return rc;
}
1932
1933static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
1934{
1935	switch (dir_type) {
1936	case BNX_DIR_TYPE_CHIMP_PATCH:
1937	case BNX_DIR_TYPE_BOOTCODE:
1938	case BNX_DIR_TYPE_BOOTCODE_2:
1939	case BNX_DIR_TYPE_APE_FW:
1940	case BNX_DIR_TYPE_APE_PATCH:
1941	case BNX_DIR_TYPE_KONG_FW:
1942	case BNX_DIR_TYPE_KONG_PATCH:
1943	case BNX_DIR_TYPE_BONO_FW:
1944	case BNX_DIR_TYPE_BONO_PATCH:
1945		return true;
1946	}
1947
1948	return false;
1949}
1950
1951static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
1952{
1953	switch (dir_type) {
1954	case BNX_DIR_TYPE_AVS:
1955	case BNX_DIR_TYPE_EXP_ROM_MBA:
1956	case BNX_DIR_TYPE_PCIE:
1957	case BNX_DIR_TYPE_TSCF_UCODE:
1958	case BNX_DIR_TYPE_EXT_PHY:
1959	case BNX_DIR_TYPE_CCM:
1960	case BNX_DIR_TYPE_ISCSI_BOOT:
1961	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
1962	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
1963		return true;
1964	}
1965
1966	return false;
1967}
1968
1969static bool bnxt_dir_type_is_executable(u16 dir_type)
1970{
1971	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
1972		bnxt_dir_type_is_other_exec_format(dir_type);
1973}
1974
1975static int bnxt_flash_firmware_from_file(struct net_device *dev,
1976					 u16 dir_type,
1977					 const char *filename)
1978{
1979	const struct firmware  *fw;
1980	int			rc;
1981
1982	rc = request_firmware(&fw, filename, &dev->dev);
1983	if (rc != 0) {
1984		netdev_err(dev, "Error %d requesting firmware file: %s\n",
1985			   rc, filename);
1986		return rc;
1987	}
1988	if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
1989		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
1990	else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
1991		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
1992	else
1993		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
1994				      0, 0, fw->data, fw->size);
1995	release_firmware(fw);
1996	return rc;
1997}
1998
/* Flash a complete firmware package file into the NVRAM update area and
 * ask the firmware to install it.
 *
 * Flow: locate the BNX_DIR_TYPE_UPDATE staging area, copy the file into a
 * DMA-coherent buffer and write it with HWRM_NVM_MODIFY, then issue
 * HWRM_NVM_INSTALL_UPDATE (retrying once with defragmentation allowed if
 * the firmware reports a fragmentation error).
 *
 * Returns 0 on success; -ENOBUFS if no update area exists, -EFBIG if the
 * file doesn't fit, -ENOMEM on allocation failure, -ENOPKG if the install
 * result indicates failure, or an HWRM error.
 */
static int bnxt_flash_package_from_file(struct net_device *dev,
					char *filename, u32 install_type)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_nvm_install_update_input install = {0};
	const struct firmware *fw;
	int rc, hwrm_err = 0;
	u32 item_len;
	u16 index;

	bnxt_hwrm_fw_set_time(bp);

	/* The package must be staged through the pre-created UPDATE entry. */
	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, &item_len, NULL) != 0) {
		netdev_err(dev, "PKG update area not created in nvram\n");
		return -ENOBUFS;
	}

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "PKG error %d requesting file: %s\n",
			   rc, filename);
		return rc;
	}

	if (fw->size > item_len) {
		netdev_err(dev, "PKG insufficient update area in nvram: %lu",
			   (unsigned long)fw->size);
		rc = -EFBIG;
	} else {
		dma_addr_t dma_handle;
		u8 *kmem;
		struct hwrm_nvm_modify_input modify = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);

		modify.dir_idx = cpu_to_le16(index);
		modify.len = cpu_to_le32(fw->size);

		/* Firmware reads the image via DMA from host memory. */
		kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
					  &dma_handle, GFP_KERNEL);
		if (!kmem) {
			netdev_err(dev,
				   "dma_alloc_coherent failure, length = %u\n",
				   (unsigned int)fw->size);
			rc = -ENOMEM;
		} else {
			memcpy(kmem, fw->data, fw->size);
			modify.host_src_addr = cpu_to_le64(dma_handle);

			hwrm_err = hwrm_send_message(bp, &modify,
						     sizeof(modify),
						     FLASH_PACKAGE_TIMEOUT);
			dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
					  dma_handle);
		}
	}
	release_firmware(fw);
	if (rc || hwrm_err)
		goto err_exit;

	/* ethtool encodes some install types in the upper 16 bits. */
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
	install.install_type = cpu_to_le32(install_type);

	mutex_lock(&bp->hwrm_cmd_lock);
	hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
				      INSTALL_PACKAGE_TIMEOUT);
	if (hwrm_err) {
		u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;

		/* On a fragmentation error, retry once allowing the
		 * firmware to defragment the NVRAM.
		 */
		if (resp->error_code && error_code ==
		    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
			install.flags |= cpu_to_le16(
			       NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
			hwrm_err = _hwrm_send_message(bp, &install,
						      sizeof(install),
						      INSTALL_PACKAGE_TIMEOUT);
		}
		if (hwrm_err)
			goto flash_pkg_exit;
	}

	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = -ENOPKG;
	}
flash_pkg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
err_exit:
	if (hwrm_err == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
2097
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2098static int bnxt_flash_device(struct net_device *dev,
2099			     struct ethtool_flash *flash)
2100{
2101	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
2102		netdev_err(dev, "flashdev not supported from a virtual function\n");
2103		return -EINVAL;
2104	}
2105
2106	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
2107	    flash->region > 0xffff)
2108		return bnxt_flash_package_from_file(dev, flash->data,
2109						    flash->region);
2110
2111	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
2112}
2113
/* Query the NVRAM directory via HWRM_NVM_GET_DIR_INFO.
 *
 * On success, *entries receives the number of directory entries and
 * *length the size of each entry in bytes.  Returns 0 or an HWRM error.
 */
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);

	/* Hold the lock while reading the shared response buffer. */
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		*entries = le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
2132
/* ethtool get_eeprom_len hook.  VFs have no NVRAM access, so report zero.
 * Otherwise return -1, which allows the entire 32-bit range of offsets to
 * be passed via the ethtool command-line utility.
 */
static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return BNXT_VF(bp) ? 0 : -1;
}
2145
/* Read the NVRAM directory listing into @data (at most @len bytes).
 *
 * The first two bytes are the entry count and entry size; the directory
 * entries follow, fetched by firmware DMA via HWRM_NVM_GET_DIR_ENTRIES.
 * Returns 0 on success or a negative errno.
 */
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input req = {0};

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	/* NOTE(review): dir_entries/entry_length are u32 stored into single
	 * bytes here — values above 255 would truncate; presumably the
	 * ethtool-side consumer relies on this 1-byte encoding.  Confirm.
	 */
	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	buflen = dir_entries * entry_length;
	buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
				 GFP_KERNEL);
	if (!buf) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)buflen);
		return -ENOMEM;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
	req.host_dest_addr = cpu_to_le64(dma_handle);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
	return rc;
}
2186
/* Read @length bytes at @offset from NVRAM directory entry @index into
 * @data, using a DMA-coherent bounce buffer for the HWRM_NVM_READ call.
 * Returns 0 on success or a negative errno.
 */
static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			       u32 length, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u8 *buf;
	dma_addr_t dma_handle;
	struct hwrm_nvm_read_input req = {0};

	if (!length)
		return -EINVAL;

	buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
				 GFP_KERNEL);
	if (!buf) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)length);
		return -ENOMEM;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
	req.host_dest_addr = cpu_to_le64(dma_handle);
	req.dir_idx = cpu_to_le16(index);
	req.offset = cpu_to_le32(offset);
	req.len = cpu_to_le32(length);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0)
		memcpy(data, buf, length);
	dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
	return rc;
}
2218
/* Look up an NVRAM directory entry by (type, ordinal, ext) via
 * HWRM_NVM_FIND_DIR_ENTRY.  Any of @index, @item_length and @data_length
 * may be NULL if the caller doesn't need that value.  Sent silently since
 * "not found" is an expected outcome.  Returns 0 or an HWRM error.
 */
static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
				u16 ext, u16 *index, u32 *item_length,
				u32 *data_length)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_find_dir_entry_input req = {0};
	struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
	req.enables = 0;
	req.dir_idx = 0;
	req.dir_type = cpu_to_le16(type);
	req.dir_ordinal = cpu_to_le16(ordinal);
	req.dir_ext = cpu_to_le16(ext);
	req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0) {
		if (index)
			*index = le16_to_cpu(output->dir_idx);
		if (item_length)
			*item_length = le32_to_cpu(output->dir_item_length);
		if (data_length)
			*data_length = le32_to_cpu(output->dir_data_length);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
2248
2249static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
2250{
2251	char	*retval = NULL;
2252	char	*p;
2253	char	*value;
2254	int	field = 0;
2255
2256	if (datalen < 1)
2257		return NULL;
2258	/* null-terminate the log data (removing last '\n'): */
2259	data[datalen - 1] = 0;
2260	for (p = data; *p != 0; p++) {
2261		field = 0;
2262		retval = NULL;
2263		while (*p != 0 && *p != '\n') {
2264			value = p;
2265			while (*p != 0 && *p != '\t' && *p != '\n')
2266				p++;
2267			if (field == desired_field)
2268				retval = value;
2269			if (*p != '\t')
2270				break;
2271			*p = 0;
2272			field++;
2273			p++;
2274		}
2275		if (*p == 0)
2276			break;
2277		*p = 0;
2278	}
2279	return retval;
2280}
2281
/* Append the NVRAM package version ("/pkg x.y.z") to bp->fw_ver_str.
 *
 * Reads the BNX_DIR_TYPE_PKG_LOG item and extracts the version field from
 * its last record.  Best-effort: silently returns if the log item is
 * missing, allocation fails, or the version string is unusable.
 */
static void bnxt_get_pkgver(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 index = 0;
	char *pkgver;
	u32 pkglen;
	u8 *pkgbuf;
	int len;

	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, NULL, &pkglen) != 0)
		return;

	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
	if (!pkgbuf) {
		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
			pkglen);
		return;
	}

	if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
		goto err;

	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
				   pkglen);
	/* Only accept a version that starts with a digit. */
	if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
		len = strlen(bp->fw_ver_str);
		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
			 "/pkg %s", pkgver);
	}
err:
	kfree(pkgbuf);
}
2316
2317static int bnxt_get_eeprom(struct net_device *dev,
2318			   struct ethtool_eeprom *eeprom,
2319			   u8 *data)
2320{
2321	u32 index;
2322	u32 offset;
2323
2324	if (eeprom->offset == 0) /* special offset value to get directory */
2325		return bnxt_get_nvram_directory(dev, eeprom->len, data);
2326
2327	index = eeprom->offset >> 24;
2328	offset = eeprom->offset & 0xffffff;
2329
2330	if (index == 0) {
2331		netdev_err(dev, "unsupported index value: %d\n", index);
2332		return -EINVAL;
2333	}
2334
2335	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
2336}
2337
/* Erase NVRAM directory entry @index via HWRM_NVM_ERASE_DIR_ENTRY.
 * Returns 0 or an HWRM error.
 */
static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_erase_dir_entry_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
	req.dir_idx = cpu_to_le16(index);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
2347
2348static int bnxt_set_eeprom(struct net_device *dev,
2349			   struct ethtool_eeprom *eeprom,
2350			   u8 *data)
2351{
2352	struct bnxt *bp = netdev_priv(dev);
2353	u8 index, dir_op;
2354	u16 type, ext, ordinal, attr;
2355
2356	if (!BNXT_PF(bp)) {
2357		netdev_err(dev, "NVM write not supported from a virtual function\n");
2358		return -EINVAL;
2359	}
2360
2361	type = eeprom->magic >> 16;
2362
2363	if (type == 0xffff) { /* special value for directory operations */
2364		index = eeprom->magic & 0xff;
2365		dir_op = eeprom->magic >> 8;
2366		if (index == 0)
2367			return -EINVAL;
2368		switch (dir_op) {
2369		case 0x0e: /* erase */
2370			if (eeprom->offset != ~eeprom->magic)
2371				return -EINVAL;
2372			return bnxt_erase_nvram_directory(dev, index - 1);
2373		default:
2374			return -EINVAL;
2375		}
2376	}
2377
2378	/* Create or re-write an NVM item: */
2379	if (bnxt_dir_type_is_executable(type) == true)
2380		return -EOPNOTSUPP;
2381	ext = eeprom->magic & 0xffff;
2382	ordinal = eeprom->offset >> 16;
2383	attr = eeprom->offset & 0xffff;
2384
2385	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
2386				eeprom->len);
2387}
2388
/* ethtool set_eee hook: validate and cache the requested EEE settings in
 * bp->eee, then push them to firmware if the interface is up.
 *
 * EEE requires autoneg; the LPI timer must be within the firmware-reported
 * [lpi_tmr_lo, lpi_tmr_hi] window, and advertised EEE speeds must be a
 * subset of the autoneg-advertised speeds.
 */
static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	u32 advertising =
		 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return -EOPNOTSUPP;

	/* Disabling EEE skips all validation; just record the state. */
	if (!edata->eee_enabled)
		goto eee_ok;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		return -EINVAL;
	}
	if (edata->tx_lpi_enabled) {
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			return -EINVAL;
		} else if (!bp->lpi_tmr_hi) {
			/* No firmware range: keep the previously-set timer. */
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	if (!edata->advertised) {
		/* Default: advertise everything supported and autoneg'd. */
		edata->advertised = advertising & eee->supported;
	} else if (edata->advertised & ~advertising) {
		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
			    edata->advertised, advertising);
		return -EINVAL;
	}

	eee->advertised = edata->advertised;
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

	return rc;
}
2440
/* ethtool get_eee hook: report the cached EEE state, masking fields that
 * are only meaningful while EEE is enabled/active.
 */
static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return -EOPNOTSUPP;

	*edata = bp->eee;
	if (!bp->eee.eee_enabled) {
		/* Preserve tx_lpi_timer so that the last value will be used
		 * by default when it is re-enabled.
		 */
		edata->advertised = 0;
		edata->tx_lpi_enabled = 0;
	}

	/* Link-partner advertisement is stale when EEE isn't active. */
	if (!bp->eee.eee_active)
		edata->lp_advertised = 0;

	return 0;
}
2462
/* Read @data_length bytes of SFP/QSFP module EEPROM starting at
 * @start_addr on I2C device @i2c_addr / page @page_number, chunked into
 * HWRM_PORT_PHY_I2C_READ requests of at most BNXT_MAX_PHY_I2C_RESP_SIZE
 * bytes each.  Returns 0 or the first HWRM error.
 */
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
					    u16 page_number, u16 start_addr,
					    u16 data_length, u8 *buf)
{
	struct hwrm_port_phy_i2c_read_input req = {0};
	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
	int rc, byte_offset = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
	req.i2c_slave_addr = i2c_addr;
	req.page_number = cpu_to_le16(page_number);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	do {
		u16 xfer_size;

		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
		data_length -= xfer_size;
		req.page_offset = cpu_to_le16(start_addr + byte_offset);
		req.data_length = xfer_size;
		/* page_offset is only valid when reading past offset 0. */
		req.enables = cpu_to_le32(start_addr + byte_offset ?
				 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
		mutex_lock(&bp->hwrm_cmd_lock);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (!rc)
			memcpy(buf + byte_offset, output->data, xfer_size);
		mutex_unlock(&bp->hwrm_cmd_lock);
		byte_offset += xfer_size;
	} while (!rc && data_length > 0);

	return rc;
}
2495
/* ethtool get_module_info hook: identify the plugged SFP/QSFP module and
 * report the EEPROM layout standard (SFF-8472/8436/8636) and size.
 */
static int bnxt_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
	if (bp->link_info.module_status >
		PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return -EOPNOTSUPP;

	/* This feature is not supported in older firmware versions */
	if (bp->hwrm_spec_code < 0x10202)
		return -EOPNOTSUPP;

	/* One read covers both the module ID byte and the diagnostics
	 * support flag.
	 */
	rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
					      SFF_DIAG_SUPPORT_OFFSET + 1,
					      data);
	if (!rc) {
		u8 module_id = data[0];
		u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];

		switch (module_id) {
		case SFF_MODULE_ID_SFP:
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			/* Without DDM support there's no A2 page to read. */
			if (!diag_supported)
				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP:
		case SFF_MODULE_ID_QSFP_PLUS:
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP28:
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
			break;
		default:
			rc = -EOPNOTSUPP;
			break;
		}
	}
	return rc;
}
2545
/* ethtool get_module_eeprom hook: read the requested window of module
 * EEPROM, splitting it across the A0 page (first SFF-8436 length bytes)
 * and the A2 diagnostics page beyond that.
 */
static int bnxt_get_module_eeprom(struct net_device *dev,
				  struct ethtool_eeprom *eeprom,
				  u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u16  start = eeprom->offset, length = eeprom->len;
	int rc = 0;

	memset(data, 0, eeprom->len);

	/* Read A0 portion of the EEPROM */
	if (start < ETH_MODULE_SFF_8436_LEN) {
		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
			length = ETH_MODULE_SFF_8436_LEN - start;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
						      start, length, data);
		if (rc)
			return rc;
		start += length;
		data += length;
		length = eeprom->len - length;	/* remainder spills into A2 */
	}

	/* Read A2 portion of the EEPROM */
	if (length) {
		start -= ETH_MODULE_SFF_8436_LEN;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
						      start, length, data);
	}
	return rc;
}
2577
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2578static int bnxt_nway_reset(struct net_device *dev)
2579{
2580	int rc = 0;
2581
2582	struct bnxt *bp = netdev_priv(dev);
2583	struct bnxt_link_info *link_info = &bp->link_info;
2584
2585	if (!BNXT_SINGLE_PF(bp))
2586		return -EOPNOTSUPP;
2587
2588	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
2589		return -EINVAL;
2590
2591	if (netif_running(dev))
2592		rc = bnxt_hwrm_set_link_setting(bp, true, false);
2593
2594	return rc;
2595}
2596
/* ethtool set_phys_id hook: blink (or restore) all port LEDs to identify
 * the physical port.  ACTIVE sets alternating blink at 500ms; INACTIVE
 * restores the default LED behavior.  PF-only, and only when firmware
 * reported LEDs.
 */
static int bnxt_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_pf_info *pf = &bp->pf;
	struct bnxt_led_cfg *led_cfg;
	u8 led_state;
	__le16 duration;
	int i, rc;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	if (state == ETHTOOL_ID_ACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
		duration = cpu_to_le16(500);
	} else if (state == ETHTOOL_ID_INACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
		duration = cpu_to_le16(0);
	} else {
		return -EINVAL;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.num_leds = bp->num_leds;
	/* The request carries an array of per-LED config slots starting
	 * at led0_id; fill one per firmware-reported LED.
	 */
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
2635
/* Ask firmware to fire a test interrupt on completion ring @cmpl_ring.
 * Returns 0 or an HWRM error.
 */
static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
{
	struct hwrm_selftest_irq_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
2643
2644static int bnxt_test_irq(struct bnxt *bp)
2645{
2646	int i;
2647
2648	for (i = 0; i < bp->cp_nr_rings; i++) {
2649		u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
2650		int rc;
2651
2652		rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
2653		if (rc)
2654			return rc;
2655	}
2656	return 0;
2657}
2658
/* Enable or disable internal MAC loopback via HWRM_PORT_MAC_CFG.
 * Returns 0 or an HWRM error.
 */
static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
{
	struct hwrm_port_mac_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);

	req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
	if (enable)
		req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
	else
		req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
2672
/* Query the bitmap of link speeds the PHY supports in forced mode via
 * HWRM_PORT_PHY_QCAPS.  On success *force_speeds holds the bitmap.
 */
static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
{
	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_phy_qcaps_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);

	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
2688
/* PHY loopback requires a forced speed, so if autoneg is on, pick a
 * suitable forced speed (current link speed if the carrier is up,
 * otherwise the fastest advertised speed, falling back to 1Gb) and apply
 * it with a PHY reset.  @req's force fields are cleared again afterwards
 * so the caller can reuse it.  No-op when autoneg is already off.
 */
static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
				    struct hwrm_port_phy_cfg_input *req)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 fw_advertising;
	u16 fw_speed;
	int rc;

	if (!link_info->autoneg)
		return 0;

	rc = bnxt_query_force_speeds(bp, &fw_advertising);
	if (rc)
		return rc;

	fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
	if (netif_carrier_ok(bp->dev))
		fw_speed = bp->link_info.link_speed;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;

	req->force_link_speed = cpu_to_le16(fw_speed);
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
	req->flags = 0;
	req->force_link_speed = cpu_to_le16(0);
	return rc;
}
2724
/* Enable or disable PHY loopback (local or, if @ext, external) via
 * HWRM_PORT_PHY_CFG.  Autoneg is forced off first when enabling, since
 * loopback needs a fixed speed.  Returns 0 or an HWRM error.
 */
static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
	struct hwrm_port_phy_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);

	if (enable) {
		bnxt_disable_an_for_lpbk(bp, &req);
		if (ext)
			req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
		else
			req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
	} else {
		req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
	}
	req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
2743
/* Verify that the looped-back test packet landed in the RX ring intact:
 * correct length, our MAC as source, and the expected incrementing byte
 * pattern in the payload (matching what bnxt_run_loopback() transmitted).
 * Returns 0 on match, -EIO on any mismatch.
 */
static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    u32 raw_cons, int pkt_size)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_cmp *rxcmp;
	u16 cp_cons, cons;
	u8 *data;
	u32 len;
	int i;

	rxr = bnapi->rx_ring;
	cp_cons = RING_CMP(raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	/* The completion's opaque field carries the RX buffer index. */
	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data_ptr;
	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	if (len != pkt_size)
		return -EIO;
	i = ETH_ALEN;
	/* Source MAC (bytes 6..11) must be our own address. */
	if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
		return -EIO;
	i += ETH_ALEN;
	for (  ; i < pkt_size; i++) {
		if (data[i] != (u8)(i & 0xff))
			return -EIO;
	}
	return 0;
}
2776
/* Busy-poll the completion ring for the loopback packet's RX completion
 * (up to ~1ms: 200 iterations x 5us) and validate it with
 * bnxt_rx_loopback().  Advances cpr->cp_raw_cons past the consumed
 * entries.  Returns 0 on success, -EIO on timeout or payload mismatch.
 */
static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      int pkt_size)
{
	struct tx_cmp *txcmp;
	int rc = -EIO;
	u32 raw_cons;
	u32 cons;
	int i;

	raw_cons = cpr->cp_raw_cons;
	for (i = 0; i < 200; i++) {
		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons)) {
			udelay(5);
			continue;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
			rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
			/* Skip both halves of the two-entry RX completion. */
			raw_cons = NEXT_RAW_CMP(raw_cons);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			break;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}
	cpr->cp_raw_cons = raw_cons;
	return rc;
}
2811
/* Transmit one self-addressed test packet on ring 0 and verify it comes
 * back through the RX path (the caller has already put the MAC/PHY into
 * loopback).  The payload is a broadcast dest, our MAC as source, then an
 * incrementing byte pattern checked by bnxt_rx_loopback().
 * Returns 0 on success or a negative errno.
 */
static int bnxt_run_loopback(struct bnxt *bp)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct bnxt_cp_ring_info *cpr;
	int pkt_size, i = 0;
	struct sk_buff *skb;
	dma_addr_t map;
	u8 *data;
	int rc;

	cpr = &rxr->bnapi->cp_ring;
	/* On P5 chips RX completions arrive on a dedicated sub-ring. */
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
	/* Keep the packet below the copy threshold so RX uses the buffer
	 * layout bnxt_rx_loopback() expects.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	data = skb_put(skb, pkt_size);
	eth_broadcast_addr(data);
	i += ETH_ALEN;
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++)
		data[i] = (u8)(i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	bnxt_xmit_bd(bp, txr, map, pkt_size);

	/* Sync BD data before updating doorbell */
	wmb();

	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	rc = bnxt_poll_loopback(bp, cpr, pkt_size);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);
	return rc;
}
2856
/* Ask the firmware to execute the self-tests selected in @test_mask and
 * return the per-test pass bits via @test_results.
 *
 * resp->test_success is cleared while holding hwrm_cmd_lock before the
 * command is sent, so a stale value is never reported if the firmware
 * call fails.  Returns the _hwrm_send_message() status.
 */
static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
	struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_selftest_exec_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	resp->test_success = 0;
	req.flags = test_mask;
	/* Use the firmware-advertised self-test timeout (set in
	 * bnxt_ethtool_init()), not the default HWRM timeout.
	 */
	rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
	*test_results = resp->test_success;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
2872
2873#define BNXT_DRV_TESTS			4
2874#define BNXT_MACLPBK_TEST_IDX		(bp->num_tests - BNXT_DRV_TESTS)
2875#define BNXT_PHYLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 1)
2876#define BNXT_EXTLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
2877#define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 3)
2878
/* ethtool -t handler.  Runs the firmware-advertised self-tests plus the
 * driver-implemented tests (MAC/PHY/external loopback, IRQ).  Offline
 * tests close the NIC, half-open it for the loopback runs, then reopen.
 * Per-test results go into @buf (non-zero = failed).
 */
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
			   u64 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	bool do_ext_lpbk = false;
	bool offline = false;
	u8 test_results = 0;
	u8 test_mask = 0;
	int rc = 0, i;

	/* Nothing to do unless firmware reported tests on a single PF */
	if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
		return;
	memset(buf, 0, sizeof(u64) * bp->num_tests);
	if (!netif_running(dev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
	    (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
		do_ext_lpbk = true;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (bp->pf.active_vfs) {
			etest->flags |= ETH_TEST_FL_FAILED;
			netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
			return;
		}
		offline = true;
	}

	/* Build the firmware test mask: online tests always run;
	 * offline-only tests run only when the user asked for offline.
	 */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if (!(bp->test_info->offline_mask & bit_val))
			test_mask |= bit_val;
		else if (offline)
			test_mask |= bit_val;
	}
	if (!offline) {
		bnxt_run_fw_tests(bp, test_mask, &test_results);
	} else {
		rc = bnxt_close_nic(bp, false, false);
		if (rc)
			return;
		bnxt_run_fw_tests(bp, test_mask, &test_results);

		/* Assume MAC loopback fails; cleared below on success */
		buf[BNXT_MACLPBK_TEST_IDX] = 1;
		bnxt_hwrm_mac_loopback(bp, true);
		msleep(250);
		rc = bnxt_half_open_nic(bp);
		if (rc) {
			bnxt_hwrm_mac_loopback(bp, false);
			etest->flags |= ETH_TEST_FL_FAILED;
			return;
		}
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_MACLPBK_TEST_IDX] = 0;

		bnxt_hwrm_mac_loopback(bp, false);
		bnxt_hwrm_phy_loopback(bp, true, false);
		msleep(1000);	/* let the PHY loopback link settle */
		if (bnxt_run_loopback(bp)) {
			buf[BNXT_PHYLPBK_TEST_IDX] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (do_ext_lpbk) {
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
			bnxt_hwrm_phy_loopback(bp, true, true);
			msleep(1000);
			if (bnxt_run_loopback(bp)) {
				buf[BNXT_EXTLPBK_TEST_IDX] = 1;
				etest->flags |= ETH_TEST_FL_FAILED;
			}
		}
		bnxt_hwrm_phy_loopback(bp, false, false);
		bnxt_half_close_nic(bp);
		rc = bnxt_open_nic(bp, false, true);
	}
	/* A failed reopen is also reported as an IRQ test failure */
	if (rc || bnxt_test_irq(bp)) {
		buf[BNXT_IRQ_TEST_IDX] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* Translate firmware pass bits into per-test fail flags */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if ((test_mask & bit_val) && !(test_results & bit_val)) {
			buf[i] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}
}
2973
2974static int bnxt_reset(struct net_device *dev, u32 *flags)
2975{
2976	struct bnxt *bp = netdev_priv(dev);
2977	int rc = 0;
 
 
 
 
2978
2979	if (!BNXT_PF(bp)) {
2980		netdev_err(dev, "Reset is not supported from a VF\n");
2981		return -EOPNOTSUPP;
2982	}
2983
2984	if (pci_vfs_assigned(bp->pdev)) {
 
2985		netdev_err(dev,
2986			   "Reset not allowed when VFs are assigned to VMs\n");
2987		return -EBUSY;
2988	}
2989
2990	if (*flags == ETH_RESET_ALL) {
2991		/* This feature is not supported in older firmware versions */
2992		if (bp->hwrm_spec_code < 0x10803)
2993			return -EOPNOTSUPP;
2994
2995		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
2996		if (!rc) {
2997			netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
2998			*flags = 0;
 
 
2999		}
3000	} else if (*flags == ETH_RESET_AP) {
3001		/* This feature is not supported in older firmware versions */
3002		if (bp->hwrm_spec_code < 0x10803)
3003			return -EOPNOTSUPP;
3004
3005		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
3006		if (!rc) {
3007			netdev_info(dev, "Reset Application Processor request successful.\n");
3008			*flags = 0;
3009		}
3010	} else {
3011		rc = -EINVAL;
3012	}
3013
3014	return rc;
3015}
3016
/* Run a chunked HWRM debug query (coredump LIST or RETRIEVE), DMAing
 * each chunk into a bounce buffer and accumulating it at increasing
 * offsets in info->dest_buf.  The request's sequence number selects the
 * chunk; firmware sets HWRM_DBG_CMN_FLAGS_MORE while chunks remain.
 *
 * For LIST requests the destination buffer is allocated here from the
 * segment count in the first response; the caller takes ownership of
 * info->dest_buf.  For RETRIEVE requests info->dest_buf_size is grown
 * to the total number of bytes received.
 */
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
				  struct bnxt_hwrm_dbg_dma_info *info)
{
	struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_dbg_cmn_input *cmn_req = msg;
	__le16 *seq_ptr = msg + info->seq_off;
	u16 seq = 0, len, segs_off;
	void *resp = cmn_resp;
	dma_addr_t dma_handle;
	int rc, off = 0;
	void *dma_buf;

	dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
				     GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
			    total_segments);
	cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
	cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
	mutex_lock(&bp->hwrm_cmd_lock);
	while (1) {
		*seq_ptr = cpu_to_le16(seq);
		rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
		/* First LIST response carries the segment count; size and
		 * allocate the destination buffer from it.
		 */
		if (!seq &&
		    cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
			info->segs = le16_to_cpu(*((__le16 *)(resp +
							      segs_off)));
			if (!info->segs) {
				rc = -EIO;
				break;
			}

			info->dest_buf_size = info->segs *
					sizeof(struct coredump_segment_record);
			info->dest_buf = kmalloc(info->dest_buf_size,
						 GFP_KERNEL);
			if (!info->dest_buf) {
				rc = -ENOMEM;
				break;
			}
		}

		/* NOTE(review): no bound check of off + len against the
		 * destination buffer size - assumes firmware never returns
		 * more data than advertised.  Verify before hardening.
		 */
		if (info->dest_buf)
			memcpy(info->dest_buf + off, dma_buf, len);

		if (cmn_req->req_type ==
				cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
			info->dest_buf_size += len;

		if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
			break;

		seq++;
		off += len;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
	return rc;
}
3082
3083static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
3084				       struct bnxt_coredump *coredump)
3085{
3086	struct hwrm_dbg_coredump_list_input req = {0};
3087	struct bnxt_hwrm_dbg_dma_info info = {NULL};
3088	int rc;
3089
3090	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);
3091
3092	info.dma_len = COREDUMP_LIST_BUF_LEN;
3093	info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
3094	info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
3095				     data_len);
3096
3097	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
3098	if (!rc) {
3099		coredump->data = info.dest_buf;
3100		coredump->data_size = info.dest_buf_size;
3101		coredump->total_segs = info.segs;
3102	}
3103	return rc;
3104}
3105
3106static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
3107					   u16 segment_id)
3108{
3109	struct hwrm_dbg_coredump_initiate_input req = {0};
3110
3111	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
3112	req.component_id = cpu_to_le16(component_id);
3113	req.segment_id = cpu_to_le16(segment_id);
3114
3115	return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT);
3116}
3117
3118static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
3119					   u16 segment_id, u32 *seg_len,
3120					   void *buf, u32 offset)
3121{
3122	struct hwrm_dbg_coredump_retrieve_input req = {0};
3123	struct bnxt_hwrm_dbg_dma_info info = {NULL};
3124	int rc;
3125
3126	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
3127	req.component_id = cpu_to_le16(component_id);
3128	req.segment_id = cpu_to_le16(segment_id);
3129
3130	info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
3131	info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
3132				seq_no);
3133	info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
3134				     data_len);
3135	if (buf)
3136		info.dest_buf = buf + offset;
3137
3138	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
3139	if (!rc)
3140		*seg_len = info.dest_buf_size;
3141
3142	return rc;
3143}
3144
3145static void
3146bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
3147			   struct bnxt_coredump_segment_hdr *seg_hdr,
3148			   struct coredump_segment_record *seg_rec, u32 seg_len,
3149			   int status, u32 duration, u32 instance)
3150{
3151	memset(seg_hdr, 0, sizeof(*seg_hdr));
3152	memcpy(seg_hdr->signature, "sEgM", 4);
3153	if (seg_rec) {
3154		seg_hdr->component_id = (__force __le32)seg_rec->component_id;
3155		seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
3156		seg_hdr->low_version = seg_rec->version_low;
3157		seg_hdr->high_version = seg_rec->version_hi;
3158	} else {
3159		/* For hwrm_ver_get response Component id = 2
3160		 * and Segment id = 0
3161		 */
3162		seg_hdr->component_id = cpu_to_le32(2);
3163		seg_hdr->segment_id = 0;
3164	}
3165	seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
3166	seg_hdr->length = cpu_to_le32(seg_len);
3167	seg_hdr->status = cpu_to_le32(status);
3168	seg_hdr->duration = cpu_to_le32(duration);
3169	seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
3170	seg_hdr->instance = cpu_to_le32(instance);
3171}
3172
3173static void
3174bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
3175			  time64_t start, s16 start_utc, u16 total_segs,
3176			  int status)
3177{
3178	time64_t end = ktime_get_real_seconds();
3179	u32 os_ver_major = 0, os_ver_minor = 0;
3180	struct tm tm;
3181
3182	time64_to_tm(start, 0, &tm);
3183	memset(record, 0, sizeof(*record));
3184	memcpy(record->signature, "cOrE", 4);
3185	record->flags = 0;
3186	record->low_version = 0;
3187	record->high_version = 1;
3188	record->asic_state = 0;
3189	strlcpy(record->system_name, utsname()->nodename,
3190		sizeof(record->system_name));
3191	record->year = cpu_to_le16(tm.tm_year + 1900);
3192	record->month = cpu_to_le16(tm.tm_mon + 1);
3193	record->day = cpu_to_le16(tm.tm_mday);
3194	record->hour = cpu_to_le16(tm.tm_hour);
3195	record->minute = cpu_to_le16(tm.tm_min);
3196	record->second = cpu_to_le16(tm.tm_sec);
3197	record->utc_bias = cpu_to_le16(start_utc);
3198	strcpy(record->commandline, "ethtool -w");
3199	record->total_segments = cpu_to_le32(total_segs);
3200
3201	sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
3202	record->os_ver_major = cpu_to_le32(os_ver_major);
3203	record->os_ver_minor = cpu_to_le32(os_ver_minor);
3204
3205	strlcpy(record->os_name, utsname()->sysname, 32);
3206	time64_to_tm(end, 0, &tm);
3207	record->end_year = cpu_to_le16(tm.tm_year + 1900);
3208	record->end_month = cpu_to_le16(tm.tm_mon + 1);
3209	record->end_day = cpu_to_le16(tm.tm_mday);
3210	record->end_hour = cpu_to_le16(tm.tm_hour);
3211	record->end_minute = cpu_to_le16(tm.tm_min);
3212	record->end_second = cpu_to_le16(tm.tm_sec);
3213	record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
3214	record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
3215				       bp->ver_resp.chip_rev << 8 |
3216				       bp->ver_resp.chip_metal);
3217	record->asic_id2 = 0;
3218	record->coredump_status = cpu_to_le32(status);
3219	record->ioctl_low_version = 0;
3220	record->ioctl_high_version = 0;
3221}
3222
3223static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
3224{
3225	u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
3226	struct coredump_segment_record *seg_record = NULL;
3227	u32 offset = 0, seg_hdr_len, seg_record_len;
3228	struct bnxt_coredump_segment_hdr seg_hdr;
3229	struct bnxt_coredump coredump = {NULL};
3230	time64_t start_time;
3231	u16 start_utc;
3232	int rc = 0, i;
3233
3234	start_time = ktime_get_real_seconds();
3235	start_utc = sys_tz.tz_minuteswest * 60;
3236	seg_hdr_len = sizeof(seg_hdr);
3237
3238	/* First segment should be hwrm_ver_get response */
3239	*dump_len = seg_hdr_len + ver_get_resp_len;
3240	if (buf) {
3241		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
3242					   0, 0, 0);
3243		memcpy(buf + offset, &seg_hdr, seg_hdr_len);
3244		offset += seg_hdr_len;
3245		memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
3246		offset += ver_get_resp_len;
3247	}
3248
3249	rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
3250	if (rc) {
3251		netdev_err(bp->dev, "Failed to get coredump segment list\n");
3252		goto err;
3253	}
3254
3255	*dump_len += seg_hdr_len * coredump.total_segs;
3256
3257	seg_record = (struct coredump_segment_record *)coredump.data;
3258	seg_record_len = sizeof(*seg_record);
3259
3260	for (i = 0; i < coredump.total_segs; i++) {
3261		u16 comp_id = le16_to_cpu(seg_record->component_id);
3262		u16 seg_id = le16_to_cpu(seg_record->segment_id);
3263		u32 duration = 0, seg_len = 0;
3264		unsigned long start, end;
3265
3266		start = jiffies;
3267
3268		rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
3269		if (rc) {
3270			netdev_err(bp->dev,
3271				   "Failed to initiate coredump for seg = %d\n",
3272				   seg_record->segment_id);
3273			goto next_seg;
3274		}
3275
3276		/* Write segment data into the buffer */
3277		rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
3278						     &seg_len, buf,
3279						     offset + seg_hdr_len);
3280		if (rc)
3281			netdev_err(bp->dev,
3282				   "Failed to retrieve coredump for seg = %d\n",
3283				   seg_record->segment_id);
3284
3285next_seg:
3286		end = jiffies;
3287		duration = jiffies_to_msecs(end - start);
3288		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
3289					   rc, duration, 0);
3290
3291		if (buf) {
3292			/* Write segment header into the buffer */
3293			memcpy(buf + offset, &seg_hdr, seg_hdr_len);
3294			offset += seg_hdr_len + seg_len;
3295		}
3296
3297		*dump_len += seg_len;
3298		seg_record =
3299			(struct coredump_segment_record *)((u8 *)seg_record +
3300							   seg_record_len);
3301	}
3302
3303err:
3304	if (buf)
3305		bnxt_fill_coredump_record(bp, buf + offset, start_time,
3306					  start_utc, coredump.total_segs + 1,
3307					  rc);
3308	kfree(coredump.data);
3309	*dump_len += sizeof(struct bnxt_coredump_record);
3310
3311	return rc;
3312}
3313
3314static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
3315{
3316	struct bnxt *bp = netdev_priv(dev);
3317
3318	if (bp->hwrm_spec_code < 0x10801)
3319		return -EOPNOTSUPP;
3320
3321	dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
3322			bp->ver_resp.hwrm_fw_min_8b << 16 |
3323			bp->ver_resp.hwrm_fw_bld_8b << 8 |
3324			bp->ver_resp.hwrm_fw_rsvd_8b;
3325
3326	return bnxt_get_coredump(bp, NULL, &dump->len);
 
 
3327}
3328
3329static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
3330			      void *buf)
3331{
3332	struct bnxt *bp = netdev_priv(dev);
3333
3334	if (bp->hwrm_spec_code < 0x10801)
3335		return -EOPNOTSUPP;
3336
3337	memset(buf, 0, dump->len);
3338
3339	return bnxt_get_coredump(bp, buf, &dump->len);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3340}
3341
3342void bnxt_ethtool_init(struct bnxt *bp)
3343{
3344	struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
3345	struct hwrm_selftest_qlist_input req = {0};
3346	struct bnxt_test_info *test_info;
3347	struct net_device *dev = bp->dev;
3348	int i, rc;
3349
3350	if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
3351		bnxt_get_pkgver(dev);
3352
3353	bp->num_tests = 0;
3354	if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
3355		return;
3356
3357	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
3358	mutex_lock(&bp->hwrm_cmd_lock);
3359	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3360	if (rc)
3361		goto ethtool_init_exit;
3362
3363	test_info = bp->test_info;
3364	if (!test_info)
3365		test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
3366	if (!test_info)
 
 
 
 
 
 
 
 
 
 
3367		goto ethtool_init_exit;
3368
3369	bp->test_info = test_info;
3370	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
3371	if (bp->num_tests > BNXT_MAX_TEST)
3372		bp->num_tests = BNXT_MAX_TEST;
3373
3374	test_info->offline_mask = resp->offline_tests;
3375	test_info->timeout = le16_to_cpu(resp->test_timeout);
3376	if (!test_info->timeout)
3377		test_info->timeout = HWRM_CMD_TIMEOUT;
3378	for (i = 0; i < bp->num_tests; i++) {
3379		char *str = test_info->string[i];
3380		char *fw_str = resp->test0_name + i * 32;
3381
3382		if (i == BNXT_MACLPBK_TEST_IDX) {
3383			strcpy(str, "Mac loopback test (offline)");
3384		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
3385			strcpy(str, "Phy loopback test (offline)");
3386		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
3387			strcpy(str, "Ext loopback test (offline)");
3388		} else if (i == BNXT_IRQ_TEST_IDX) {
3389			strcpy(str, "Interrupt_test (offline)");
3390		} else {
3391			strlcpy(str, fw_str, ETH_GSTRING_LEN);
3392			strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
3393			if (test_info->offline_mask & (1 << i))
3394				strncat(str, " (offline)",
3395					ETH_GSTRING_LEN - strlen(str));
3396			else
3397				strncat(str, " (online)",
3398					ETH_GSTRING_LEN - strlen(str));
3399		}
3400	}
3401
3402ethtool_init_exit:
3403	mutex_unlock(&bp->hwrm_cmd_lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3404}
3405
/* Free the self-test info allocated in bnxt_ethtool_init(). */
void bnxt_ethtool_free(struct bnxt *bp)
{
	kfree(bp->test_info);
	bp->test_info = NULL;
}
3411
/* ethtool operations table registered via dev->ethtool_ops; callbacks
 * are defined above and earlier in this file.
 */
const struct ethtool_ops bnxt_ethtool_ops = {
	.get_link_ksettings	= bnxt_get_link_ksettings,
	.set_link_ksettings	= bnxt_set_link_ksettings,
	.get_pauseparam		= bnxt_get_pauseparam,
	.set_pauseparam		= bnxt_set_pauseparam,
	.get_drvinfo		= bnxt_get_drvinfo,
	.get_wol		= bnxt_get_wol,
	.set_wol		= bnxt_set_wol,
	.get_coalesce		= bnxt_get_coalesce,
	.set_coalesce		= bnxt_set_coalesce,
	.get_msglevel		= bnxt_get_msglevel,
	.set_msglevel		= bnxt_set_msglevel,
	.get_sset_count		= bnxt_get_sset_count,
	.get_strings		= bnxt_get_strings,
	.get_ethtool_stats	= bnxt_get_ethtool_stats,
	.set_ringparam		= bnxt_set_ringparam,
	.get_ringparam		= bnxt_get_ringparam,
	.get_channels		= bnxt_get_channels,
	.set_channels		= bnxt_set_channels,
	.get_rxnfc		= bnxt_get_rxnfc,
	.set_rxnfc		= bnxt_set_rxnfc,
	.get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size      = bnxt_get_rxfh_key_size,
	.get_rxfh               = bnxt_get_rxfh,
	.flash_device		= bnxt_flash_device,
	.get_eeprom_len         = bnxt_get_eeprom_len,
	.get_eeprom             = bnxt_get_eeprom,
	.set_eeprom		= bnxt_set_eeprom,
	.get_link		= bnxt_get_link,
	.get_eee		= bnxt_get_eee,
	.set_eee		= bnxt_set_eee,
	.get_module_info	= bnxt_get_module_info,
	.get_module_eeprom	= bnxt_get_module_eeprom,
	.nway_reset		= bnxt_nway_reset,
	.set_phys_id		= bnxt_set_phys_id,
	.self_test		= bnxt_self_test,
	.reset			= bnxt_reset,
	.get_dump_flag		= bnxt_get_dump_flag,
	.get_dump_data		= bnxt_get_dump_data,
};