Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*******************************************************************************
   3  STMMAC Ethtool support
   4
   5  Copyright (C) 2007-2009  STMicroelectronics Ltd
   6
 
 
 
 
 
 
 
 
 
 
 
   7
   8  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
   9*******************************************************************************/
  10
  11#include <linux/etherdevice.h>
  12#include <linux/ethtool.h>
  13#include <linux/interrupt.h>
  14#include <linux/mii.h>
  15#include <linux/phylink.h>
  16#include <linux/net_tstamp.h>
  17#include <asm/io.h>
  18
  19#include "stmmac.h"
  20#include "dwmac_dma.h"
  21#include "dwxgmac2.h"
  22
/* Sizes of the register dump reported by ethtool -d, per MAC generation */
#define REG_SPACE_SIZE	0x1060
#define GMAC4_REG_SPACE_SIZE	0x116C
/* Driver names reported in ethtool drvinfo, per MAC generation */
#define MAC100_ETHTOOL_NAME	"st_mac100"
#define GMAC_ETHTOOL_NAME	"st_gmac"
#define XGMAC_ETHTOOL_NAME	"st_xgmac"

/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h
 *
 * It is here because dwmac_dma.h and dwmac4_dma.h can not be included at the
 * same time due to the conflicting macro names.
 */
#define GMAC4_DMA_CHAN_BASE_ADDR  0x00001100

/* Word (u32) offset inside the ethtool reg dump where the DMA registers
 * are copied, regardless of where the hardware actually maps them.
 */
#define ETHTOOL_DMA_OFFSET	55
  37
/* Descriptor for one ethtool statistic: the name shown by "ethtool -S",
 * plus the size and byte offset of the backing field inside stmmac_priv.
 */
struct stmmac_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name reported to userspace */
	int sizeof_stat;	/* sizeof() the backing field (u32 or u64) */
	int stat_offset;	/* offsetof() the field within stmmac_priv */
};

/* Build a stmmac_stats entry for member @m of priv->xstats */
#define STMMAC_STAT(m)	\
	{ #m, sizeof_field(struct stmmac_extra_stats, m),	\
	offsetof(struct stmmac_priv, xstats.m)}
  47
/* Driver "extra" statistics exposed via ethtool -S.  The order of entries
 * here must match the order the values are emitted in
 * stmmac_get_ethtool_stats() and the strings in stmmac_get_strings().
 */
static const struct stmmac_stats stmmac_gstrings_stats[] = {
	/* Transmit errors */
	STMMAC_STAT(tx_underflow),
	STMMAC_STAT(tx_carrier),
	STMMAC_STAT(tx_losscarrier),
	STMMAC_STAT(vlan_tag),
	STMMAC_STAT(tx_deferred),
	STMMAC_STAT(tx_vlan),
	STMMAC_STAT(tx_jabber),
	STMMAC_STAT(tx_frame_flushed),
	STMMAC_STAT(tx_payload_error),
	STMMAC_STAT(tx_ip_header_error),
	/* Receive errors */
	STMMAC_STAT(rx_desc),
	STMMAC_STAT(sa_filter_fail),
	STMMAC_STAT(overflow_error),
	STMMAC_STAT(ipc_csum_error),
	STMMAC_STAT(rx_collision),
	STMMAC_STAT(rx_crc_errors),
	STMMAC_STAT(dribbling_bit),
	STMMAC_STAT(rx_length),
	STMMAC_STAT(rx_mii),
	STMMAC_STAT(rx_multicast),
	STMMAC_STAT(rx_gmac_overflow),
	STMMAC_STAT(rx_watchdog),
	STMMAC_STAT(da_rx_filter_fail),
	STMMAC_STAT(sa_rx_filter_fail),
	STMMAC_STAT(rx_missed_cntr),
	STMMAC_STAT(rx_overflow_cntr),
	STMMAC_STAT(rx_vlan),
	STMMAC_STAT(rx_split_hdr_pkt_n),
	/* Tx/Rx IRQ error info */
	STMMAC_STAT(tx_undeflow_irq),
	STMMAC_STAT(tx_process_stopped_irq),
	STMMAC_STAT(tx_jabber_irq),
	STMMAC_STAT(rx_overflow_irq),
	STMMAC_STAT(rx_buf_unav_irq),
	STMMAC_STAT(rx_process_stopped_irq),
	STMMAC_STAT(rx_watchdog_irq),
	STMMAC_STAT(tx_early_irq),
	STMMAC_STAT(fatal_bus_error_irq),
	/* Tx/Rx IRQ Events */
	STMMAC_STAT(rx_early_irq),
	STMMAC_STAT(threshold),
	STMMAC_STAT(irq_receive_pmt_irq_n),
	/* MMC info */
	STMMAC_STAT(mmc_tx_irq_n),
	STMMAC_STAT(mmc_rx_irq_n),
	STMMAC_STAT(mmc_rx_csum_offload_irq_n),
	/* EEE */
	STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
	STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
	STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
	STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
	STMMAC_STAT(phy_eee_wakeup_error_n),
	/* Extended RDES status */
	STMMAC_STAT(ip_hdr_err),
	STMMAC_STAT(ip_payload_err),
	STMMAC_STAT(ip_csum_bypassed),
	STMMAC_STAT(ipv4_pkt_rcvd),
	STMMAC_STAT(ipv6_pkt_rcvd),
	STMMAC_STAT(no_ptp_rx_msg_type_ext),
	STMMAC_STAT(ptp_rx_msg_type_sync),
	STMMAC_STAT(ptp_rx_msg_type_follow_up),
	STMMAC_STAT(ptp_rx_msg_type_delay_req),
	STMMAC_STAT(ptp_rx_msg_type_delay_resp),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
	STMMAC_STAT(ptp_rx_msg_type_announce),
	STMMAC_STAT(ptp_rx_msg_type_management),
	STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
	STMMAC_STAT(ptp_frame_type),
	STMMAC_STAT(ptp_ver),
	STMMAC_STAT(timestamp_dropped),
	STMMAC_STAT(av_pkt_rcvd),
	STMMAC_STAT(av_tagged_pkt_rcvd),
	STMMAC_STAT(vlan_tag_priority_val),
	STMMAC_STAT(l3_filter_match),
	STMMAC_STAT(l4_filter_match),
	STMMAC_STAT(l3_l4_filter_no_match),
	/* PCS */
	STMMAC_STAT(irq_pcs_ane_n),
	STMMAC_STAT(irq_pcs_link_n),
	STMMAC_STAT(irq_rgmii_n),
	/* DEBUG */
	STMMAC_STAT(mtl_tx_status_fifo_full),
	STMMAC_STAT(mtl_tx_fifo_not_empty),
	STMMAC_STAT(mmtl_fifo_ctrl),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_write),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_wait),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_read),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_idle),
	STMMAC_STAT(mac_tx_in_pause),
	STMMAC_STAT(mac_tx_frame_ctrl_xfer),
	STMMAC_STAT(mac_tx_frame_ctrl_idle),
	STMMAC_STAT(mac_tx_frame_ctrl_wait),
	STMMAC_STAT(mac_tx_frame_ctrl_pause),
	STMMAC_STAT(mac_gmii_tx_proto_engine),
	STMMAC_STAT(mtl_rx_fifo_fill_level_full),
	STMMAC_STAT(mtl_rx_fifo_fill_above_thresh),
	STMMAC_STAT(mtl_rx_fifo_fill_below_thresh),
	STMMAC_STAT(mtl_rx_fifo_fill_level_empty),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_flush),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_read_data),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_status),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_idle),
	STMMAC_STAT(mtl_rx_fifo_ctrl_active),
	STMMAC_STAT(mac_rx_frame_ctrl_fifo),
	STMMAC_STAT(mac_gmii_rx_proto_engine),
	/* EST */
	STMMAC_STAT(mtl_est_cgce),
	STMMAC_STAT(mtl_est_hlbs),
	STMMAC_STAT(mtl_est_hlbf),
	STMMAC_STAT(mtl_est_btre),
	STMMAC_STAT(mtl_est_btrlm),
};
/* Number of entries in the extra-stats table above */
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
 166
/* statistics collected in queue which will be summed up for all TX or RX
 * queues, or summed up for both TX and RX queues(napi_poll, normal_irq_n).
 *
 * Order must match the accumulation order in stmmac_get_ethtool_stats().
 */
static const char stmmac_qstats_string[][ETH_GSTRING_LEN] = {
	"rx_pkt_n",
	"rx_normal_irq_n",
	"tx_pkt_n",
	"tx_normal_irq_n",
	"tx_clean",
	"tx_set_ic_bit",
	"tx_tso_frames",
	"tx_tso_nfrags",
	"normal_irq_n",
	"napi_poll",
};
#define STMMAC_QSTATS ARRAY_SIZE(stmmac_qstats_string)
 183
/* HW MAC Management counters (if supported) */
/* Build a stmmac_stats entry for member @m of priv->mmc
 * (struct stmmac_counters).
 */
#define STMMAC_MMC_STAT(m)	\
	{ #m, sizeof_field(struct stmmac_counters, m),	\
	offsetof(struct stmmac_priv, mmc.m)}

/* MMC hardware counter table; emitted only when priv->dma_cap.rmon is set.
 * Order must match the strings written by stmmac_get_strings().
 */
static const struct stmmac_stats stmmac_mmc[] = {
	STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
	STMMAC_MMC_STAT(mmc_tx_framecount_gb),
	STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
	STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
	STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_unicast_gb),
	STMMAC_MMC_STAT(mmc_tx_multicast_gb),
	STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
	STMMAC_MMC_STAT(mmc_tx_underflow_error),
	STMMAC_MMC_STAT(mmc_tx_singlecol_g),
	STMMAC_MMC_STAT(mmc_tx_multicol_g),
	STMMAC_MMC_STAT(mmc_tx_deferred),
	STMMAC_MMC_STAT(mmc_tx_latecol),
	STMMAC_MMC_STAT(mmc_tx_exesscol),
	STMMAC_MMC_STAT(mmc_tx_carrier_error),
	STMMAC_MMC_STAT(mmc_tx_octetcount_g),
	STMMAC_MMC_STAT(mmc_tx_framecount_g),
	STMMAC_MMC_STAT(mmc_tx_excessdef),
	STMMAC_MMC_STAT(mmc_tx_pause_frame),
	STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
	STMMAC_MMC_STAT(mmc_tx_lpi_usec),
	STMMAC_MMC_STAT(mmc_tx_lpi_tran),
	STMMAC_MMC_STAT(mmc_rx_framecount_gb),
	STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
	STMMAC_MMC_STAT(mmc_rx_octetcount_g),
	STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
	STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
	STMMAC_MMC_STAT(mmc_rx_crc_error),
	STMMAC_MMC_STAT(mmc_rx_align_error),
	STMMAC_MMC_STAT(mmc_rx_run_error),
	STMMAC_MMC_STAT(mmc_rx_jabber_error),
	STMMAC_MMC_STAT(mmc_rx_undersize_g),
	STMMAC_MMC_STAT(mmc_rx_oversize_g),
	STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_unicast_g),
	STMMAC_MMC_STAT(mmc_rx_length_error),
	STMMAC_MMC_STAT(mmc_rx_autofrangetype),
	STMMAC_MMC_STAT(mmc_rx_pause_frames),
	STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
	STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
	STMMAC_MMC_STAT(mmc_rx_watchdog_error),
	STMMAC_MMC_STAT(mmc_rx_lpi_usec),
	STMMAC_MMC_STAT(mmc_rx_lpi_tran),
	STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
	STMMAC_MMC_STAT(mmc_rx_discard_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_align_err_frames),
	STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
	STMMAC_MMC_STAT(mmc_rx_ipc_intr),
	STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
	STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
	STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
	STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
	STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
	STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
	STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
	STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
	STMMAC_MMC_STAT(mmc_rx_udp_gd),
	STMMAC_MMC_STAT(mmc_rx_udp_err),
	STMMAC_MMC_STAT(mmc_rx_tcp_gd),
	STMMAC_MMC_STAT(mmc_rx_tcp_err),
	STMMAC_MMC_STAT(mmc_rx_icmp_gd),
	STMMAC_MMC_STAT(mmc_rx_icmp_err),
	STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
	STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
	STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
	STMMAC_MMC_STAT(mmc_sgf_pass_fragment_cntr),
	STMMAC_MMC_STAT(mmc_sgf_fail_fragment_cntr),
	STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr),
	STMMAC_MMC_STAT(mmc_tx_hold_req_cntr),
	STMMAC_MMC_STAT(mmc_tx_gate_overrun_cntr),
	STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr),
	STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr),
	STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr),
	STMMAC_MMC_STAT(mmc_rx_fpe_fragment_cntr),
};
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
 287
/* Per-TX-queue statistic names; rendered as "q<N>_<name>" by
 * stmmac_get_qstats_string().
 */
static const char stmmac_qstats_tx_string[][ETH_GSTRING_LEN] = {
	"tx_pkt_n",
	"tx_irq_n",
#define STMMAC_TXQ_STATS ARRAY_SIZE(stmmac_qstats_tx_string)
};

/* Per-RX-queue statistic names; rendered as "q<N>_<name>" */
static const char stmmac_qstats_rx_string[][ETH_GSTRING_LEN] = {
	"rx_pkt_n",
	"rx_irq_n",
#define STMMAC_RXQ_STATS ARRAY_SIZE(stmmac_qstats_rx_string)
};
 299
 300static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
 301				      struct ethtool_drvinfo *info)
 302{
 303	struct stmmac_priv *priv = netdev_priv(dev);
 304
 305	if (priv->plat->has_gmac || priv->plat->has_gmac4)
 306		strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
 307	else if (priv->plat->has_xgmac)
 308		strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
 309	else
 310		strscpy(info->driver, MAC100_ETHTOOL_NAME,
 311			sizeof(info->driver));
 312
 313	if (priv->plat->pdev) {
 314		strscpy(info->bus_info, pci_name(priv->plat->pdev),
 315			sizeof(info->bus_info));
 316	}
 317}
 318
/* ethtool get_link_ksettings: when the link is driven by the internal
 * RGMII/SGMII PCS (and there is no integrated PCS), report link state and
 * advertisement from the PCS AN registers; otherwise delegate to phylink.
 */
static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
					     struct ethtool_link_ksettings *cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
	    (priv->hw->pcs & STMMAC_PCS_RGMII ||
	     priv->hw->pcs & STMMAC_PCS_SGMII)) {
		struct rgmii_adv adv;
		u32 supported, advertising, lp_advertising;

		/* No PCS link: report unknown speed/duplex and succeed */
		if (!priv->xstats.pcs_link) {
			cmd->base.speed = SPEED_UNKNOWN;
			cmd->base.duplex = DUPLEX_UNKNOWN;
			return 0;
		}
		cmd->base.duplex = priv->xstats.pcs_duplex;

		cmd->base.speed = priv->xstats.pcs_speed;

		/* Get and convert ADV/LP_ADV from the HW AN registers */
		if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv))
			return -EOPNOTSUPP;	/* should never happen indeed */

		/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */

		/* Work on legacy u32 bitmasks, then convert back below */
		ethtool_convert_link_mode_to_legacy_u32(
			&supported, cmd->link_modes.supported);
		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, cmd->link_modes.advertising);
		ethtool_convert_link_mode_to_legacy_u32(
			&lp_advertising, cmd->link_modes.lp_advertising);

		if (adv.pause & STMMAC_PCS_PAUSE)
			advertising |= ADVERTISED_Pause;
		if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
			advertising |= ADVERTISED_Asym_Pause;
		if (adv.lp_pause & STMMAC_PCS_PAUSE)
			lp_advertising |= ADVERTISED_Pause;
		if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
			lp_advertising |= ADVERTISED_Asym_Pause;

		/* Reg49[3] always set because ANE is always supported */
		/* NOTE(review): this stores ADVERTISED_Autoneg (a link-mode
		 * bit) in base.autoneg, which usually carries AUTONEG_ENABLE
		 * or AUTONEG_DISABLE — confirm this is intentional.
		 */
		cmd->base.autoneg = ADVERTISED_Autoneg;
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		lp_advertising |= ADVERTISED_Autoneg;

		/* PCS AN only reports a duplex flag, not a speed mask:
		 * advertise all speeds at the negotiated duplex.
		 */
		if (adv.duplex) {
			supported |= (SUPPORTED_1000baseT_Full |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_10baseT_Full);
			advertising |= (ADVERTISED_1000baseT_Full |
					ADVERTISED_100baseT_Full |
					ADVERTISED_10baseT_Full);
		} else {
			supported |= (SUPPORTED_1000baseT_Half |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_10baseT_Half);
			advertising |= (ADVERTISED_1000baseT_Half |
					ADVERTISED_100baseT_Half |
					ADVERTISED_10baseT_Half);
		}
		if (adv.lp_duplex)
			lp_advertising |= (ADVERTISED_1000baseT_Full |
					   ADVERTISED_100baseT_Full |
					   ADVERTISED_10baseT_Full);
		else
			lp_advertising |= (ADVERTISED_1000baseT_Half |
					   ADVERTISED_100baseT_Half |
					   ADVERTISED_10baseT_Half);
		cmd->base.port = PORT_OTHER;

		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.supported, supported);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.advertising, advertising);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.lp_advertising, lp_advertising);

		return 0;
	}

	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
}
 404
 405static int
 406stmmac_ethtool_set_link_ksettings(struct net_device *dev,
 407				  const struct ethtool_link_ksettings *cmd)
 408{
 409	struct stmmac_priv *priv = netdev_priv(dev);
 
 
 
 
 
 
 410
 411	if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
 412	    (priv->hw->pcs & STMMAC_PCS_RGMII ||
 413	     priv->hw->pcs & STMMAC_PCS_SGMII)) {
 414		/* Only support ANE */
 415		if (cmd->base.autoneg != AUTONEG_ENABLE)
 416			return -EINVAL;
 417
 418		mutex_lock(&priv->lock);
 419		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
 420		mutex_unlock(&priv->lock);
 
 
 
 
 
 
 
 
 
 
 
 421
 422		return 0;
 423	}
 424
 425	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
 
 
 426}
 427
 428static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
 429{
 430	struct stmmac_priv *priv = netdev_priv(dev);
 431	return priv->msg_enable;
 432}
 433
 434static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
 435{
 436	struct stmmac_priv *priv = netdev_priv(dev);
 437	priv->msg_enable = level;
 438
 439}
 440
 441static int stmmac_check_if_running(struct net_device *dev)
 442{
 443	if (!netif_running(dev))
 444		return -EBUSY;
 445	return 0;
 446}
 447
 448static int stmmac_ethtool_get_regs_len(struct net_device *dev)
 449{
 450	struct stmmac_priv *priv = netdev_priv(dev);
 451
 452	if (priv->plat->has_xgmac)
 453		return XGMAC_REGSIZE * 4;
 454	else if (priv->plat->has_gmac4)
 455		return GMAC4_REG_SPACE_SIZE;
 456	return REG_SPACE_SIZE;
 457}
 458
/* ethtool -d: dump MAC and DMA registers into @space at their natural
 * register-space offsets, then duplicate the DMA block at the fixed
 * ETHTOOL_DMA_OFFSET slot that userspace tooling expects.
 */
static void stmmac_ethtool_gregs(struct net_device *dev,
			  struct ethtool_regs *regs, void *space)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 *reg_space = (u32 *) space;

	stmmac_dump_mac_regs(priv, priv->hw, reg_space);
	stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);

	/* Copy DMA registers to where ethtool expects them */
	if (priv->plat->has_gmac4) {
		/* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */
		memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
		       &reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4],
		       NUM_DWMAC4_DMA_REGS * 4);
	} else if (!priv->plat->has_xgmac) {
		/* Legacy DWMAC: DMA registers live at DMA_BUS_MODE */
		memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
		       &reg_space[DMA_BUS_MODE / 4],
		       NUM_DWMAC1000_DMA_REGS * 4);
	}
}
 480
 481static int stmmac_nway_reset(struct net_device *dev)
 482{
 483	struct stmmac_priv *priv = netdev_priv(dev);
 484
 485	return phylink_ethtool_nway_reset(priv->phylink);
 486}
 487
 488static void stmmac_get_ringparam(struct net_device *netdev,
 489				 struct ethtool_ringparam *ring,
 490				 struct kernel_ethtool_ringparam *kernel_ring,
 491				 struct netlink_ext_ack *extack)
 492{
 493	struct stmmac_priv *priv = netdev_priv(netdev);
 494
 495	ring->rx_max_pending = DMA_MAX_RX_SIZE;
 496	ring->tx_max_pending = DMA_MAX_TX_SIZE;
 497	ring->rx_pending = priv->dma_conf.dma_rx_size;
 498	ring->tx_pending = priv->dma_conf.dma_tx_size;
 499}
 500
 501static int stmmac_set_ringparam(struct net_device *netdev,
 502				struct ethtool_ringparam *ring,
 503				struct kernel_ethtool_ringparam *kernel_ring,
 504				struct netlink_ext_ack *extack)
 505{
 506	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
 507	    ring->rx_pending < DMA_MIN_RX_SIZE ||
 508	    ring->rx_pending > DMA_MAX_RX_SIZE ||
 509	    !is_power_of_2(ring->rx_pending) ||
 510	    ring->tx_pending < DMA_MIN_TX_SIZE ||
 511	    ring->tx_pending > DMA_MAX_TX_SIZE ||
 512	    !is_power_of_2(ring->tx_pending))
 513		return -EINVAL;
 514
 515	return stmmac_reinit_ringparam(netdev, ring->rx_pending,
 516				       ring->tx_pending);
 
 
 
 517}
 518
 519static void
 520stmmac_get_pauseparam(struct net_device *netdev,
 521		      struct ethtool_pauseparam *pause)
 522{
 523	struct stmmac_priv *priv = netdev_priv(netdev);
 524	struct rgmii_adv adv_lp;
 525
 526	if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
 
 
 
 
 
 527		pause->autoneg = 1;
 
 528		if (!adv_lp.pause)
 529			return;
 530	} else {
 531		phylink_ethtool_get_pauseparam(priv->phylink, pause);
 
 
 532	}
 
 
 
 
 
 
 
 
 533}
 534
 535static int
 536stmmac_set_pauseparam(struct net_device *netdev,
 537		      struct ethtool_pauseparam *pause)
 538{
 539	struct stmmac_priv *priv = netdev_priv(netdev);
 540	struct rgmii_adv adv_lp;
 
 
 
 
 
 541
 542	if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
 543		pause->autoneg = 1;
 
 544		if (!adv_lp.pause)
 545			return -EOPNOTSUPP;
 546		return 0;
 547	} else {
 548		return phylink_ethtool_set_pauseparam(priv->phylink, pause);
 549	}
 550}
 551
 552static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
 553{
 554	u64 total;
 555	int cpu;
 556
 557	total = 0;
 558	for_each_possible_cpu(cpu) {
 559		struct stmmac_pcpu_stats *pcpu;
 560		unsigned int start;
 561		u64 irq_n;
 562
 563		pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
 564		do {
 565			start = u64_stats_fetch_begin(&pcpu->syncp);
 566			irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]);
 567		} while (u64_stats_fetch_retry(&pcpu->syncp, start));
 568		total += irq_n;
 569	}
 570	return total;
 571}
 572
 573static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q)
 574{
 575	u64 total;
 576	int cpu;
 577
 578	total = 0;
 579	for_each_possible_cpu(cpu) {
 580		struct stmmac_pcpu_stats *pcpu;
 581		unsigned int start;
 582		u64 irq_n;
 583
 584		pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
 585		do {
 586			start = u64_stats_fetch_begin(&pcpu->syncp);
 587			irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]);
 588		} while (u64_stats_fetch_retry(&pcpu->syncp, start));
 589		total += irq_n;
 590	}
 591	return total;
 592}
 593
/* Fill @data with per-queue stats in the order matching
 * stmmac_get_qstats_string(): all TX queues first, then all RX queues,
 * two values (pkt_n, irq_n) per queue.
 */
static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int start;
	int q;

	for (q = 0; q < tx_cnt; q++) {
		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
		u64 pkt_n;

		/* Seqcount-protected snapshot of the NAPI TX packet count */
		do {
			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
			pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n);
		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));

		*data++ = pkt_n;
		*data++ = stmmac_get_tx_normal_irq_n(priv, q);
	}

	for (q = 0; q < rx_cnt; q++) {
		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
		u64 pkt_n;

		/* Seqcount-protected snapshot of the NAPI RX packet count */
		do {
			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
			pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n);
		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));

		*data++ = pkt_n;
		*data++ = stmmac_get_rx_normal_irq_n(priv, q);
	}
}
 627
/* ethtool -S: emit all statistics into @data in the exact order declared
 * by stmmac_get_strings()/stmmac_get_sset_count(): safety features, MMC
 * counters, extra stats, summed queue stats, then per-queue stats.
 */
static void stmmac_get_ethtool_stats(struct net_device *dev,
				 struct ethtool_stats *dummy, u64 *data)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u64 napi_poll = 0, normal_irq_n = 0;
	int i, j = 0, pos, ret;
	unsigned long count;
	unsigned int start;

	/* One slot per safety feature the hardware can actually report */
	if (priv->dma_cap.asp) {
		for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
			if (!stmmac_safety_feat_dump(priv, &priv->sstats, i,
						&count, NULL))
				data[j++] = count;
		}
	}

	/* Update the DMA HW counters for dwmac10/100 */
	ret = stmmac_dma_diagnostic_fr(priv, &priv->xstats, priv->ioaddr);
	if (ret) {
		/* If supported, for new GMAC chips expose the MMC counters */
		if (priv->dma_cap.rmon) {
			stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);

			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
				char *p;
				p = (char *)priv + stmmac_mmc[i].stat_offset;

				/* Field width is recorded in the table */
				data[j++] = (stmmac_mmc[i].sizeof_stat ==
					     sizeof(u64)) ? (*(u64 *)p) :
					     (*(u32 *)p);
			}
		}
		if (priv->eee_enabled) {
			int val = phylink_get_eee_err(priv->phylink);
			if (val)
				priv->xstats.phy_eee_wakeup_error_n = val;
		}

		/* Refresh the DEBUG section of xstats from the MTL/MAC
		 * debug registers on cores that have them.
		 */
		if (priv->synopsys_id >= DWMAC_CORE_3_50)
			stmmac_mac_debug(priv, priv->ioaddr,
					(void *)&priv->xstats,
					rx_queues_count, tx_queues_count);
	}
	/* Extra stats, read straight out of priv via the descriptor table */
	for (i = 0; i < STMMAC_STATS_LEN; i++) {
		char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
		data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
			     sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
	}

	/* Summed queue stats: every RX queue accumulates into the same
	 * slots, so j is rewound to @pos on each iteration.
	 */
	pos = j;
	for (i = 0; i < rx_queues_count; i++) {
		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
		struct stmmac_napi_rx_stats snapshot;
		u64 n_irq;

		j = pos;
		do {
			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
			snapshot = rxq_stats->napi;
		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));

		data[j++] += u64_stats_read(&snapshot.rx_pkt_n);
		n_irq = stmmac_get_rx_normal_irq_n(priv, i);
		data[j++] += n_irq;
		normal_irq_n += n_irq;
		napi_poll += u64_stats_read(&snapshot.poll);
	}

	/* Same accumulation scheme for the TX queues; TX stats come from
	 * both the queue-context and NAPI-context counters.
	 */
	pos = j;
	for (i = 0; i < tx_queues_count; i++) {
		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
		struct stmmac_napi_tx_stats napi_snapshot;
		struct stmmac_q_tx_stats q_snapshot;
		u64 n_irq;

		j = pos;
		do {
			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
			q_snapshot = txq_stats->q;
		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
		do {
			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
			napi_snapshot = txq_stats->napi;
		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));

		data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n);
		n_irq = stmmac_get_tx_normal_irq_n(priv, i);
		data[j++] += n_irq;
		normal_irq_n += n_irq;
		data[j++] += u64_stats_read(&napi_snapshot.tx_clean);
		data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) +
			u64_stats_read(&napi_snapshot.tx_set_ic_bit);
		data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames);
		data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags);
		napi_poll += u64_stats_read(&napi_snapshot.poll);
	}
	normal_irq_n += priv->xstats.rx_early_irq;
	data[j++] = normal_irq_n;
	data[j++] = napi_poll;

	/* Finally the per-queue (non-summed) stats */
	stmmac_get_per_qstats(priv, &data[j]);
}
 733
 734static int stmmac_get_sset_count(struct net_device *netdev, int sset)
 735{
 736	struct stmmac_priv *priv = netdev_priv(netdev);
 737	u32 tx_cnt = priv->plat->tx_queues_to_use;
 738	u32 rx_cnt = priv->plat->rx_queues_to_use;
 739	int i, len, safety_len = 0;
 740
 741	switch (sset) {
 742	case ETH_SS_STATS:
 743		len = STMMAC_STATS_LEN + STMMAC_QSTATS +
 744		      STMMAC_TXQ_STATS * tx_cnt +
 745		      STMMAC_RXQ_STATS * rx_cnt;
 746
 747		if (priv->dma_cap.rmon)
 748			len += STMMAC_MMC_STATS_LEN;
 749		if (priv->dma_cap.asp) {
 
 
 750			for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
 751				if (!stmmac_safety_feat_dump(priv,
 752							&priv->sstats, i,
 753							NULL, NULL))
 754					safety_len++;
 755			}
 756
 757			len += safety_len;
 758		}
 759
 760		return len;
 761	case ETH_SS_TEST:
 762		return stmmac_selftest_get_count(priv);
 763	default:
 764		return -EOPNOTSUPP;
 765	}
 766}
 767
 768static void stmmac_get_qstats_string(struct stmmac_priv *priv, u8 *data)
 769{
 770	u32 tx_cnt = priv->plat->tx_queues_to_use;
 771	u32 rx_cnt = priv->plat->rx_queues_to_use;
 772	int q, stat;
 773
 774	for (q = 0; q < tx_cnt; q++) {
 775		for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
 776			snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q,
 777				 stmmac_qstats_tx_string[stat]);
 778			data += ETH_GSTRING_LEN;
 779		}
 780	}
 781	for (q = 0; q < rx_cnt; q++) {
 782		for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
 783			snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q,
 784				 stmmac_qstats_rx_string[stat]);
 785			data += ETH_GSTRING_LEN;
 786		}
 787	}
 788}
 789
/* ethtool string sets; the ETH_SS_STATS ordering here defines the layout
 * that stmmac_get_ethtool_stats() must follow.
 */
static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;
	u8 *p = data;
	struct stmmac_priv *priv = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		/* Safety-feature names first (only dumpable entries) */
		if (priv->dma_cap.asp) {
			for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
				const char *desc;
				if (!stmmac_safety_feat_dump(priv,
							&priv->sstats, i,
							NULL, &desc)) {
					memcpy(p, desc, ETH_GSTRING_LEN);
					p += ETH_GSTRING_LEN;
				}
			}
		}
		/* Then MMC counters, extra stats and summed queue stats */
		if (priv->dma_cap.rmon)
			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
				memcpy(p, stmmac_mmc[i].stat_string,
				       ETH_GSTRING_LEN);
				p += ETH_GSTRING_LEN;
			}
		for (i = 0; i < STMMAC_STATS_LEN; i++) {
			memcpy(p, stmmac_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < STMMAC_QSTATS; i++) {
			memcpy(p, stmmac_qstats_string[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		/* Per-queue names come last */
		stmmac_get_qstats_string(priv, p);
		break;
	case ETH_SS_TEST:
		stmmac_selftest_get_strings(priv, p);
		break;
	default:
		WARN_ON(1);
		break;
	}
}
 833
 834/* Currently only support WOL through Magic packet. */
 835static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 836{
 837	struct stmmac_priv *priv = netdev_priv(dev);
 838
 839	if (!priv->plat->pmt)
 840		return phylink_ethtool_get_wol(priv->phylink, wol);
 841
 842	mutex_lock(&priv->lock);
 843	if (device_can_wakeup(priv->device)) {
 844		wol->supported = WAKE_MAGIC | WAKE_UCAST;
 845		if (priv->hw_cap_support && !priv->dma_cap.pmt_magic_frame)
 846			wol->supported &= ~WAKE_MAGIC;
 847		wol->wolopts = priv->wolopts;
 848	}
 849	mutex_unlock(&priv->lock);
 850}
 851
/* Configure Wake-on-LAN: delegated to the PHY when the MAC has no PMT
 * block, otherwise arms/disarms the MAC wakeup IRQ and records wolopts.
 */
static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 support = WAKE_MAGIC | WAKE_UCAST;

	if (!device_can_wakeup(priv->device))
		return -EOPNOTSUPP;

	/* PHY-driven WoL path */
	if (!priv->plat->pmt) {
		int ret = phylink_ethtool_set_wol(priv->phylink, wol);

		if (!ret)
			device_set_wakeup_enable(priv->device, !!wol->wolopts);
		return ret;
	}

	/* By default almost all GMAC devices support the WoL via
	 * magic frame but we can disable it if the HW capability
	 * register shows no support for pmt_magic_frame. */
	if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
		wol->wolopts &= ~WAKE_MAGIC;

	if (wol->wolopts & ~support)
		return -EINVAL;

	if (wol->wolopts) {
		pr_info("stmmac: wakeup enable\n");
		device_set_wakeup_enable(priv->device, 1);
		/* Avoid unbalanced enable_irq_wake calls */
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = false;
	} else {
		device_set_wakeup_enable(priv->device, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = true;
	}

	mutex_lock(&priv->lock);
	priv->wolopts = wol->wolopts;
	mutex_unlock(&priv->lock);

	return 0;
}
 898
 899static int stmmac_ethtool_op_get_eee(struct net_device *dev,
 900				     struct ethtool_eee *edata)
 901{
 902	struct stmmac_priv *priv = netdev_priv(dev);
 903
 904	if (!priv->dma_cap.eee)
 905		return -EOPNOTSUPP;
 906
 907	edata->eee_enabled = priv->eee_enabled;
 908	edata->eee_active = priv->eee_active;
 909	edata->tx_lpi_timer = priv->tx_lpi_timer;
 910	edata->tx_lpi_enabled = priv->tx_lpi_enabled;
 911
 912	return phylink_ethtool_get_eee(priv->phylink, edata);
 913}
 914
/* Configure EEE: only eee_enabled and tx_lpi_timer are configurable here;
 * the request is validated/applied by phylink before the MAC is updated.
 */
static int stmmac_ethtool_op_set_eee(struct net_device *dev,
				     struct ethtool_eee *edata)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	if (!priv->dma_cap.eee)
		return -EOPNOTSUPP;

	/* tx-lpi enable/disable cannot be changed independently */
	if (priv->tx_lpi_enabled != edata->tx_lpi_enabled)
		netdev_warn(priv->dev,
			    "Setting EEE tx-lpi is not supported\n");

	if (!edata->eee_enabled)
		stmmac_disable_eee_mode(priv);

	/* Let the PHY layer accept or reject the request first */
	ret = phylink_ethtool_set_eee(priv->phylink, edata);
	if (ret)
		return ret;

	/* Re-init MAC EEE only when the LPI timer actually changed */
	if (edata->eee_enabled &&
	    priv->tx_lpi_timer != edata->tx_lpi_timer) {
		priv->tx_lpi_timer = edata->tx_lpi_timer;
		stmmac_eee_init(priv);
	}

	return 0;
}
 943
 944static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
 945{
 946	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
 947
 948	if (!clk) {
 949		clk = priv->plat->clk_ref_rate;
 950		if (!clk)
 951			return 0;
 952	}
 953
 954	return (usec * (clk / 1000000)) / 256;
 955}
 956
 957static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
 958{
 959	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
 960
 961	if (!clk) {
 962		clk = priv->plat->clk_ref_rate;
 963		if (!clk)
 964			return 0;
 965	}
 966
 967	return (riwt * 256) / (clk / 1000000);
 968}
 969
 970static int __stmmac_get_coalesce(struct net_device *dev,
 971				 struct ethtool_coalesce *ec,
 972				 int queue)
 973{
 974	struct stmmac_priv *priv = netdev_priv(dev);
 975	u32 max_cnt;
 976	u32 rx_cnt;
 977	u32 tx_cnt;
 978
 979	rx_cnt = priv->plat->rx_queues_to_use;
 980	tx_cnt = priv->plat->tx_queues_to_use;
 981	max_cnt = max(rx_cnt, tx_cnt);
 982
 983	if (queue < 0)
 984		queue = 0;
 985	else if (queue >= max_cnt)
 986		return -EINVAL;
 987
 988	if (queue < tx_cnt) {
 989		ec->tx_coalesce_usecs = priv->tx_coal_timer[queue];
 990		ec->tx_max_coalesced_frames = priv->tx_coal_frames[queue];
 991	} else {
 992		ec->tx_coalesce_usecs = 0;
 993		ec->tx_max_coalesced_frames = 0;
 994	}
 995
 996	if (priv->use_riwt && queue < rx_cnt) {
 997		ec->rx_max_coalesced_frames = priv->rx_coal_frames[queue];
 998		ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt[queue],
 999							 priv);
1000	} else {
1001		ec->rx_max_coalesced_frames = 0;
1002		ec->rx_coalesce_usecs = 0;
1003	}
1004
1005	return 0;
1006}
1007
/* ethtool ->get_coalesce: report the device-wide settings (queue 0's view,
 * via the negative-queue convention of __stmmac_get_coalesce()).
 */
static int stmmac_get_coalesce(struct net_device *dev,
			       struct ethtool_coalesce *ec,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack)
{
	return __stmmac_get_coalesce(dev, ec, -1);
}
1015
/* ethtool ->get_per_queue_coalesce: report the settings of one queue. */
static int stmmac_get_per_queue_coalesce(struct net_device *dev, u32 queue,
					 struct ethtool_coalesce *ec)
{
	return __stmmac_get_coalesce(dev, ec, queue);
}
1021
1022static int __stmmac_set_coalesce(struct net_device *dev,
1023				 struct ethtool_coalesce *ec,
1024				 int queue)
1025{
1026	struct stmmac_priv *priv = netdev_priv(dev);
1027	bool all_queues = false;
1028	unsigned int rx_riwt;
1029	u32 max_cnt;
1030	u32 rx_cnt;
1031	u32 tx_cnt;
1032
1033	rx_cnt = priv->plat->rx_queues_to_use;
1034	tx_cnt = priv->plat->tx_queues_to_use;
1035	max_cnt = max(rx_cnt, tx_cnt);
1036
1037	if (queue < 0)
1038		all_queues = true;
1039	else if (queue >= max_cnt)
1040		return -EINVAL;
1041
1042	if (priv->use_riwt) {
1043		rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
1044
1045		if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
1046			return -EINVAL;
1047
1048		if (all_queues) {
1049			int i;
 
 
 
 
 
 
 
 
 
 
 
1050
1051			for (i = 0; i < rx_cnt; i++) {
1052				priv->rx_riwt[i] = rx_riwt;
1053				stmmac_rx_watchdog(priv, priv->ioaddr,
1054						   rx_riwt, i);
1055				priv->rx_coal_frames[i] =
1056					ec->rx_max_coalesced_frames;
1057			}
1058		} else if (queue < rx_cnt) {
1059			priv->rx_riwt[queue] = rx_riwt;
1060			stmmac_rx_watchdog(priv, priv->ioaddr,
1061					   rx_riwt, queue);
1062			priv->rx_coal_frames[queue] =
1063				ec->rx_max_coalesced_frames;
1064		}
1065	}
1066
1067	if ((ec->tx_coalesce_usecs == 0) &&
1068	    (ec->tx_max_coalesced_frames == 0))
1069		return -EINVAL;
1070
1071	if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) ||
1072	    (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
1073		return -EINVAL;
1074
1075	if (all_queues) {
1076		int i;
1077
1078		for (i = 0; i < tx_cnt; i++) {
1079			priv->tx_coal_frames[i] =
1080				ec->tx_max_coalesced_frames;
1081			priv->tx_coal_timer[i] =
1082				ec->tx_coalesce_usecs;
1083		}
1084	} else if (queue < tx_cnt) {
1085		priv->tx_coal_frames[queue] =
1086			ec->tx_max_coalesced_frames;
1087		priv->tx_coal_timer[queue] =
1088			ec->tx_coalesce_usecs;
1089	}
1090
1091	return 0;
1092}
1093
/* ethtool ->set_coalesce: apply the settings to all queues. */
static int stmmac_set_coalesce(struct net_device *dev,
			       struct ethtool_coalesce *ec,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack)
{
	return __stmmac_set_coalesce(dev, ec, -1);
}
1101
/* ethtool ->set_per_queue_coalesce: apply the settings to one queue. */
static int stmmac_set_per_queue_coalesce(struct net_device *dev, u32 queue,
					 struct ethtool_coalesce *ec)
{
	return __stmmac_set_coalesce(dev, ec, queue);
}
1107
1108static int stmmac_get_rxnfc(struct net_device *dev,
1109			    struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
1110{
1111	struct stmmac_priv *priv = netdev_priv(dev);
1112
1113	switch (rxnfc->cmd) {
1114	case ETHTOOL_GRXRINGS:
1115		rxnfc->data = priv->plat->rx_queues_to_use;
1116		break;
1117	default:
1118		return -EOPNOTSUPP;
1119	}
1120
1121	return 0;
1122}
1123
/* ethtool ->get_rxfh_key_size: size in bytes of the RSS hash key. */
static u32 stmmac_get_rxfh_key_size(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	return sizeof(priv->rss.key);
}
1130
/* ethtool ->get_rxfh_indir_size: number of RSS indirection table entries. */
static u32 stmmac_get_rxfh_indir_size(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	return ARRAY_SIZE(priv->rss.table);
}
1137
1138static int stmmac_get_rxfh(struct net_device *dev,
1139			   struct ethtool_rxfh_param *rxfh)
1140{
1141	struct stmmac_priv *priv = netdev_priv(dev);
1142	int i;
1143
1144	if (rxfh->indir) {
1145		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
1146			rxfh->indir[i] = priv->rss.table[i];
1147	}
1148
1149	if (rxfh->key)
1150		memcpy(rxfh->key, priv->rss.key, sizeof(priv->rss.key));
1151	rxfh->hfunc = ETH_RSS_HASH_TOP;
 
 
1152
1153	return 0;
1154}
1155
1156static int stmmac_set_rxfh(struct net_device *dev,
1157			   struct ethtool_rxfh_param *rxfh,
1158			   struct netlink_ext_ack *extack)
1159{
1160	struct stmmac_priv *priv = netdev_priv(dev);
1161	int i;
1162
1163	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
1164	    rxfh->hfunc != ETH_RSS_HASH_TOP)
1165		return -EOPNOTSUPP;
1166
1167	if (rxfh->indir) {
1168		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
1169			priv->rss.table[i] = rxfh->indir[i];
1170	}
1171
1172	if (rxfh->key)
1173		memcpy(priv->rss.key, rxfh->key, sizeof(priv->rss.key));
1174
1175	return stmmac_rss_configure(priv, priv->hw, &priv->rss,
1176				    priv->plat->rx_queues_to_use);
1177}
1178
1179static void stmmac_get_channels(struct net_device *dev,
1180				struct ethtool_channels *chan)
1181{
1182	struct stmmac_priv *priv = netdev_priv(dev);
1183
1184	chan->rx_count = priv->plat->rx_queues_to_use;
1185	chan->tx_count = priv->plat->tx_queues_to_use;
1186	chan->max_rx = priv->dma_cap.number_rx_queues;
1187	chan->max_tx = priv->dma_cap.number_tx_queues;
1188}
1189
1190static int stmmac_set_channels(struct net_device *dev,
1191			       struct ethtool_channels *chan)
1192{
1193	struct stmmac_priv *priv = netdev_priv(dev);
1194
1195	if (chan->rx_count > priv->dma_cap.number_rx_queues ||
1196	    chan->tx_count > priv->dma_cap.number_tx_queues ||
1197	    !chan->rx_count || !chan->tx_count)
1198		return -EINVAL;
1199
1200	return stmmac_reinit_queues(dev, chan->rx_count, chan->tx_count);
1201}
1202
1203static int stmmac_get_ts_info(struct net_device *dev,
1204			      struct ethtool_ts_info *info)
1205{
1206	struct stmmac_priv *priv = netdev_priv(dev);
1207
1208	if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
1209
1210		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1211					SOF_TIMESTAMPING_TX_HARDWARE |
1212					SOF_TIMESTAMPING_RX_SOFTWARE |
1213					SOF_TIMESTAMPING_RX_HARDWARE |
1214					SOF_TIMESTAMPING_SOFTWARE |
1215					SOF_TIMESTAMPING_RAW_HARDWARE;
1216
1217		if (priv->ptp_clock)
1218			info->phc_index = ptp_clock_index(priv->ptp_clock);
1219
1220		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1221
1222		info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
1223				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1224				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1225				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1226				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1227				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1228				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
1229				    (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
1230				    (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
1231				    (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
1232				    (1 << HWTSTAMP_FILTER_ALL));
1233		return 0;
1234	} else
1235		return ethtool_op_get_ts_info(dev, info);
1236}
1237
1238static int stmmac_get_tunable(struct net_device *dev,
1239			      const struct ethtool_tunable *tuna, void *data)
1240{
1241	struct stmmac_priv *priv = netdev_priv(dev);
1242	int ret = 0;
1243
1244	switch (tuna->id) {
1245	case ETHTOOL_RX_COPYBREAK:
1246		*(u32 *)data = priv->rx_copybreak;
1247		break;
1248	default:
1249		ret = -EINVAL;
1250		break;
1251	}
1252
1253	return ret;
1254}
1255
1256static int stmmac_set_tunable(struct net_device *dev,
1257			      const struct ethtool_tunable *tuna,
1258			      const void *data)
1259{
1260	struct stmmac_priv *priv = netdev_priv(dev);
1261	int ret = 0;
1262
1263	switch (tuna->id) {
1264	case ETHTOOL_RX_COPYBREAK:
1265		priv->rx_copybreak = *(u32 *)data;
1266		break;
1267	default:
1268		ret = -EINVAL;
1269		break;
1270	}
1271
1272	return ret;
1273}
1274
/* ethtool operations for stmmac devices.
 *
 * ->begin rejects every ethtool request while the interface is down;
 * coalescing supports both global and per-queue usecs/frames tuning.
 */
static const struct ethtool_ops stmmac_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.begin = stmmac_check_if_running,
	.get_drvinfo = stmmac_ethtool_getdrvinfo,
	.get_msglevel = stmmac_ethtool_getmsglevel,
	.set_msglevel = stmmac_ethtool_setmsglevel,
	.get_regs = stmmac_ethtool_gregs,
	.get_regs_len = stmmac_ethtool_get_regs_len,
	.get_link = ethtool_op_get_link,
	.nway_reset = stmmac_nway_reset,
	.get_ringparam = stmmac_get_ringparam,
	.set_ringparam = stmmac_set_ringparam,
	.get_pauseparam = stmmac_get_pauseparam,
	.set_pauseparam = stmmac_set_pauseparam,
	.self_test = stmmac_selftest_run,
	.get_ethtool_stats = stmmac_get_ethtool_stats,
	.get_strings = stmmac_get_strings,
	.get_wol = stmmac_get_wol,
	.set_wol = stmmac_set_wol,
	.get_eee = stmmac_ethtool_op_get_eee,
	.set_eee = stmmac_ethtool_op_set_eee,
	.get_sset_count	= stmmac_get_sset_count,
	.get_rxnfc = stmmac_get_rxnfc,
	.get_rxfh_key_size = stmmac_get_rxfh_key_size,
	.get_rxfh_indir_size = stmmac_get_rxfh_indir_size,
	.get_rxfh = stmmac_get_rxfh,
	.set_rxfh = stmmac_set_rxfh,
	.get_ts_info = stmmac_get_ts_info,
	.get_coalesce = stmmac_get_coalesce,
	.set_coalesce = stmmac_set_coalesce,
	.get_per_queue_coalesce = stmmac_get_per_queue_coalesce,
	.set_per_queue_coalesce = stmmac_set_per_queue_coalesce,
	.get_channels = stmmac_get_channels,
	.set_channels = stmmac_set_channels,
	.get_tunable = stmmac_get_tunable,
	.set_tunable = stmmac_set_tunable,
	.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
	.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
};
1315
/* Attach the stmmac ethtool operations to a netdev during probe. */
void stmmac_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &stmmac_ethtool_ops;
}
v4.17
 
  1/*******************************************************************************
  2  STMMAC Ethtool support
  3
  4  Copyright (C) 2007-2009  STMicroelectronics Ltd
  5
  6  This program is free software; you can redistribute it and/or modify it
  7  under the terms and conditions of the GNU General Public License,
  8  version 2, as published by the Free Software Foundation.
  9
 10  This program is distributed in the hope it will be useful, but WITHOUT
 11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 13  more details.
 14
 15  The full GNU General Public License is included in this distribution in
 16  the file called "COPYING".
 17
 18  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 19*******************************************************************************/
 20
 21#include <linux/etherdevice.h>
 22#include <linux/ethtool.h>
 23#include <linux/interrupt.h>
 24#include <linux/mii.h>
 25#include <linux/phy.h>
 26#include <linux/net_tstamp.h>
 27#include <asm/io.h>
 28
 29#include "stmmac.h"
 30#include "dwmac_dma.h"
 
 31
 32#define REG_SPACE_SIZE	0x1060
 
 33#define MAC100_ETHTOOL_NAME	"st_mac100"
 34#define GMAC_ETHTOOL_NAME	"st_gmac"
 
 
 
 
 
 
 
 
 35
 36#define ETHTOOL_DMA_OFFSET	55
 37
/* Descriptor for one entry of the ethtool statistics tables below:
 * the name reported to user space, the size of the counter field and
 * its byte offset inside struct stmmac_priv.
 */
struct stmmac_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};
 43
/* Build one stmmac_stats entry from field @m of struct stmmac_extra_stats
 * (reached through priv->xstats).
 */
#define STMMAC_STAT(m)	\
	{ #m, FIELD_SIZEOF(struct stmmac_extra_stats, m),	\
	offsetof(struct stmmac_priv, xstats.m)}
 47
/* Software "extra" statistics exposed through ethtool -S; each entry maps
 * a reported name to a field of priv->xstats (struct stmmac_extra_stats).
 * The order here must stay in sync with stmmac_get_strings()/
 * stmmac_get_ethtool_stats().
 */
static const struct stmmac_stats stmmac_gstrings_stats[] = {
	/* Transmit errors */
	STMMAC_STAT(tx_underflow),
	STMMAC_STAT(tx_carrier),
	STMMAC_STAT(tx_losscarrier),
	STMMAC_STAT(vlan_tag),
	STMMAC_STAT(tx_deferred),
	STMMAC_STAT(tx_vlan),
	STMMAC_STAT(tx_jabber),
	STMMAC_STAT(tx_frame_flushed),
	STMMAC_STAT(tx_payload_error),
	STMMAC_STAT(tx_ip_header_error),
	/* Receive errors */
	STMMAC_STAT(rx_desc),
	STMMAC_STAT(sa_filter_fail),
	STMMAC_STAT(overflow_error),
	STMMAC_STAT(ipc_csum_error),
	STMMAC_STAT(rx_collision),
	STMMAC_STAT(rx_crc_errors),
	STMMAC_STAT(dribbling_bit),
	STMMAC_STAT(rx_length),
	STMMAC_STAT(rx_mii),
	STMMAC_STAT(rx_multicast),
	STMMAC_STAT(rx_gmac_overflow),
	STMMAC_STAT(rx_watchdog),
	STMMAC_STAT(da_rx_filter_fail),
	STMMAC_STAT(sa_rx_filter_fail),
	STMMAC_STAT(rx_missed_cntr),
	STMMAC_STAT(rx_overflow_cntr),
	STMMAC_STAT(rx_vlan),
	/* Tx/Rx IRQ error info */
	STMMAC_STAT(tx_undeflow_irq),
	STMMAC_STAT(tx_process_stopped_irq),
	STMMAC_STAT(tx_jabber_irq),
	STMMAC_STAT(rx_overflow_irq),
	STMMAC_STAT(rx_buf_unav_irq),
	STMMAC_STAT(rx_process_stopped_irq),
	STMMAC_STAT(rx_watchdog_irq),
	STMMAC_STAT(tx_early_irq),
	STMMAC_STAT(fatal_bus_error_irq),
	/* Tx/Rx IRQ Events */
	STMMAC_STAT(rx_early_irq),
	STMMAC_STAT(threshold),
	STMMAC_STAT(tx_pkt_n),
	STMMAC_STAT(rx_pkt_n),
	STMMAC_STAT(normal_irq_n),
	STMMAC_STAT(rx_normal_irq_n),
	STMMAC_STAT(napi_poll),
	STMMAC_STAT(tx_normal_irq_n),
	STMMAC_STAT(tx_clean),
	STMMAC_STAT(tx_set_ic_bit),
	STMMAC_STAT(irq_receive_pmt_irq_n),
	/* MMC info */
	STMMAC_STAT(mmc_tx_irq_n),
	STMMAC_STAT(mmc_rx_irq_n),
	STMMAC_STAT(mmc_rx_csum_offload_irq_n),
	/* EEE */
	STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
	STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
	STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
	STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
	STMMAC_STAT(phy_eee_wakeup_error_n),
	/* Extended RDES status */
	STMMAC_STAT(ip_hdr_err),
	STMMAC_STAT(ip_payload_err),
	STMMAC_STAT(ip_csum_bypassed),
	STMMAC_STAT(ipv4_pkt_rcvd),
	STMMAC_STAT(ipv6_pkt_rcvd),
	STMMAC_STAT(no_ptp_rx_msg_type_ext),
	STMMAC_STAT(ptp_rx_msg_type_sync),
	STMMAC_STAT(ptp_rx_msg_type_follow_up),
	STMMAC_STAT(ptp_rx_msg_type_delay_req),
	STMMAC_STAT(ptp_rx_msg_type_delay_resp),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
	STMMAC_STAT(ptp_rx_msg_type_announce),
	STMMAC_STAT(ptp_rx_msg_type_management),
	STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
	STMMAC_STAT(ptp_frame_type),
	STMMAC_STAT(ptp_ver),
	STMMAC_STAT(timestamp_dropped),
	STMMAC_STAT(av_pkt_rcvd),
	STMMAC_STAT(av_tagged_pkt_rcvd),
	STMMAC_STAT(vlan_tag_priority_val),
	STMMAC_STAT(l3_filter_match),
	STMMAC_STAT(l4_filter_match),
	STMMAC_STAT(l3_l4_filter_no_match),
	/* PCS */
	STMMAC_STAT(irq_pcs_ane_n),
	STMMAC_STAT(irq_pcs_link_n),
	STMMAC_STAT(irq_rgmii_n),
	/* DEBUG */
	STMMAC_STAT(mtl_tx_status_fifo_full),
	STMMAC_STAT(mtl_tx_fifo_not_empty),
	STMMAC_STAT(mmtl_fifo_ctrl),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_write),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_wait),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_read),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_idle),
	STMMAC_STAT(mac_tx_in_pause),
	STMMAC_STAT(mac_tx_frame_ctrl_xfer),
	STMMAC_STAT(mac_tx_frame_ctrl_idle),
	STMMAC_STAT(mac_tx_frame_ctrl_wait),
	STMMAC_STAT(mac_tx_frame_ctrl_pause),
	STMMAC_STAT(mac_gmii_tx_proto_engine),
	STMMAC_STAT(mtl_rx_fifo_fill_level_full),
	STMMAC_STAT(mtl_rx_fifo_fill_above_thresh),
	STMMAC_STAT(mtl_rx_fifo_fill_below_thresh),
	STMMAC_STAT(mtl_rx_fifo_fill_level_empty),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_flush),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_read_data),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_status),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_idle),
	STMMAC_STAT(mtl_rx_fifo_ctrl_active),
	STMMAC_STAT(mac_rx_frame_ctrl_fifo),
	STMMAC_STAT(mac_gmii_rx_proto_engine),
	/* TSO */
	STMMAC_STAT(tx_tso_frames),
	STMMAC_STAT(tx_tso_nfrags),
};
169#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
170
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* HW MAC Management counters (if supported) */
/* Build one stmmac_stats entry from field @m of struct stmmac_counters
 * (reached through priv->mmc).
 */
#define STMMAC_MMC_STAT(m)	\
	{ #m, FIELD_SIZEOF(struct stmmac_counters, m),	\
	offsetof(struct stmmac_priv, mmc.m)}
175
/* Hardware MMC counters exposed through ethtool -S when the RMON feature
 * (priv->dma_cap.rmon) is present; each entry maps a name to a field of
 * priv->mmc, refreshed by dwmac_mmc_read() in stmmac_get_ethtool_stats().
 */
static const struct stmmac_stats stmmac_mmc[] = {
	STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
	STMMAC_MMC_STAT(mmc_tx_framecount_gb),
	STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
	STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
	STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_unicast_gb),
	STMMAC_MMC_STAT(mmc_tx_multicast_gb),
	STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
	STMMAC_MMC_STAT(mmc_tx_underflow_error),
	STMMAC_MMC_STAT(mmc_tx_singlecol_g),
	STMMAC_MMC_STAT(mmc_tx_multicol_g),
	STMMAC_MMC_STAT(mmc_tx_deferred),
	STMMAC_MMC_STAT(mmc_tx_latecol),
	STMMAC_MMC_STAT(mmc_tx_exesscol),
	STMMAC_MMC_STAT(mmc_tx_carrier_error),
	STMMAC_MMC_STAT(mmc_tx_octetcount_g),
	STMMAC_MMC_STAT(mmc_tx_framecount_g),
	STMMAC_MMC_STAT(mmc_tx_excessdef),
	STMMAC_MMC_STAT(mmc_tx_pause_frame),
	STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
	STMMAC_MMC_STAT(mmc_rx_framecount_gb),
	STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
	STMMAC_MMC_STAT(mmc_rx_octetcount_g),
	STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
	STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
	STMMAC_MMC_STAT(mmc_rx_crc_error),
	STMMAC_MMC_STAT(mmc_rx_align_error),
	STMMAC_MMC_STAT(mmc_rx_run_error),
	STMMAC_MMC_STAT(mmc_rx_jabber_error),
	STMMAC_MMC_STAT(mmc_rx_undersize_g),
	STMMAC_MMC_STAT(mmc_rx_oversize_g),
	STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_unicast_g),
	STMMAC_MMC_STAT(mmc_rx_length_error),
	STMMAC_MMC_STAT(mmc_rx_autofrangetype),
	STMMAC_MMC_STAT(mmc_rx_pause_frames),
	STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
	STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
	STMMAC_MMC_STAT(mmc_rx_watchdog_error),
	STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
	STMMAC_MMC_STAT(mmc_rx_ipc_intr),
	STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
	STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
	STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
	STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
	STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
	STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
	STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
	STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
	STMMAC_MMC_STAT(mmc_rx_udp_gd),
	STMMAC_MMC_STAT(mmc_rx_udp_err),
	STMMAC_MMC_STAT(mmc_rx_tcp_gd),
	STMMAC_MMC_STAT(mmc_rx_tcp_err),
	STMMAC_MMC_STAT(mmc_rx_icmp_gd),
	STMMAC_MMC_STAT(mmc_rx_icmp_err),
	STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
	STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
	STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
};
257#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
258
 
 
 
 
 
 
 
 
 
 
 
 
259static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
260				      struct ethtool_drvinfo *info)
261{
262	struct stmmac_priv *priv = netdev_priv(dev);
263
264	if (priv->plat->has_gmac || priv->plat->has_gmac4)
265		strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
 
 
266	else
267		strlcpy(info->driver, MAC100_ETHTOOL_NAME,
268			sizeof(info->driver));
269
270	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 
 
 
271}
272
273static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
274					     struct ethtool_link_ksettings *cmd)
275{
276	struct stmmac_priv *priv = netdev_priv(dev);
277	struct phy_device *phy = dev->phydev;
278
279	if (priv->hw->pcs & STMMAC_PCS_RGMII ||
280	    priv->hw->pcs & STMMAC_PCS_SGMII) {
 
281		struct rgmii_adv adv;
282		u32 supported, advertising, lp_advertising;
283
284		if (!priv->xstats.pcs_link) {
285			cmd->base.speed = SPEED_UNKNOWN;
286			cmd->base.duplex = DUPLEX_UNKNOWN;
287			return 0;
288		}
289		cmd->base.duplex = priv->xstats.pcs_duplex;
290
291		cmd->base.speed = priv->xstats.pcs_speed;
292
293		/* Get and convert ADV/LP_ADV from the HW AN registers */
294		if (!priv->hw->mac->pcs_get_adv_lp)
295			return -EOPNOTSUPP;	/* should never happen indeed */
296
297		priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv);
298
299		/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
300
301		ethtool_convert_link_mode_to_legacy_u32(
302			&supported, cmd->link_modes.supported);
303		ethtool_convert_link_mode_to_legacy_u32(
304			&advertising, cmd->link_modes.advertising);
305		ethtool_convert_link_mode_to_legacy_u32(
306			&lp_advertising, cmd->link_modes.lp_advertising);
307
308		if (adv.pause & STMMAC_PCS_PAUSE)
309			advertising |= ADVERTISED_Pause;
310		if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
311			advertising |= ADVERTISED_Asym_Pause;
312		if (adv.lp_pause & STMMAC_PCS_PAUSE)
313			lp_advertising |= ADVERTISED_Pause;
314		if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
315			lp_advertising |= ADVERTISED_Asym_Pause;
316
317		/* Reg49[3] always set because ANE is always supported */
318		cmd->base.autoneg = ADVERTISED_Autoneg;
319		supported |= SUPPORTED_Autoneg;
320		advertising |= ADVERTISED_Autoneg;
321		lp_advertising |= ADVERTISED_Autoneg;
322
323		if (adv.duplex) {
324			supported |= (SUPPORTED_1000baseT_Full |
325				      SUPPORTED_100baseT_Full |
326				      SUPPORTED_10baseT_Full);
327			advertising |= (ADVERTISED_1000baseT_Full |
328					ADVERTISED_100baseT_Full |
329					ADVERTISED_10baseT_Full);
330		} else {
331			supported |= (SUPPORTED_1000baseT_Half |
332				      SUPPORTED_100baseT_Half |
333				      SUPPORTED_10baseT_Half);
334			advertising |= (ADVERTISED_1000baseT_Half |
335					ADVERTISED_100baseT_Half |
336					ADVERTISED_10baseT_Half);
337		}
338		if (adv.lp_duplex)
339			lp_advertising |= (ADVERTISED_1000baseT_Full |
340					   ADVERTISED_100baseT_Full |
341					   ADVERTISED_10baseT_Full);
342		else
343			lp_advertising |= (ADVERTISED_1000baseT_Half |
344					   ADVERTISED_100baseT_Half |
345					   ADVERTISED_10baseT_Half);
346		cmd->base.port = PORT_OTHER;
347
348		ethtool_convert_legacy_u32_to_link_mode(
349			cmd->link_modes.supported, supported);
350		ethtool_convert_legacy_u32_to_link_mode(
351			cmd->link_modes.advertising, advertising);
352		ethtool_convert_legacy_u32_to_link_mode(
353			cmd->link_modes.lp_advertising, lp_advertising);
354
355		return 0;
356	}
357
358	if (phy == NULL) {
359		pr_err("%s: %s: PHY is not registered\n",
360		       __func__, dev->name);
361		return -ENODEV;
362	}
363	if (!netif_running(dev)) {
364		pr_err("%s: interface is disabled: we cannot track "
365		"link speed / duplex setting\n", dev->name);
366		return -EBUSY;
367	}
368	phy_ethtool_ksettings_get(phy, cmd);
369	return 0;
370}
371
372static int
373stmmac_ethtool_set_link_ksettings(struct net_device *dev,
374				  const struct ethtool_link_ksettings *cmd)
375{
376	struct stmmac_priv *priv = netdev_priv(dev);
377	struct phy_device *phy = dev->phydev;
378	int rc;
379
380	if (priv->hw->pcs & STMMAC_PCS_RGMII ||
381	    priv->hw->pcs & STMMAC_PCS_SGMII) {
382		u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
383
 
 
 
384		/* Only support ANE */
385		if (cmd->base.autoneg != AUTONEG_ENABLE)
386			return -EINVAL;
387
388		mask &= (ADVERTISED_1000baseT_Half |
389			ADVERTISED_1000baseT_Full |
390			ADVERTISED_100baseT_Half |
391			ADVERTISED_100baseT_Full |
392			ADVERTISED_10baseT_Half |
393			ADVERTISED_10baseT_Full);
394
395		spin_lock(&priv->lock);
396
397		if (priv->hw->mac->pcs_ctrl_ane)
398			priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1,
399						    priv->hw->ps, 0);
400
401		spin_unlock(&priv->lock);
402
403		return 0;
404	}
405
406	rc = phy_ethtool_ksettings_set(phy, cmd);
407
408	return rc;
409}
410
411static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
412{
413	struct stmmac_priv *priv = netdev_priv(dev);
414	return priv->msg_enable;
415}
416
417static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
418{
419	struct stmmac_priv *priv = netdev_priv(dev);
420	priv->msg_enable = level;
421
422}
423
424static int stmmac_check_if_running(struct net_device *dev)
425{
426	if (!netif_running(dev))
427		return -EBUSY;
428	return 0;
429}
430
431static int stmmac_ethtool_get_regs_len(struct net_device *dev)
432{
 
 
 
 
 
 
433	return REG_SPACE_SIZE;
434}
435
436static void stmmac_ethtool_gregs(struct net_device *dev,
437			  struct ethtool_regs *regs, void *space)
438{
 
439	u32 *reg_space = (u32 *) space;
440
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
441	struct stmmac_priv *priv = netdev_priv(dev);
442
443	memset(reg_space, 0x0, REG_SPACE_SIZE);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
444
445	priv->hw->mac->dump_regs(priv->hw, reg_space);
446	priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
447	/* Copy DMA registers to where ethtool expects them */
448	memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
449	       NUM_DWMAC1000_DMA_REGS * 4);
450}
451
452static void
453stmmac_get_pauseparam(struct net_device *netdev,
454		      struct ethtool_pauseparam *pause)
455{
456	struct stmmac_priv *priv = netdev_priv(netdev);
 
457
458	pause->rx_pause = 0;
459	pause->tx_pause = 0;
460
461	if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
462		struct rgmii_adv adv_lp;
463
464		pause->autoneg = 1;
465		priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
466		if (!adv_lp.pause)
467			return;
468	} else {
469		if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
470		    !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
471			return;
472	}
473
474	pause->autoneg = netdev->phydev->autoneg;
475
476	if (priv->flow_ctrl & FLOW_RX)
477		pause->rx_pause = 1;
478	if (priv->flow_ctrl & FLOW_TX)
479		pause->tx_pause = 1;
480
481}
482
483static int
484stmmac_set_pauseparam(struct net_device *netdev,
485		      struct ethtool_pauseparam *pause)
486{
487	struct stmmac_priv *priv = netdev_priv(netdev);
488	u32 tx_cnt = priv->plat->tx_queues_to_use;
489	struct phy_device *phy = netdev->phydev;
490	int new_pause = FLOW_OFF;
491
492	if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
493		struct rgmii_adv adv_lp;
494
 
495		pause->autoneg = 1;
496		priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
497		if (!adv_lp.pause)
498			return -EOPNOTSUPP;
 
499	} else {
500		if (!(phy->supported & SUPPORTED_Pause) ||
501		    !(phy->supported & SUPPORTED_Asym_Pause))
502			return -EOPNOTSUPP;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
503	}
 
 
504
505	if (pause->rx_pause)
506		new_pause |= FLOW_RX;
507	if (pause->tx_pause)
508		new_pause |= FLOW_TX;
 
 
509
510	priv->flow_ctrl = new_pause;
511	phy->autoneg = pause->autoneg;
 
 
 
 
 
 
512
513	if (phy->autoneg) {
514		if (netif_running(netdev))
515			return phy_start_aneg(phy);
516	}
517
518	priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
519				 priv->pause, tx_cnt);
520	return 0;
 
 
 
 
 
 
 
 
 
521}
522
523static void stmmac_get_ethtool_stats(struct net_device *dev,
524				 struct ethtool_stats *dummy, u64 *data)
525{
526	const char *(*dump)(struct stmmac_safety_stats *stats, int index,
527			unsigned long *count);
528	struct stmmac_priv *priv = netdev_priv(dev);
529	u32 rx_queues_count = priv->plat->rx_queues_to_use;
530	u32 tx_queues_count = priv->plat->tx_queues_to_use;
 
 
531	unsigned long count;
532	int i, j = 0;
533
534	if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) {
535		dump = priv->hw->mac->safety_feat_dump;
536
 
537		for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
538			if (dump(&priv->sstats, i, &count))
 
539				data[j++] = count;
540		}
541	}
542
543	/* Update the DMA HW counters for dwmac10/100 */
544	if (priv->hw->dma->dma_diagnostic_fr)
545		priv->hw->dma->dma_diagnostic_fr(&dev->stats,
546						 (void *) &priv->xstats,
547						 priv->ioaddr);
548	else {
549		/* If supported, for new GMAC chips expose the MMC counters */
550		if (priv->dma_cap.rmon) {
551			dwmac_mmc_read(priv->mmcaddr, &priv->mmc);
552
553			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
554				char *p;
555				p = (char *)priv + stmmac_mmc[i].stat_offset;
556
557				data[j++] = (stmmac_mmc[i].sizeof_stat ==
558					     sizeof(u64)) ? (*(u64 *)p) :
559					     (*(u32 *)p);
560			}
561		}
562		if (priv->eee_enabled) {
563			int val = phy_get_eee_err(dev->phydev);
564			if (val)
565				priv->xstats.phy_eee_wakeup_error_n = val;
566		}
567
568		if ((priv->hw->mac->debug) &&
569		    (priv->synopsys_id >= DWMAC_CORE_3_50))
570			priv->hw->mac->debug(priv->ioaddr,
571					     (void *)&priv->xstats,
572					     rx_queues_count, tx_queues_count);
573	}
574	for (i = 0; i < STMMAC_STATS_LEN; i++) {
575		char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
576		data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
577			     sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
578	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
579}
580
581static int stmmac_get_sset_count(struct net_device *netdev, int sset)
582{
583	struct stmmac_priv *priv = netdev_priv(netdev);
584	const char *(*dump)(struct stmmac_safety_stats *stats, int index,
585			unsigned long *count);
586	int i, len, safety_len = 0;
587
588	switch (sset) {
589	case ETH_SS_STATS:
590		len = STMMAC_STATS_LEN;
 
 
591
592		if (priv->dma_cap.rmon)
593			len += STMMAC_MMC_STATS_LEN;
594		if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) {
595			dump = priv->hw->mac->safety_feat_dump;
596
597			for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
598				if (dump(&priv->sstats, i, NULL))
 
 
599					safety_len++;
600			}
601
602			len += safety_len;
603		}
604
605		return len;
 
 
606	default:
607		return -EOPNOTSUPP;
608	}
609}
610
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
611static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
612{
613	int i;
614	u8 *p = data;
615	struct stmmac_priv *priv = netdev_priv(dev);
616	const char *(*dump)(struct stmmac_safety_stats *stats, int index,
617			unsigned long *count);
618
619	switch (stringset) {
620	case ETH_SS_STATS:
621		if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) {
622			dump = priv->hw->mac->safety_feat_dump;
623			for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
624				const char *desc = dump(&priv->sstats, i, NULL);
625
626				if (desc) {
 
627					memcpy(p, desc, ETH_GSTRING_LEN);
628					p += ETH_GSTRING_LEN;
629				}
630			}
631		}
632		if (priv->dma_cap.rmon)
633			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
634				memcpy(p, stmmac_mmc[i].stat_string,
635				       ETH_GSTRING_LEN);
636				p += ETH_GSTRING_LEN;
637			}
638		for (i = 0; i < STMMAC_STATS_LEN; i++) {
639			memcpy(p, stmmac_gstrings_stats[i].stat_string,
640				ETH_GSTRING_LEN);
 
 
 
641			p += ETH_GSTRING_LEN;
642		}
 
 
 
 
643		break;
644	default:
645		WARN_ON(1);
646		break;
647	}
648}
649
650/* Currently only support WOL through Magic packet. */
651static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
652{
653	struct stmmac_priv *priv = netdev_priv(dev);
654
655	spin_lock_irq(&priv->lock);
 
 
 
656	if (device_can_wakeup(priv->device)) {
657		wol->supported = WAKE_MAGIC | WAKE_UCAST;
 
 
658		wol->wolopts = priv->wolopts;
659	}
660	spin_unlock_irq(&priv->lock);
661}
662
663static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
664{
665	struct stmmac_priv *priv = netdev_priv(dev);
666	u32 support = WAKE_MAGIC | WAKE_UCAST;
667
 
 
 
 
 
 
 
 
 
 
 
668	/* By default almost all GMAC devices support the WoL via
669	 * magic frame but we can disable it if the HW capability
670	 * register shows no support for pmt_magic_frame. */
671	if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
672		wol->wolopts &= ~WAKE_MAGIC;
673
674	if (!device_can_wakeup(priv->device))
675		return -EINVAL;
676
677	if (wol->wolopts & ~support)
678		return -EINVAL;
679
680	if (wol->wolopts) {
681		pr_info("stmmac: wakeup enable\n");
682		device_set_wakeup_enable(priv->device, 1);
683		enable_irq_wake(priv->wol_irq);
 
 
 
684	} else {
685		device_set_wakeup_enable(priv->device, 0);
686		disable_irq_wake(priv->wol_irq);
 
 
 
687	}
688
689	spin_lock_irq(&priv->lock);
690	priv->wolopts = wol->wolopts;
691	spin_unlock_irq(&priv->lock);
692
693	return 0;
694}
695
696static int stmmac_ethtool_op_get_eee(struct net_device *dev,
697				     struct ethtool_eee *edata)
698{
699	struct stmmac_priv *priv = netdev_priv(dev);
700
701	if (!priv->dma_cap.eee)
702		return -EOPNOTSUPP;
703
704	edata->eee_enabled = priv->eee_enabled;
705	edata->eee_active = priv->eee_active;
706	edata->tx_lpi_timer = priv->tx_lpi_timer;
 
707
708	return phy_ethtool_get_eee(dev->phydev, edata);
709}
710
711static int stmmac_ethtool_op_set_eee(struct net_device *dev,
712				     struct ethtool_eee *edata)
713{
714	struct stmmac_priv *priv = netdev_priv(dev);
 
 
 
 
715
716	priv->eee_enabled = edata->eee_enabled;
 
 
717
718	if (!priv->eee_enabled)
719		stmmac_disable_eee_mode(priv);
720	else {
721		/* We are asking for enabling the EEE but it is safe
722		 * to verify all by invoking the eee_init function.
723		 * In case of failure it will return an error.
724		 */
725		priv->eee_enabled = stmmac_eee_init(priv);
726		if (!priv->eee_enabled)
727			return -EOPNOTSUPP;
728
729		/* Do not change tx_lpi_timer in case of failure */
 
 
 
 
 
730		priv->tx_lpi_timer = edata->tx_lpi_timer;
 
731	}
732
733	return phy_ethtool_set_eee(dev->phydev, edata);
734}
735
736static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
737{
738	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
739
740	if (!clk)
741		return 0;
 
 
 
742
743	return (usec * (clk / 1000000)) / 256;
744}
745
746static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
747{
748	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
749
750	if (!clk)
751		return 0;
 
 
 
752
753	return (riwt * 256) / (clk / 1000000);
754}
755
756static int stmmac_get_coalesce(struct net_device *dev,
757			       struct ethtool_coalesce *ec)
 
758{
759	struct stmmac_priv *priv = netdev_priv(dev);
 
 
 
 
 
 
 
 
 
 
 
 
760
761	ec->tx_coalesce_usecs = priv->tx_coal_timer;
762	ec->tx_max_coalesced_frames = priv->tx_coal_frames;
 
 
 
 
 
763
764	if (priv->use_riwt)
765		ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);
 
 
 
 
 
 
766
767	return 0;
768}
769
770static int stmmac_set_coalesce(struct net_device *dev,
771			       struct ethtool_coalesce *ec)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
772{
773	struct stmmac_priv *priv = netdev_priv(dev);
774	u32 rx_cnt = priv->plat->rx_queues_to_use;
775	unsigned int rx_riwt;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
776
777	/* Check not supported parameters  */
778	if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
779	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
780	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
781	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
782	    (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
783	    (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
784	    (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
785	    (ec->rx_max_coalesced_frames_high) ||
786	    (ec->tx_max_coalesced_frames_irq) ||
787	    (ec->stats_block_coalesce_usecs) ||
788	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
789		return -EOPNOTSUPP;
790
791	if (ec->rx_coalesce_usecs == 0)
792		return -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
 
 
793
794	if ((ec->tx_coalesce_usecs == 0) &&
795	    (ec->tx_max_coalesced_frames == 0))
796		return -EINVAL;
797
798	if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) ||
799	    (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
800		return -EINVAL;
801
802	rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
803
804	if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
805		return -EINVAL;
806	else if (!priv->use_riwt)
 
 
 
 
 
 
 
807		return -EOPNOTSUPP;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
808
809	/* Only copy relevant parameters, ignore all others. */
810	priv->tx_coal_frames = ec->tx_max_coalesced_frames;
811	priv->tx_coal_timer = ec->tx_coalesce_usecs;
812	priv->rx_riwt = rx_riwt;
813	priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);
814
815	return 0;
816}
817
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
818static int stmmac_get_ts_info(struct net_device *dev,
819			      struct ethtool_ts_info *info)
820{
821	struct stmmac_priv *priv = netdev_priv(dev);
822
823	if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
824
825		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
826					SOF_TIMESTAMPING_TX_HARDWARE |
827					SOF_TIMESTAMPING_RX_SOFTWARE |
828					SOF_TIMESTAMPING_RX_HARDWARE |
829					SOF_TIMESTAMPING_SOFTWARE |
830					SOF_TIMESTAMPING_RAW_HARDWARE;
831
832		if (priv->ptp_clock)
833			info->phc_index = ptp_clock_index(priv->ptp_clock);
834
835		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
836
837		info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
838				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
839				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
840				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
841				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
842				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
843				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
844				    (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
845				    (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
846				    (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
847				    (1 << HWTSTAMP_FILTER_ALL));
848		return 0;
849	} else
850		return ethtool_op_get_ts_info(dev, info);
851}
852
853static int stmmac_get_tunable(struct net_device *dev,
854			      const struct ethtool_tunable *tuna, void *data)
855{
856	struct stmmac_priv *priv = netdev_priv(dev);
857	int ret = 0;
858
859	switch (tuna->id) {
860	case ETHTOOL_RX_COPYBREAK:
861		*(u32 *)data = priv->rx_copybreak;
862		break;
863	default:
864		ret = -EINVAL;
865		break;
866	}
867
868	return ret;
869}
870
871static int stmmac_set_tunable(struct net_device *dev,
872			      const struct ethtool_tunable *tuna,
873			      const void *data)
874{
875	struct stmmac_priv *priv = netdev_priv(dev);
876	int ret = 0;
877
878	switch (tuna->id) {
879	case ETHTOOL_RX_COPYBREAK:
880		priv->rx_copybreak = *(u32 *)data;
881		break;
882	default:
883		ret = -EINVAL;
884		break;
885	}
886
887	return ret;
888}
889
/* ethtool entry points for stmmac devices; installed on the netdev by
 * stmmac_set_ethtool_ops() below.
 */
static const struct ethtool_ops stmmac_ethtool_ops = {
	.begin = stmmac_check_if_running,
	.get_drvinfo = stmmac_ethtool_getdrvinfo,
	.get_msglevel = stmmac_ethtool_getmsglevel,
	.set_msglevel = stmmac_ethtool_setmsglevel,
	.get_regs = stmmac_ethtool_gregs,
	.get_regs_len = stmmac_ethtool_get_regs_len,
	.get_link = ethtool_op_get_link,
	.nway_reset = phy_ethtool_nway_reset,
	.get_pauseparam = stmmac_get_pauseparam,
	.set_pauseparam = stmmac_set_pauseparam,
	.get_ethtool_stats = stmmac_get_ethtool_stats,
	.get_strings = stmmac_get_strings,
	.get_wol = stmmac_get_wol,
	.set_wol = stmmac_set_wol,
	.get_eee = stmmac_ethtool_op_get_eee,
	.set_eee = stmmac_ethtool_op_set_eee,
	.get_sset_count	= stmmac_get_sset_count,
	.get_ts_info = stmmac_get_ts_info,
	.get_coalesce = stmmac_get_coalesce,
	.set_coalesce = stmmac_set_coalesce,
	.get_tunable = stmmac_get_tunable,
	.set_tunable = stmmac_set_tunable,
	.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
	.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
};
916
/* Hook the stmmac ethtool operations into @netdev; called once at
 * device setup time.
 */
void stmmac_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &stmmac_ethtool_ops;
}