   1// SPDX-License-Identifier: GPL-2.0-only
   2/*******************************************************************************
   3  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
   4  ST Ethernet IPs are built around a Synopsys IP Core.
   5
   6	Copyright(C) 2007-2011 STMicroelectronics Ltd
   7
   8
   9  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
  10
  11  Documentation available at:
  12	http://www.stlinux.com
  13  Support available at:
  14	https://bugzilla.stlinux.com/
  15*******************************************************************************/
  16
  17#include <linux/clk.h>
  18#include <linux/kernel.h>
  19#include <linux/interrupt.h>
  20#include <linux/ip.h>
  21#include <linux/tcp.h>
  22#include <linux/skbuff.h>
  23#include <linux/ethtool.h>
  24#include <linux/if_ether.h>
  25#include <linux/crc32.h>
  26#include <linux/mii.h>
  27#include <linux/if.h>
  28#include <linux/if_vlan.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/slab.h>
  31#include <linux/pm_runtime.h>
  32#include <linux/prefetch.h>
  33#include <linux/pinctrl/consumer.h>
  34#ifdef CONFIG_DEBUG_FS
  35#include <linux/debugfs.h>
  36#include <linux/seq_file.h>
  37#endif /* CONFIG_DEBUG_FS */
  38#include <linux/net_tstamp.h>
  39#include <linux/phylink.h>
  40#include <linux/udp.h>
  41#include <linux/bpf_trace.h>
  42#include <net/page_pool/helpers.h>
  43#include <net/pkt_cls.h>
  44#include <net/xdp_sock_drv.h>
  45#include "stmmac_ptp.h"
  46#include "stmmac_fpe.h"
  47#include "stmmac.h"
  48#include "stmmac_xdp.h"
  49#include <linux/reset.h>
  50#include <linux/of_mdio.h>
  51#include "dwmac1000.h"
  52#include "dwxgmac2.h"
  53#include "hwif.h"
  54
  55/* As long as the interface is active, we keep the timestamping counter enabled
   56 * with fine resolution and binary rollover. This avoids non-monotonic behavior
  57 * (clock jumps) when changing timestamping settings at runtime.
  58 */
  59#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
  60				 PTP_TCR_TSCTRLSSR)
  61
  62#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
  63#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
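/* Illustrative example (assuming SMP_CACHE_BYTES is 64): STMMAC_ALIGN(1522)
 * first rounds 1522 up to the next cache line, giving 1536, and then to a
 * 16-byte boundary, which leaves it at 1536. The outer ALIGN(x, 16) only has
 * an effect on configurations where SMP_CACHE_BYTES is smaller than 16.
 */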
  64
  65/* Module parameters */
  66#define TX_TIMEO	5000
  67static int watchdog = TX_TIMEO;
  68module_param(watchdog, int, 0644);
  69MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
  70
  71static int debug = -1;
  72module_param(debug, int, 0644);
  73MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
  74
  75static int phyaddr = -1;
  76module_param(phyaddr, int, 0444);
  77MODULE_PARM_DESC(phyaddr, "Physical device address");
  78
  79#define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
  80#define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
  81
  82/* Limit to make sure XDP TX and slow path can coexist */
  83#define STMMAC_XSK_TX_BUDGET_MAX	256
  84#define STMMAC_TX_XSK_AVAIL		16
  85#define STMMAC_RX_FILL_BATCH		16
  86
  87#define STMMAC_XDP_PASS		0
  88#define STMMAC_XDP_CONSUMED	BIT(0)
  89#define STMMAC_XDP_TX		BIT(1)
  90#define STMMAC_XDP_REDIRECT	BIT(2)
  91
  92static int flow_ctrl = FLOW_AUTO;
  93module_param(flow_ctrl, int, 0644);
  94MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
  95
  96static int pause = PAUSE_TIME;
  97module_param(pause, int, 0644);
  98MODULE_PARM_DESC(pause, "Flow Control Pause Time");
  99
 100#define TC_DEFAULT 64
 101static int tc = TC_DEFAULT;
 102module_param(tc, int, 0644);
 103MODULE_PARM_DESC(tc, "DMA threshold control value");
 104
 105#define	DEFAULT_BUFSIZE	1536
 106static int buf_sz = DEFAULT_BUFSIZE;
 107module_param(buf_sz, int, 0644);
 108MODULE_PARM_DESC(buf_sz, "DMA buffer size");
 109
 110#define	STMMAC_RX_COPYBREAK	256
 111
 112static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 113				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
 114				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
 115
 116#define STMMAC_DEFAULT_LPI_TIMER	1000
 117static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 118module_param(eee_timer, int, 0644);
 119MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
 120#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
 121
 122/* By default the driver will use the ring mode to manage tx and rx descriptors,
  123 * but allows the user to force use of the chain mode instead of the ring
 124 */
 125static unsigned int chain_mode;
 126module_param(chain_mode, int, 0444);
 127MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
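/* Illustrative usage (assuming the driver is built as the "stmmac" module):
 * the parameters above are normally left at their defaults, but they can be
 * overridden at load time, e.g.
 *
 *   modprobe stmmac watchdog=10000 buf_sz=4096 chain_mode=1
 *
 * Parameters registered with mode 0644 can also be read or changed at
 * runtime through /sys/module/stmmac/parameters/.
 */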
 128
 129static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 130/* For MSI interrupts handling */
 131static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
 132static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
 133static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
 134static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
 135static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
 136static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
 137static void stmmac_reset_queues_param(struct stmmac_priv *priv);
 138static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
 139static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
 140static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
 141					  u32 rxmode, u32 chan);
 142
 143#ifdef CONFIG_DEBUG_FS
 144static const struct net_device_ops stmmac_netdev_ops;
 145static void stmmac_init_fs(struct net_device *dev);
 146static void stmmac_exit_fs(struct net_device *dev);
 147#endif
 148
 149#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
 150
 151int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
 152{
 153	int ret = 0;
 154
 155	if (enabled) {
 156		ret = clk_prepare_enable(priv->plat->stmmac_clk);
 157		if (ret)
 158			return ret;
 159		ret = clk_prepare_enable(priv->plat->pclk);
 160		if (ret) {
 161			clk_disable_unprepare(priv->plat->stmmac_clk);
 162			return ret;
 163		}
 164		if (priv->plat->clks_config) {
 165			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
 166			if (ret) {
 167				clk_disable_unprepare(priv->plat->stmmac_clk);
 168				clk_disable_unprepare(priv->plat->pclk);
 169				return ret;
 170			}
 171		}
 172	} else {
 173		clk_disable_unprepare(priv->plat->stmmac_clk);
 174		clk_disable_unprepare(priv->plat->pclk);
 175		if (priv->plat->clks_config)
 176			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
 177	}
 178
 179	return ret;
 180}
 181EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
 182
 183/**
 184 * stmmac_verify_args - verify the driver parameters.
  185 * Description: it checks the driver parameters and sets a default in case of
 186 * errors.
 187 */
 188static void stmmac_verify_args(void)
 189{
 190	if (unlikely(watchdog < 0))
 191		watchdog = TX_TIMEO;
 192	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
 193		buf_sz = DEFAULT_BUFSIZE;
 194	if (unlikely(flow_ctrl > 1))
 195		flow_ctrl = FLOW_AUTO;
 196	else if (likely(flow_ctrl < 0))
 197		flow_ctrl = FLOW_OFF;
 198	if (unlikely((pause < 0) || (pause > 0xffff)))
 199		pause = PAUSE_TIME;
 200	if (eee_timer < 0)
 201		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 202}
 203
 204static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
 205{
 206	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
 207	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
 208	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
 209	u32 queue;
 210
 211	for (queue = 0; queue < maxq; queue++) {
 212		struct stmmac_channel *ch = &priv->channel[queue];
 213
 214		if (stmmac_xdp_is_enabled(priv) &&
 215		    test_bit(queue, priv->af_xdp_zc_qps)) {
 216			napi_disable(&ch->rxtx_napi);
 217			continue;
 218		}
 219
 220		if (queue < rx_queues_cnt)
 221			napi_disable(&ch->rx_napi);
 222		if (queue < tx_queues_cnt)
 223			napi_disable(&ch->tx_napi);
 224	}
 225}
 226
 227/**
 228 * stmmac_disable_all_queues - Disable all queues
 229 * @priv: driver private structure
 230 */
 231static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 232{
 233	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
 234	struct stmmac_rx_queue *rx_q;
 235	u32 queue;
 236
 237	/* synchronize_rcu() needed for pending XDP buffers to drain */
 238	for (queue = 0; queue < rx_queues_cnt; queue++) {
 239		rx_q = &priv->dma_conf.rx_queue[queue];
 240		if (rx_q->xsk_pool) {
 241			synchronize_rcu();
 242			break;
 243		}
 244	}
 245
 246	__stmmac_disable_all_queues(priv);
 247}
 248
 249/**
 250 * stmmac_enable_all_queues - Enable all queues
 251 * @priv: driver private structure
 252 */
 253static void stmmac_enable_all_queues(struct stmmac_priv *priv)
 254{
 255	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
 256	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
 257	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
 258	u32 queue;
 259
 260	for (queue = 0; queue < maxq; queue++) {
 261		struct stmmac_channel *ch = &priv->channel[queue];
 262
 263		if (stmmac_xdp_is_enabled(priv) &&
 264		    test_bit(queue, priv->af_xdp_zc_qps)) {
 265			napi_enable(&ch->rxtx_napi);
 266			continue;
 267		}
 268
 269		if (queue < rx_queues_cnt)
 270			napi_enable(&ch->rx_napi);
 271		if (queue < tx_queues_cnt)
 272			napi_enable(&ch->tx_napi);
 273	}
 274}
 275
 276static void stmmac_service_event_schedule(struct stmmac_priv *priv)
 277{
 278	if (!test_bit(STMMAC_DOWN, &priv->state) &&
 279	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
 280		queue_work(priv->wq, &priv->service_task);
 281}
 282
 283static void stmmac_global_err(struct stmmac_priv *priv)
 284{
 285	netif_carrier_off(priv->dev);
 286	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
 287	stmmac_service_event_schedule(priv);
 288}
 289
 290/**
 291 * stmmac_clk_csr_set - dynamically set the MDC clock
 292 * @priv: driver private structure
 293 * Description: this is to dynamically set the MDC clock according to the csr
 294 * clock input.
 295 * Note:
 296 *	If a specific clk_csr value is passed from the platform
 297 *	this means that the CSR Clock Range selection cannot be
 298 *	changed at run-time and it is fixed (as reported in the driver
  299 *	documentation). Otherwise, the driver will try to set the MDC
 300 *	clock dynamically according to the actual clock input.
 301 */
 302static void stmmac_clk_csr_set(struct stmmac_priv *priv)
 303{
 304	u32 clk_rate;
 305
 306	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
 307
 308	/* Platform provided default clk_csr would be assumed valid
 309	 * for all other cases except for the below mentioned ones.
 310	 * For values higher than the IEEE 802.3 specified frequency
  311	 * we cannot estimate the proper divider, as the frequency of
  312	 * clk_csr_i is not known. So we do not change the default
  313	 * divider.
 314	 */
 315	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
 316		if (clk_rate < CSR_F_35M)
 317			priv->clk_csr = STMMAC_CSR_20_35M;
 318		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
 319			priv->clk_csr = STMMAC_CSR_35_60M;
 320		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
 321			priv->clk_csr = STMMAC_CSR_60_100M;
 322		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
 323			priv->clk_csr = STMMAC_CSR_100_150M;
 324		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
 325			priv->clk_csr = STMMAC_CSR_150_250M;
 326		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
 327			priv->clk_csr = STMMAC_CSR_250_300M;
 328	}
 329
 330	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
 331		if (clk_rate > 160000000)
 332			priv->clk_csr = 0x03;
 333		else if (clk_rate > 80000000)
 334			priv->clk_csr = 0x02;
 335		else if (clk_rate > 40000000)
 336			priv->clk_csr = 0x01;
 337		else
 338			priv->clk_csr = 0;
 339	}
 340
 341	if (priv->plat->has_xgmac) {
 342		if (clk_rate > 400000000)
 343			priv->clk_csr = 0x5;
 344		else if (clk_rate > 350000000)
 345			priv->clk_csr = 0x4;
 346		else if (clk_rate > 300000000)
 347			priv->clk_csr = 0x3;
 348		else if (clk_rate > 250000000)
 349			priv->clk_csr = 0x2;
 350		else if (clk_rate > 150000000)
 351			priv->clk_csr = 0x1;
 352		else
 353			priv->clk_csr = 0x0;
 354	}
 355}
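/* Illustrative example: with clk_csr_i running at 75 MHz and no fixed
 * clk_csr provided by the platform, the code above selects
 * STMMAC_CSR_60_100M. Per the Synopsys databook that range uses a /42
 * divider, so MDC ends up at roughly 1.8 MHz, under the 2.5 MHz limit of
 * IEEE 802.3 Clause 22.
 */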
 356
 357static void print_pkt(unsigned char *buf, int len)
 358{
 359	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
 360	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
 361}
 362
 363static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
 364{
 365	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
 366	u32 avail;
 367
 368	if (tx_q->dirty_tx > tx_q->cur_tx)
 369		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
 370	else
 371		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
 372
 373	return avail;
 374}
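/* Illustrative example: with dma_tx_size = 512, cur_tx = 500 and
 * dirty_tx = 10 the ring has wrapped, so avail = 512 - 500 + 10 - 1 = 21;
 * with cur_tx = 10 and dirty_tx = 500, avail = 500 - 10 - 1 = 489. The -1
 * keeps one descriptor unused so that a completely full ring is never
 * confused with an empty one (cur_tx == dirty_tx).
 */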
 375
 376/**
 377 * stmmac_rx_dirty - Get RX queue dirty
 378 * @priv: driver private structure
 379 * @queue: RX queue index
 380 */
 381static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
 382{
 383	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
 384	u32 dirty;
 385
 386	if (rx_q->dirty_rx <= rx_q->cur_rx)
 387		dirty = rx_q->cur_rx - rx_q->dirty_rx;
 388	else
 389		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
 390
 391	return dirty;
 392}
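/* Illustrative example: unlike stmmac_tx_avail() above, no slot is reserved
 * here. With dma_rx_size = 512, cur_rx = 20 and dirty_rx = 500 the ring has
 * wrapped, so dirty = 512 - 500 + 20 = 32 descriptors are waiting to be
 * refilled.
 */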
 393
 394static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
 395{
 396	int tx_lpi_timer;
 397
 398	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
 399	priv->eee_sw_timer_en = en ? 0 : 1;
 400	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
 401	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
 402}
 403
 404/**
  405 * stmmac_enable_eee_mode - check and enter LPI mode
  406 * @priv: driver private structure
  407 * Description: this function checks whether all TX queues are idle and,
  408 * if so, enters LPI mode (EEE).
 409 */
 410static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
 411{
 412	u32 tx_cnt = priv->plat->tx_queues_to_use;
 413	u32 queue;
 414
 415	/* check if all TX queues have the work finished */
 416	for (queue = 0; queue < tx_cnt; queue++) {
 417		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
 418
 419		if (tx_q->dirty_tx != tx_q->cur_tx)
 420			return -EBUSY; /* still unfinished work */
 421	}
 422
 423	/* Check and enter in LPI mode */
 424	if (!priv->tx_path_in_lpi_mode)
 425		stmmac_set_eee_mode(priv, priv->hw,
 426			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
 427	return 0;
 428}
 429
 430/**
 431 * stmmac_disable_eee_mode - disable and exit from LPI mode
 432 * @priv: driver private structure
  433 * Description: this function exits and disables EEE when the LPI state
  434 * is active. It is called from the xmit path.
 435 */
 436void stmmac_disable_eee_mode(struct stmmac_priv *priv)
 437{
 438	if (!priv->eee_sw_timer_en) {
 439		stmmac_lpi_entry_timer_config(priv, 0);
 440		return;
 441	}
 442
 443	stmmac_reset_eee_mode(priv, priv->hw);
 444	del_timer_sync(&priv->eee_ctrl_timer);
 445	priv->tx_path_in_lpi_mode = false;
 446}
 447
 448/**
 449 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 450 * @t:  timer_list struct containing private info
 451 * Description:
  452 *  if there is no data transfer and we are not already in the LPI state,
  453 *  then the MAC transmitter can be moved to the LPI state.
 454 */
 455static void stmmac_eee_ctrl_timer(struct timer_list *t)
 456{
 457	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
 458
 459	if (stmmac_enable_eee_mode(priv))
 460		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
 461}
 462
 463/**
 464 * stmmac_eee_init - init EEE
 465 * @priv: driver private structure
 466 * Description:
 467 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
  468 *  can also manage EEE, this function enables the LPI state and starts
  469 *  the related timer.
 470 */
 471bool stmmac_eee_init(struct stmmac_priv *priv)
 472{
 473	int eee_tw_timer = priv->eee_tw_timer;
 474
 475	/* Check if MAC core supports the EEE feature. */
 476	if (!priv->dma_cap.eee)
 477		return false;
 478
 479	mutex_lock(&priv->lock);
 480
 481	/* Check if it needs to be deactivated */
 482	if (!priv->eee_active) {
 483		if (priv->eee_enabled) {
 484			netdev_dbg(priv->dev, "disable EEE\n");
 485			stmmac_lpi_entry_timer_config(priv, 0);
 486			del_timer_sync(&priv->eee_ctrl_timer);
 487			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
 488			if (priv->hw->xpcs)
 489				xpcs_config_eee(priv->hw->xpcs,
 490						priv->plat->mult_fact_100ns,
 491						false);
 492		}
 493		mutex_unlock(&priv->lock);
 494		return false;
 495	}
 496
 497	if (priv->eee_active && !priv->eee_enabled) {
 498		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
 499		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
 500				     eee_tw_timer);
 501		if (priv->hw->xpcs)
 502			xpcs_config_eee(priv->hw->xpcs,
 503					priv->plat->mult_fact_100ns,
 504					true);
 505	}
 506
 507	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
 508		del_timer_sync(&priv->eee_ctrl_timer);
 509		priv->tx_path_in_lpi_mode = false;
 510		stmmac_lpi_entry_timer_config(priv, 1);
 511	} else {
 512		stmmac_lpi_entry_timer_config(priv, 0);
 513		mod_timer(&priv->eee_ctrl_timer,
 514			  STMMAC_LPI_T(priv->tx_lpi_timer));
 515	}
 516
 517	mutex_unlock(&priv->lock);
 518	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
 519	return true;
 520}
 521
 522/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 523 * @priv: driver private structure
 524 * @p : descriptor pointer
 525 * @skb : the socket buffer
 526 * Description :
  527 * This function reads the timestamp from the descriptor, performs some
  528 * sanity checks and passes it to the stack.
 529 */
 530static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
 531				   struct dma_desc *p, struct sk_buff *skb)
 532{
 533	struct skb_shared_hwtstamps shhwtstamp;
 534	bool found = false;
 535	u64 ns = 0;
 536
 537	if (!priv->hwts_tx_en)
 538		return;
 539
 540	/* exit if skb doesn't support hw tstamp */
 541	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
 542		return;
 543
 544	/* check tx tstamp status */
 545	if (stmmac_get_tx_timestamp_status(priv, p)) {
 546		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
 547		found = true;
 548	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
 549		found = true;
 550	}
 551
 552	if (found) {
 553		ns -= priv->plat->cdc_error_adj;
 554
 555		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 556		shhwtstamp.hwtstamp = ns_to_ktime(ns);
 557
 558		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
 559		/* pass tstamp to stack */
 560		skb_tstamp_tx(skb, &shhwtstamp);
 561	}
 562}
 563
 564/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 565 * @priv: driver private structure
 566 * @p : descriptor pointer
 567 * @np : next descriptor pointer
 568 * @skb : the socket buffer
 569 * Description :
 570 * This function will read received packet's timestamp from the descriptor
  571 * and pass it to the stack. It also performs some sanity checks.
 572 */
 573static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
 574				   struct dma_desc *np, struct sk_buff *skb)
 575{
 576	struct skb_shared_hwtstamps *shhwtstamp = NULL;
 577	struct dma_desc *desc = p;
 578	u64 ns = 0;
 579
 580	if (!priv->hwts_rx_en)
 581		return;
 582	/* For GMAC4, the valid timestamp is from CTX next desc. */
 583	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
 584		desc = np;
 585
 586	/* Check if timestamp is available */
 587	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
 588		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
 589
 590		ns -= priv->plat->cdc_error_adj;
 591
 592		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
 593		shhwtstamp = skb_hwtstamps(skb);
 594		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 595		shhwtstamp->hwtstamp = ns_to_ktime(ns);
 596	} else  {
 597		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
 598	}
 599}
 600
 601/**
 602 *  stmmac_hwtstamp_set - control hardware timestamping.
 603 *  @dev: device pointer.
 604 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 605 *  a proprietary structure used to pass information to the driver.
 606 *  Description:
 607 *  This function configures the MAC to enable/disable both outgoing(TX)
 608 *  and incoming(RX) packets time stamping based on user input.
 609 *  Return Value:
 610 *  0 on success and an appropriate -ve integer on failure.
 611 */
 612static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 613{
 614	struct stmmac_priv *priv = netdev_priv(dev);
 615	struct hwtstamp_config config;
 616	u32 ptp_v2 = 0;
 617	u32 tstamp_all = 0;
 618	u32 ptp_over_ipv4_udp = 0;
 619	u32 ptp_over_ipv6_udp = 0;
 620	u32 ptp_over_ethernet = 0;
 621	u32 snap_type_sel = 0;
 622	u32 ts_master_en = 0;
 623	u32 ts_event_en = 0;
 624
 625	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
 626		netdev_alert(priv->dev, "No support for HW time stamping\n");
 627		priv->hwts_tx_en = 0;
 628		priv->hwts_rx_en = 0;
 629
 630		return -EOPNOTSUPP;
 631	}
 632
 633	if (copy_from_user(&config, ifr->ifr_data,
 634			   sizeof(config)))
 635		return -EFAULT;
 636
 637	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
 638		   __func__, config.flags, config.tx_type, config.rx_filter);
 639
 640	if (config.tx_type != HWTSTAMP_TX_OFF &&
 641	    config.tx_type != HWTSTAMP_TX_ON)
 642		return -ERANGE;
 643
 644	if (priv->adv_ts) {
 645		switch (config.rx_filter) {
 646		case HWTSTAMP_FILTER_NONE:
 647			/* time stamp no incoming packet at all */
 648			config.rx_filter = HWTSTAMP_FILTER_NONE;
 649			break;
 650
 651		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 652			/* PTP v1, UDP, any kind of event packet */
 653			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 654			/* 'xmac' hardware can support Sync, Pdelay_Req and
 655			 * Pdelay_resp by setting bit14 and bits17/16 to 01
 656			 * This leaves Delay_Req timestamps out.
 657			 * Enable all events *and* general purpose message
 658			 * timestamping
 659			 */
 660			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 661			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 662			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 663			break;
 664
 665		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 666			/* PTP v1, UDP, Sync packet */
 667			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
 668			/* take time stamp for SYNC messages only */
 669			ts_event_en = PTP_TCR_TSEVNTENA;
 670
 671			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 672			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 673			break;
 674
 675		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 676			/* PTP v1, UDP, Delay_req packet */
 677			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
 678			/* take time stamp for Delay_Req messages only */
 679			ts_master_en = PTP_TCR_TSMSTRENA;
 680			ts_event_en = PTP_TCR_TSEVNTENA;
 681
 682			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 683			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 684			break;
 685
 686		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 687			/* PTP v2, UDP, any kind of event packet */
 688			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
 689			ptp_v2 = PTP_TCR_TSVER2ENA;
 690			/* take time stamp for all event messages */
 691			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 692
 693			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 694			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 695			break;
 696
 697		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
 698			/* PTP v2, UDP, Sync packet */
 699			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
 700			ptp_v2 = PTP_TCR_TSVER2ENA;
 701			/* take time stamp for SYNC messages only */
 702			ts_event_en = PTP_TCR_TSEVNTENA;
 703
 704			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 705			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 706			break;
 707
 708		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 709			/* PTP v2, UDP, Delay_req packet */
 710			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
 711			ptp_v2 = PTP_TCR_TSVER2ENA;
 712			/* take time stamp for Delay_Req messages only */
 713			ts_master_en = PTP_TCR_TSMSTRENA;
 714			ts_event_en = PTP_TCR_TSEVNTENA;
 715
 716			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 717			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 718			break;
 719
 720		case HWTSTAMP_FILTER_PTP_V2_EVENT:
 721			/* PTP v2/802.AS1 any layer, any kind of event packet */
 722			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 723			ptp_v2 = PTP_TCR_TSVER2ENA;
 724			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 725			if (priv->synopsys_id < DWMAC_CORE_4_10)
 726				ts_event_en = PTP_TCR_TSEVNTENA;
 727			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 728			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 729			ptp_over_ethernet = PTP_TCR_TSIPENA;
 730			break;
 731
 732		case HWTSTAMP_FILTER_PTP_V2_SYNC:
 733			/* PTP v2/802.AS1, any layer, Sync packet */
 734			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
 735			ptp_v2 = PTP_TCR_TSVER2ENA;
 736			/* take time stamp for SYNC messages only */
 737			ts_event_en = PTP_TCR_TSEVNTENA;
 738
 739			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 740			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 741			ptp_over_ethernet = PTP_TCR_TSIPENA;
 742			break;
 743
 744		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 745			/* PTP v2/802.AS1, any layer, Delay_req packet */
 746			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
 747			ptp_v2 = PTP_TCR_TSVER2ENA;
 748			/* take time stamp for Delay_Req messages only */
 749			ts_master_en = PTP_TCR_TSMSTRENA;
 750			ts_event_en = PTP_TCR_TSEVNTENA;
 751
 752			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 753			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 754			ptp_over_ethernet = PTP_TCR_TSIPENA;
 755			break;
 756
 757		case HWTSTAMP_FILTER_NTP_ALL:
 758		case HWTSTAMP_FILTER_ALL:
 759			/* time stamp any incoming packet */
 760			config.rx_filter = HWTSTAMP_FILTER_ALL;
 761			tstamp_all = PTP_TCR_TSENALL;
 762			break;
 763
 764		default:
 765			return -ERANGE;
 766		}
 767	} else {
 768		switch (config.rx_filter) {
 769		case HWTSTAMP_FILTER_NONE:
 770			config.rx_filter = HWTSTAMP_FILTER_NONE;
 771			break;
 772		default:
 773			/* PTP v1, UDP, any kind of event packet */
 774			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 775			break;
 776		}
 777	}
 778	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
 779	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
 780
 781	priv->systime_flags = STMMAC_HWTS_ACTIVE;
 782
 783	if (priv->hwts_tx_en || priv->hwts_rx_en) {
 784		priv->systime_flags |= tstamp_all | ptp_v2 |
 785				       ptp_over_ethernet | ptp_over_ipv6_udp |
 786				       ptp_over_ipv4_udp | ts_event_en |
 787				       ts_master_en | snap_type_sel;
 788	}
 789
 790	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
 791
 792	memcpy(&priv->tstamp_config, &config, sizeof(config));
 793
 794	return copy_to_user(ifr->ifr_data, &config,
 795			    sizeof(config)) ? -EFAULT : 0;
 796}
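/* Illustrative userspace sketch (the interface name and socket fd are
 * placeholders; needs <linux/net_tstamp.h> and <linux/sockios.h>): this
 * handler is reached through the standard SIOCSHWTSTAMP ioctl, e.g.
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr = { 0 };
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *           perror("SIOCSHWTSTAMP");
 */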
 797
 798/**
 799 *  stmmac_hwtstamp_get - read hardware timestamping.
 800 *  @dev: device pointer.
 801 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 802 *  a proprietary structure used to pass information to the driver.
 803 *  Description:
  804 *  This function obtains the current hardware timestamping settings
 805 *  as requested.
 806 */
 807static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 808{
 809	struct stmmac_priv *priv = netdev_priv(dev);
 810	struct hwtstamp_config *config = &priv->tstamp_config;
 811
 812	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
 813		return -EOPNOTSUPP;
 814
 815	return copy_to_user(ifr->ifr_data, config,
 816			    sizeof(*config)) ? -EFAULT : 0;
 817}
 818
 819/**
 820 * stmmac_init_tstamp_counter - init hardware timestamping counter
 821 * @priv: driver private structure
 822 * @systime_flags: timestamping flags
 823 * Description:
 824 * Initialize hardware counter for packet timestamping.
 825 * This is valid as long as the interface is open and not suspended.
  826 * It is rerun after resuming from suspend, in which case the timestamping
 827 * flags updated by stmmac_hwtstamp_set() also need to be restored.
 828 */
 829int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
 830{
 831	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
 832	struct timespec64 now;
 833	u32 sec_inc = 0;
 834	u64 temp = 0;
 835
 836	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
 837		return -EOPNOTSUPP;
 838
 839	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
 840	priv->systime_flags = systime_flags;
 841
 842	/* program Sub Second Increment reg */
 843	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
 844					   priv->plat->clk_ptp_rate,
 845					   xmac, &sec_inc);
 846	temp = div_u64(1000000000ULL, sec_inc);
 847
 848	/* Store sub second increment for later use */
 849	priv->sub_second_inc = sec_inc;
 850
 851	/* calculate default added value:
 852	 * formula is :
 853	 * addend = (2^32)/freq_div_ratio;
 854	 * where, freq_div_ratio = 1e9ns/sec_inc
 855	 */
 856	temp = (u64)(temp << 32);
 857	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
 858	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
 859
 860	/* initialize system time */
 861	ktime_get_real_ts64(&now);
 862
 863	/* lower 32 bits of tv_sec are safe until y2106 */
 864	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
 865
 866	return 0;
 867}
 868EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
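/* Illustrative example (assuming the sub-second-increment helper selects
 * sec_inc = 2e9 / clk_ptp_rate in fine-update mode, as its comments
 * describe): with clk_ptp_rate = 50 MHz, sec_inc = 40 ns, so
 * temp = 1e9 / 40 = 25,000,000 and
 *
 *   default_addend = (25,000,000 << 32) / 50,000,000 = 2^31 = 0x80000000
 *
 * i.e. the accumulator sits at mid-range, leaving headroom for the PTP
 * frequency corrections later applied via stmmac_config_addend().
 */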
 869
 870/**
 871 * stmmac_init_ptp - init PTP
 872 * @priv: driver private structure
 873 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 874 * This is done by looking at the HW cap. register.
 875 * This function also registers the ptp driver.
 876 */
 877static int stmmac_init_ptp(struct stmmac_priv *priv)
 878{
 879	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
 880	int ret;
 881
 882	if (priv->plat->ptp_clk_freq_config)
 883		priv->plat->ptp_clk_freq_config(priv);
 884
 885	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
 886	if (ret)
 887		return ret;
 888
 889	priv->adv_ts = 0;
 890	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
 891	if (xmac && priv->dma_cap.atime_stamp)
 892		priv->adv_ts = 1;
 893	/* Dwmac 3.x core with extend_desc can support adv_ts */
 894	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
 895		priv->adv_ts = 1;
 896
 897	if (priv->dma_cap.time_stamp)
 898		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
 899
 900	if (priv->adv_ts)
 901		netdev_info(priv->dev,
 902			    "IEEE 1588-2008 Advanced Timestamp supported\n");
 903
 
 904	priv->hwts_tx_en = 0;
 905	priv->hwts_rx_en = 0;
 906
 907	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
 908		stmmac_hwtstamp_correct_latency(priv, priv);
 909
 910	return 0;
 911}
 912
 913static void stmmac_release_ptp(struct stmmac_priv *priv)
 914{
 915	clk_disable_unprepare(priv->plat->clk_ptp_ref);
 916	stmmac_ptp_unregister(priv);
 917}
 918
 919/**
 920 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 921 *  @priv: driver private structure
 922 *  @duplex: duplex passed to the next function
 923 *  Description: It is used for configuring the flow control in all queues
 924 */
 925static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
 926{
 927	u32 tx_cnt = priv->plat->tx_queues_to_use;
 928
 929	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
 930			priv->pause, tx_cnt);
 931}
 932
 933static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
 934					 phy_interface_t interface)
 935{
 936	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
 937
 938	/* Refresh the MAC-specific capabilities */
 939	stmmac_mac_update_caps(priv);
 940
 941	config->mac_capabilities = priv->hw->link.caps;
 942
 943	if (priv->plat->max_speed)
 944		phylink_limit_mac_speed(config, priv->plat->max_speed);
 945
 946	return config->mac_capabilities;
 947}
 948
 949static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
 950						 phy_interface_t interface)
 951{
 952	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
 953	struct phylink_pcs *pcs;
 954
 955	if (priv->plat->select_pcs) {
 956		pcs = priv->plat->select_pcs(priv, interface);
 957		if (!IS_ERR(pcs))
 958			return pcs;
 959	}
 960
 961	return NULL;
 962}
 963
 964static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
 965			      const struct phylink_link_state *state)
 966{
 967	/* Nothing to do, xpcs_config() handles everything */
 968}
 969
 970static void stmmac_mac_link_down(struct phylink_config *config,
 971				 unsigned int mode, phy_interface_t interface)
 972{
 973	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
 974
 975	stmmac_mac_set(priv, priv->ioaddr, false);
 976	priv->eee_active = false;
 977	priv->tx_lpi_enabled = false;
 978	priv->eee_enabled = stmmac_eee_init(priv);
 979	stmmac_set_eee_pls(priv, priv->hw, false);
 980
 981	if (stmmac_fpe_supported(priv))
 982		stmmac_fpe_link_state_handle(priv, false);
 983}
 984
 985static void stmmac_mac_link_up(struct phylink_config *config,
 986			       struct phy_device *phy,
 987			       unsigned int mode, phy_interface_t interface,
 988			       int speed, int duplex,
 989			       bool tx_pause, bool rx_pause)
 990{
 991	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
 992	u32 old_ctrl, ctrl;
 993
 994	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
 995	    priv->plat->serdes_powerup)
 996		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
 997
 998	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
 999	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1000
1001	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1002		switch (speed) {
1003		case SPEED_10000:
1004			ctrl |= priv->hw->link.xgmii.speed10000;
1005			break;
1006		case SPEED_5000:
1007			ctrl |= priv->hw->link.xgmii.speed5000;
1008			break;
1009		case SPEED_2500:
1010			ctrl |= priv->hw->link.xgmii.speed2500;
1011			break;
1012		default:
1013			return;
1014		}
1015	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1016		switch (speed) {
1017		case SPEED_100000:
1018			ctrl |= priv->hw->link.xlgmii.speed100000;
1019			break;
1020		case SPEED_50000:
1021			ctrl |= priv->hw->link.xlgmii.speed50000;
1022			break;
1023		case SPEED_40000:
1024			ctrl |= priv->hw->link.xlgmii.speed40000;
1025			break;
1026		case SPEED_25000:
1027			ctrl |= priv->hw->link.xlgmii.speed25000;
1028			break;
1029		case SPEED_10000:
1030			ctrl |= priv->hw->link.xgmii.speed10000;
1031			break;
1032		case SPEED_2500:
1033			ctrl |= priv->hw->link.speed2500;
1034			break;
1035		case SPEED_1000:
1036			ctrl |= priv->hw->link.speed1000;
1037			break;
1038		default:
1039			return;
1040		}
1041	} else {
1042		switch (speed) {
1043		case SPEED_2500:
1044			ctrl |= priv->hw->link.speed2500;
1045			break;
1046		case SPEED_1000:
1047			ctrl |= priv->hw->link.speed1000;
1048			break;
1049		case SPEED_100:
1050			ctrl |= priv->hw->link.speed100;
1051			break;
1052		case SPEED_10:
1053			ctrl |= priv->hw->link.speed10;
1054			break;
1055		default:
1056			return;
1057		}
1058	}
1059
1060	priv->speed = speed;
1061
1062	if (priv->plat->fix_mac_speed)
1063		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1064
1065	if (!duplex)
1066		ctrl &= ~priv->hw->link.duplex;
1067	else
1068		ctrl |= priv->hw->link.duplex;
1069
1070	/* Flow Control operation */
1071	if (rx_pause && tx_pause)
1072		priv->flow_ctrl = FLOW_AUTO;
1073	else if (rx_pause && !tx_pause)
1074		priv->flow_ctrl = FLOW_RX;
1075	else if (!rx_pause && tx_pause)
1076		priv->flow_ctrl = FLOW_TX;
1077	else
1078		priv->flow_ctrl = FLOW_OFF;
1079
1080	stmmac_mac_flow_ctrl(priv, duplex);
1081
1082	if (ctrl != old_ctrl)
1083		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1084
1085	stmmac_mac_set(priv, priv->ioaddr, true);
1086	if (phy && priv->dma_cap.eee) {
1087		priv->eee_active =
1088			phy_init_eee(phy, !(priv->plat->flags &
1089				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1090		priv->eee_enabled = stmmac_eee_init(priv);
1091		priv->tx_lpi_enabled = priv->eee_enabled;
1092		stmmac_set_eee_pls(priv, priv->hw, true);
1093	}
1094
1095	if (stmmac_fpe_supported(priv))
1096		stmmac_fpe_link_state_handle(priv, true);
1097
1098	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1099		stmmac_hwtstamp_correct_latency(priv, priv);
1100}
1101
1102static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1103	.mac_get_caps = stmmac_mac_get_caps,
1104	.mac_select_pcs = stmmac_mac_select_pcs,
1105	.mac_config = stmmac_mac_config,
1106	.mac_link_down = stmmac_mac_link_down,
1107	.mac_link_up = stmmac_mac_link_up,
1108};
1109
1110/**
1111 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1112 * @priv: driver private structure
1113 * Description: this is to verify if the HW supports the PCS.
1114 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1115 * configured for the TBI, RTBI, or SGMII PHY interface.
1116 */
1117static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1118{
1119	int interface = priv->plat->mac_interface;
1120
1121	if (priv->dma_cap.pcs) {
1122		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1123		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1124		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1125		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1126			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1127			priv->hw->pcs = STMMAC_PCS_RGMII;
1128		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1129			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1130			priv->hw->pcs = STMMAC_PCS_SGMII;
1131		}
1132	}
1133}
1134
1135/**
1136 * stmmac_init_phy - PHY initialization
1137 * @dev: net device structure
1138 * Description: it initializes the driver's PHY state, and attaches the PHY
1139 * to the mac driver.
1140 *  Return value:
1141 *  0 on success
1142 */
1143static int stmmac_init_phy(struct net_device *dev)
1144{
1145	struct stmmac_priv *priv = netdev_priv(dev);
1146	struct fwnode_handle *phy_fwnode;
1147	struct fwnode_handle *fwnode;
1148	int ret;
1149
1150	if (!phylink_expects_phy(priv->phylink))
1151		return 0;
1152
1153	fwnode = priv->plat->port_node;
1154	if (!fwnode)
1155		fwnode = dev_fwnode(priv->device);
1156
1157	if (fwnode)
1158		phy_fwnode = fwnode_get_phy_node(fwnode);
1159	else
1160		phy_fwnode = NULL;
1161
1162	/* Some DT bindings do not set-up the PHY handle. Let's try to
1163	 * manually parse it
1164	 */
1165	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1166		int addr = priv->plat->phy_addr;
1167		struct phy_device *phydev;
1168
1169		if (addr < 0) {
1170			netdev_err(priv->dev, "no phy found\n");
1171			return -ENODEV;
1172		}
1173
1174		phydev = mdiobus_get_phy(priv->mii, addr);
1175		if (!phydev) {
1176			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1177			return -ENODEV;
1178		}
1179
1180		if (priv->dma_cap.eee)
1181			phy_support_eee(phydev);
1182
1183		ret = phylink_connect_phy(priv->phylink, phydev);
1184	} else {
1185		fwnode_handle_put(phy_fwnode);
1186		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187	}
1188
1189	if (!priv->plat->pmt) {
1190		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191
1192		phylink_ethtool_get_wol(priv->phylink, &wol);
1193		device_set_wakeup_capable(priv->device, !!wol.supported);
1194		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195	}
1196
1197	return ret;
1198}
1199
1200static int stmmac_phy_setup(struct stmmac_priv *priv)
1201{
1202	struct stmmac_mdio_bus_data *mdio_bus_data;
1203	int mode = priv->plat->phy_interface;
1204	struct fwnode_handle *fwnode;
1205	struct phylink *phylink;
1206
1207	priv->phylink_config.dev = &priv->dev->dev;
1208	priv->phylink_config.type = PHYLINK_NETDEV;
1209	priv->phylink_config.mac_managed_pm = true;
1210
1211	/* Stmmac always requires an RX clock for hardware initialization */
1212	priv->phylink_config.mac_requires_rxc = true;
1213
1214	mdio_bus_data = priv->plat->mdio_bus_data;
1215	if (mdio_bus_data)
1216		priv->phylink_config.default_an_inband =
1217			mdio_bus_data->default_an_inband;
1218
1219	/* Set the platform/firmware specified interface mode. Note, phylink
1220	 * deals with the PHY interface mode, not the MAC interface mode.
1221	 */
1222	__set_bit(mode, priv->phylink_config.supported_interfaces);
1223
1224	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1225	if (priv->hw->xpcs)
1226		xpcs_get_interfaces(priv->hw->xpcs,
1227				    priv->phylink_config.supported_interfaces);
1228
1229	fwnode = priv->plat->port_node;
1230	if (!fwnode)
1231		fwnode = dev_fwnode(priv->device);
1232
1233	phylink = phylink_create(&priv->phylink_config, fwnode,
1234				 mode, &stmmac_phylink_mac_ops);
1235	if (IS_ERR(phylink))
1236		return PTR_ERR(phylink);
1237
1238	priv->phylink = phylink;
1239	return 0;
1240}
1241
1242static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1243				    struct stmmac_dma_conf *dma_conf)
1244{
1245	u32 rx_cnt = priv->plat->rx_queues_to_use;
1246	unsigned int desc_size;
1247	void *head_rx;
1248	u32 queue;
1249
1250	/* Display RX rings */
1251	for (queue = 0; queue < rx_cnt; queue++) {
1252		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1253
1254		pr_info("\tRX Queue %u rings\n", queue);
1255
1256		if (priv->extend_desc) {
1257			head_rx = (void *)rx_q->dma_erx;
1258			desc_size = sizeof(struct dma_extended_desc);
1259		} else {
1260			head_rx = (void *)rx_q->dma_rx;
1261			desc_size = sizeof(struct dma_desc);
1262		}
1263
1264		/* Display RX ring */
1265		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1266				    rx_q->dma_rx_phy, desc_size);
1267	}
1268}
1269
1270static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1271				    struct stmmac_dma_conf *dma_conf)
1272{
1273	u32 tx_cnt = priv->plat->tx_queues_to_use;
1274	unsigned int desc_size;
1275	void *head_tx;
1276	u32 queue;
1277
1278	/* Display TX rings */
1279	for (queue = 0; queue < tx_cnt; queue++) {
1280		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1281
1282		pr_info("\tTX Queue %d rings\n", queue);
1283
1284		if (priv->extend_desc) {
1285			head_tx = (void *)tx_q->dma_etx;
1286			desc_size = sizeof(struct dma_extended_desc);
1287		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1288			head_tx = (void *)tx_q->dma_entx;
1289			desc_size = sizeof(struct dma_edesc);
1290		} else {
1291			head_tx = (void *)tx_q->dma_tx;
1292			desc_size = sizeof(struct dma_desc);
1293		}
1294
1295		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1296				    tx_q->dma_tx_phy, desc_size);
1297	}
1298}
1299
1300static void stmmac_display_rings(struct stmmac_priv *priv,
1301				 struct stmmac_dma_conf *dma_conf)
1302{
1303	/* Display RX ring */
1304	stmmac_display_rx_rings(priv, dma_conf);
1305
1306	/* Display TX ring */
1307	stmmac_display_tx_rings(priv, dma_conf);
1308}
1309
1310static int stmmac_set_bfsize(int mtu, int bufsize)
1311{
1312	int ret = bufsize;
1313
1314	if (mtu >= BUF_SIZE_8KiB)
1315		ret = BUF_SIZE_16KiB;
1316	else if (mtu >= BUF_SIZE_4KiB)
1317		ret = BUF_SIZE_8KiB;
1318	else if (mtu >= BUF_SIZE_2KiB)
1319		ret = BUF_SIZE_4KiB;
1320	else if (mtu > DEFAULT_BUFSIZE)
1321		ret = BUF_SIZE_2KiB;
1322	else
1323		ret = DEFAULT_BUFSIZE;
1324
1325	return ret;
1326}
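/* Illustrative mapping: a standard 1500-byte MTU keeps DEFAULT_BUFSIZE
 * (1536), a 3000-byte MTU gets 4 KiB buffers, and a 9000-byte jumbo MTU
 * gets 16 KiB buffers because 9000 >= BUF_SIZE_8KiB.
 */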
1327
1328/**
1329 * stmmac_clear_rx_descriptors - clear RX descriptors
1330 * @priv: driver private structure
1331 * @dma_conf: structure to take the dma data
1332 * @queue: RX queue index
1333 * Description: this function is called to clear the RX descriptors
1334 * in case of both basic and extended descriptors are used.
1335 */
1336static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1337					struct stmmac_dma_conf *dma_conf,
1338					u32 queue)
1339{
1340	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1341	int i;
1342
1343	/* Clear the RX descriptors */
1344	for (i = 0; i < dma_conf->dma_rx_size; i++)
1345		if (priv->extend_desc)
1346			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1347					priv->use_riwt, priv->mode,
1348					(i == dma_conf->dma_rx_size - 1),
1349					dma_conf->dma_buf_sz);
1350		else
1351			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1352					priv->use_riwt, priv->mode,
1353					(i == dma_conf->dma_rx_size - 1),
1354					dma_conf->dma_buf_sz);
1355}
1356
1357/**
1358 * stmmac_clear_tx_descriptors - clear tx descriptors
1359 * @priv: driver private structure
1360 * @dma_conf: structure to take the dma data
1361 * @queue: TX queue index.
1362 * Description: this function is called to clear the TX descriptors
 1363 * whether basic or extended descriptors are used.
1364 */
1365static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1366					struct stmmac_dma_conf *dma_conf,
1367					u32 queue)
1368{
1369	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1370	int i;
1371
1372	/* Clear the TX descriptors */
1373	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1374		int last = (i == (dma_conf->dma_tx_size - 1));
1375		struct dma_desc *p;
1376
1377		if (priv->extend_desc)
1378			p = &tx_q->dma_etx[i].basic;
1379		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1380			p = &tx_q->dma_entx[i].basic;
1381		else
1382			p = &tx_q->dma_tx[i];
1383
1384		stmmac_init_tx_desc(priv, p, priv->mode, last);
1385	}
1386}
1387
1388/**
1389 * stmmac_clear_descriptors - clear descriptors
1390 * @priv: driver private structure
1391 * @dma_conf: structure to take the dma data
1392 * Description: this function is called to clear the TX and RX descriptors
 1393 * whether basic or extended descriptors are used.
1394 */
1395static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1396				     struct stmmac_dma_conf *dma_conf)
1397{
1398	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1399	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1400	u32 queue;
1401
1402	/* Clear the RX descriptors */
1403	for (queue = 0; queue < rx_queue_cnt; queue++)
1404		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1405
1406	/* Clear the TX descriptors */
1407	for (queue = 0; queue < tx_queue_cnt; queue++)
1408		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1409}
1410
1411/**
1412 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1413 * @priv: driver private structure
1414 * @dma_conf: structure to take the dma data
1415 * @p: descriptor pointer
1416 * @i: descriptor index
1417 * @flags: gfp flag
1418 * @queue: RX queue index
1419 * Description: this function is called to allocate a receive buffer, perform
1420 * the DMA mapping and init the descriptor.
1421 */
1422static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1423				  struct stmmac_dma_conf *dma_conf,
1424				  struct dma_desc *p,
1425				  int i, gfp_t flags, u32 queue)
1426{
1427	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1428	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1429	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1430
1431	if (priv->dma_cap.host_dma_width <= 32)
1432		gfp |= GFP_DMA32;
1433
1434	if (!buf->page) {
1435		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1436		if (!buf->page)
1437			return -ENOMEM;
1438		buf->page_offset = stmmac_rx_offset(priv);
1439	}
1440
1441	if (priv->sph && !buf->sec_page) {
1442		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1443		if (!buf->sec_page)
1444			return -ENOMEM;
1445
1446		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1447		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1448	} else {
1449		buf->sec_page = NULL;
1450		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1451	}
1452
1453	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1454
1455	stmmac_set_desc_addr(priv, p, buf->addr);
1456	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1457		stmmac_init_desc3(priv, p);
1458
1459	return 0;
1460}
1461
1462/**
1463 * stmmac_free_rx_buffer - free RX dma buffers
1464 * @priv: private structure
1465 * @rx_q: RX queue
1466 * @i: buffer index.
1467 */
1468static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1469				  struct stmmac_rx_queue *rx_q,
1470				  int i)
1471{
1472	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1473
1474	if (buf->page)
1475		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1476	buf->page = NULL;
1477
1478	if (buf->sec_page)
1479		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1480	buf->sec_page = NULL;
1481}
1482
1483/**
 1484 * stmmac_free_tx_buffer - free TX dma buffers
1485 * @priv: private structure
1486 * @dma_conf: structure to take the dma data
 1487 * @queue: TX queue index
1488 * @i: buffer index.
1489 */
1490static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1491				  struct stmmac_dma_conf *dma_conf,
1492				  u32 queue, int i)
1493{
1494	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1495
1496	if (tx_q->tx_skbuff_dma[i].buf &&
1497	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1498		if (tx_q->tx_skbuff_dma[i].map_as_page)
1499			dma_unmap_page(priv->device,
1500				       tx_q->tx_skbuff_dma[i].buf,
1501				       tx_q->tx_skbuff_dma[i].len,
1502				       DMA_TO_DEVICE);
1503		else
1504			dma_unmap_single(priv->device,
1505					 tx_q->tx_skbuff_dma[i].buf,
1506					 tx_q->tx_skbuff_dma[i].len,
1507					 DMA_TO_DEVICE);
1508	}
1509
1510	if (tx_q->xdpf[i] &&
1511	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1512	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1513		xdp_return_frame(tx_q->xdpf[i]);
1514		tx_q->xdpf[i] = NULL;
1515	}
1516
1517	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1518		tx_q->xsk_frames_done++;
1519
1520	if (tx_q->tx_skbuff[i] &&
1521	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1522		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1523		tx_q->tx_skbuff[i] = NULL;
1524	}
1525
1526	tx_q->tx_skbuff_dma[i].buf = 0;
1527	tx_q->tx_skbuff_dma[i].map_as_page = false;
1528}
1529
1530/**
1531 * dma_free_rx_skbufs - free RX dma buffers
1532 * @priv: private structure
1533 * @dma_conf: structure to take the dma data
1534 * @queue: RX queue index
1535 */
1536static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1537			       struct stmmac_dma_conf *dma_conf,
1538			       u32 queue)
1539{
1540	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1541	int i;
1542
1543	for (i = 0; i < dma_conf->dma_rx_size; i++)
1544		stmmac_free_rx_buffer(priv, rx_q, i);
1545}
1546
1547static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1548				   struct stmmac_dma_conf *dma_conf,
1549				   u32 queue, gfp_t flags)
1550{
1551	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1552	int i;
1553
1554	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1555		struct dma_desc *p;
1556		int ret;
1557
1558		if (priv->extend_desc)
1559			p = &((rx_q->dma_erx + i)->basic);
1560		else
1561			p = rx_q->dma_rx + i;
1562
1563		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1564					     queue);
1565		if (ret)
1566			return ret;
1567
1568		rx_q->buf_alloc_num++;
1569	}
1570
1571	return 0;
1572}
1573
1574/**
1575 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1576 * @priv: private structure
1577 * @dma_conf: structure to take the dma data
1578 * @queue: RX queue index
1579 */
1580static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1581				struct stmmac_dma_conf *dma_conf,
1582				u32 queue)
1583{
1584	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1585	int i;
1586
1587	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1588		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1589
1590		if (!buf->xdp)
1591			continue;
1592
1593		xsk_buff_free(buf->xdp);
1594		buf->xdp = NULL;
1595	}
1596}
1597
1598static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1599				      struct stmmac_dma_conf *dma_conf,
1600				      u32 queue)
1601{
1602	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1603	int i;
1604
1605	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1606	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
 1607	 * use this macro to make sure there are no size violations.
1608	 */
1609	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1610
1611	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1612		struct stmmac_rx_buffer *buf;
1613		dma_addr_t dma_addr;
1614		struct dma_desc *p;
1615
1616		if (priv->extend_desc)
1617			p = (struct dma_desc *)(rx_q->dma_erx + i);
1618		else
1619			p = rx_q->dma_rx + i;
1620
1621		buf = &rx_q->buf_pool[i];
1622
1623		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1624		if (!buf->xdp)
1625			return -ENOMEM;
1626
1627		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1628		stmmac_set_desc_addr(priv, p, dma_addr);
1629		rx_q->buf_alloc_num++;
1630	}
1631
1632	return 0;
1633}
1634
1635static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1636{
1637	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1638		return NULL;
1639
1640	return xsk_get_pool_from_qid(priv->dev, queue);
1641}
1642
1643/**
1644 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1645 * @priv: driver private structure
1646 * @dma_conf: structure to take the dma data
1647 * @queue: RX queue index
1648 * @flags: gfp flag.
1649 * Description: this function initializes the DMA RX descriptors
1650 * and allocates the socket buffers. It supports the chained and ring
1651 * modes.
1652 */
1653static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1654				    struct stmmac_dma_conf *dma_conf,
1655				    u32 queue, gfp_t flags)
1656{
1657	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1658	int ret;
1659
1660	netif_dbg(priv, probe, priv->dev,
1661		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1662		  (u32)rx_q->dma_rx_phy);
1663
1664	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1665
1666	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1667
1668	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1669
1670	if (rx_q->xsk_pool) {
1671		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1672						   MEM_TYPE_XSK_BUFF_POOL,
1673						   NULL));
1674		netdev_info(priv->dev,
1675			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1676			    rx_q->queue_index);
1677		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1678	} else {
1679		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1680						   MEM_TYPE_PAGE_POOL,
1681						   rx_q->page_pool));
1682		netdev_info(priv->dev,
1683			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1684			    rx_q->queue_index);
1685	}
1686
1687	if (rx_q->xsk_pool) {
1688		/* RX XDP ZC buffer pool may not be populated, e.g.
1689		 * xdpsock TX-only.
1690		 */
1691		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1692	} else {
1693		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1694		if (ret < 0)
1695			return -ENOMEM;
1696	}
1697
1698	/* Setup the chained descriptor addresses */
1699	if (priv->mode == STMMAC_CHAIN_MODE) {
1700		if (priv->extend_desc)
1701			stmmac_mode_init(priv, rx_q->dma_erx,
1702					 rx_q->dma_rx_phy,
1703					 dma_conf->dma_rx_size, 1);
1704		else
1705			stmmac_mode_init(priv, rx_q->dma_rx,
1706					 rx_q->dma_rx_phy,
1707					 dma_conf->dma_rx_size, 0);
1708	}
1709
1710	return 0;
1711}
1712
1713static int init_dma_rx_desc_rings(struct net_device *dev,
1714				  struct stmmac_dma_conf *dma_conf,
1715				  gfp_t flags)
1716{
1717	struct stmmac_priv *priv = netdev_priv(dev);
1718	u32 rx_count = priv->plat->rx_queues_to_use;
1719	int queue;
1720	int ret;
1721
1722	/* RX INITIALIZATION */
1723	netif_dbg(priv, probe, priv->dev,
1724		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1725
1726	for (queue = 0; queue < rx_count; queue++) {
1727		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1728		if (ret)
1729			goto err_init_rx_buffers;
1730	}
1731
1732	return 0;
1733
1734err_init_rx_buffers:
1735	while (queue >= 0) {
1736		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1737
1738		if (rx_q->xsk_pool)
1739			dma_free_rx_xskbufs(priv, dma_conf, queue);
1740		else
1741			dma_free_rx_skbufs(priv, dma_conf, queue);
1742
1743		rx_q->buf_alloc_num = 0;
1744		rx_q->xsk_pool = NULL;
1745
1746		queue--;
1747	}
1748
1749	return ret;
1750}
1751
1752/**
1753 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1754 * @priv: driver private structure
1755 * @dma_conf: structure to take the dma data
1756 * @queue: TX queue index
1757 * Description: this function initializes the DMA TX descriptors and the
1758 * per-descriptor TX bookkeeping entries. It supports the chained and ring
1759 * modes.
1760 */
1761static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1762				    struct stmmac_dma_conf *dma_conf,
1763				    u32 queue)
1764{
1765	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1766	int i;
1767
1768	netif_dbg(priv, probe, priv->dev,
1769		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1770		  (u32)tx_q->dma_tx_phy);
1771
1772	/* Setup the chained descriptor addresses */
1773	if (priv->mode == STMMAC_CHAIN_MODE) {
1774		if (priv->extend_desc)
1775			stmmac_mode_init(priv, tx_q->dma_etx,
1776					 tx_q->dma_tx_phy,
1777					 dma_conf->dma_tx_size, 1);
1778		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1779			stmmac_mode_init(priv, tx_q->dma_tx,
1780					 tx_q->dma_tx_phy,
1781					 dma_conf->dma_tx_size, 0);
1782	}
1783
1784	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1785
1786	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1787		struct dma_desc *p;
1788
1789		if (priv->extend_desc)
1790			p = &((tx_q->dma_etx + i)->basic);
1791		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1792			p = &((tx_q->dma_entx + i)->basic);
1793		else
1794			p = tx_q->dma_tx + i;
1795
1796		stmmac_clear_desc(priv, p);
1797
1798		tx_q->tx_skbuff_dma[i].buf = 0;
1799		tx_q->tx_skbuff_dma[i].map_as_page = false;
1800		tx_q->tx_skbuff_dma[i].len = 0;
1801		tx_q->tx_skbuff_dma[i].last_segment = false;
1802		tx_q->tx_skbuff[i] = NULL;
1803	}
1804
1805	return 0;
1806}
1807
1808static int init_dma_tx_desc_rings(struct net_device *dev,
1809				  struct stmmac_dma_conf *dma_conf)
1810{
1811	struct stmmac_priv *priv = netdev_priv(dev);
1812	u32 tx_queue_cnt;
1813	u32 queue;
1814
1815	tx_queue_cnt = priv->plat->tx_queues_to_use;
1816
1817	for (queue = 0; queue < tx_queue_cnt; queue++)
1818		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1819
1820	return 0;
1821}
1822
1823/**
1824 * init_dma_desc_rings - init the RX/TX descriptor rings
1825 * @dev: net device structure
1826 * @dma_conf: structure to take the dma data
1827 * @flags: gfp flag.
1828 * Description: this function initializes the DMA RX/TX descriptors
1829 * and allocates the socket buffers. It supports the chained and ring
1830 * modes.
1831 */
1832static int init_dma_desc_rings(struct net_device *dev,
1833			       struct stmmac_dma_conf *dma_conf,
1834			       gfp_t flags)
1835{
1836	struct stmmac_priv *priv = netdev_priv(dev);
1837	int ret;
1838
1839	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1840	if (ret)
1841		return ret;
1842
1843	ret = init_dma_tx_desc_rings(dev, dma_conf);
1844
1845	stmmac_clear_descriptors(priv, dma_conf);
1846
1847	if (netif_msg_hw(priv))
1848		stmmac_display_rings(priv, dma_conf);
1849
1850	return ret;
1851}
1852
1853/**
1854 * dma_free_tx_skbufs - free TX dma buffers
1855 * @priv: private structure
1856 * @dma_conf: structure to take the dma data
1857 * @queue: TX queue index
1858 */
1859static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1860			       struct stmmac_dma_conf *dma_conf,
1861			       u32 queue)
1862{
1863	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1864	int i;
1865
1866	tx_q->xsk_frames_done = 0;
1867
1868	for (i = 0; i < dma_conf->dma_tx_size; i++)
1869		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1870
1871	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1872		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1873		tx_q->xsk_frames_done = 0;
1874		tx_q->xsk_pool = NULL;
1875	}
1876}
1877
1878/**
1879 * stmmac_free_tx_skbufs - free TX skb buffers
1880 * @priv: private structure
1881 */
1882static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1883{
1884	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1885	u32 queue;
1886
1887	for (queue = 0; queue < tx_queue_cnt; queue++)
1888		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1889}
1890
1891/**
1892 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1893 * @priv: private structure
1894 * @dma_conf: structure to take the dma data
1895 * @queue: RX queue index
1896 */
1897static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1898					 struct stmmac_dma_conf *dma_conf,
1899					 u32 queue)
1900{
1901	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1902
1903	/* Release the DMA RX socket buffers */
1904	if (rx_q->xsk_pool)
1905		dma_free_rx_xskbufs(priv, dma_conf, queue);
1906	else
1907		dma_free_rx_skbufs(priv, dma_conf, queue);
1908
1909	rx_q->buf_alloc_num = 0;
1910	rx_q->xsk_pool = NULL;
1911
1912	/* Free DMA regions of consistent memory previously allocated */
1913	if (!priv->extend_desc)
1914		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1915				  sizeof(struct dma_desc),
1916				  rx_q->dma_rx, rx_q->dma_rx_phy);
1917	else
1918		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1919				  sizeof(struct dma_extended_desc),
1920				  rx_q->dma_erx, rx_q->dma_rx_phy);
1921
1922	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1923		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1924
1925	kfree(rx_q->buf_pool);
1926	if (rx_q->page_pool)
1927		page_pool_destroy(rx_q->page_pool);
1928}
1929
1930static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1931				       struct stmmac_dma_conf *dma_conf)
1932{
1933	u32 rx_count = priv->plat->rx_queues_to_use;
1934	u32 queue;
1935
1936	/* Free RX queue resources */
1937	for (queue = 0; queue < rx_count; queue++)
1938		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1939}
1940
1941/**
1942 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1943 * @priv: private structure
1944 * @dma_conf: structure to take the dma data
1945 * @queue: TX queue index
1946 */
1947static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1948					 struct stmmac_dma_conf *dma_conf,
1949					 u32 queue)
1950{
1951	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1952	size_t size;
1953	void *addr;
1954
1955	/* Release the DMA TX socket buffers */
1956	dma_free_tx_skbufs(priv, dma_conf, queue);
1957
1958	if (priv->extend_desc) {
1959		size = sizeof(struct dma_extended_desc);
1960		addr = tx_q->dma_etx;
1961	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1962		size = sizeof(struct dma_edesc);
1963		addr = tx_q->dma_entx;
1964	} else {
1965		size = sizeof(struct dma_desc);
1966		addr = tx_q->dma_tx;
1967	}
1968
1969	size *= dma_conf->dma_tx_size;
1970
1971	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1972
1973	kfree(tx_q->tx_skbuff_dma);
1974	kfree(tx_q->tx_skbuff);
1975}
1976
1977static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1978				       struct stmmac_dma_conf *dma_conf)
1979{
1980	u32 tx_count = priv->plat->tx_queues_to_use;
1981	u32 queue;
1982
1983	/* Free TX queue resources */
1984	for (queue = 0; queue < tx_count; queue++)
1985		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1986}
1987
1988/**
1989 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1990 * @priv: private structure
1991 * @dma_conf: structure to take the dma data
1992 * @queue: RX queue index
1993 * Description: according to which descriptor type is used (extended or
1994 * basic), this function allocates the resources for the RX path. For
1995 * reception it pre-allocates the page pool and the RX buffer bookkeeping
1996 * in order to allow the zero-copy mechanism.
1997 */
1998static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
1999					 struct stmmac_dma_conf *dma_conf,
2000					 u32 queue)
2001{
2002	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2003	struct stmmac_channel *ch = &priv->channel[queue];
2004	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2005	struct page_pool_params pp_params = { 0 };
2006	unsigned int num_pages;
2007	unsigned int napi_id;
2008	int ret;
2009
2010	rx_q->queue_index = queue;
2011	rx_q->priv_data = priv;
2012
2013	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2014	pp_params.pool_size = dma_conf->dma_rx_size;
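	/* Size each page pool entry so that a single entry can hold one full
	 * DMA buffer: the order is derived from the number of PAGE_SIZE
	 * pages needed for dma_buf_sz.
	 */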
2015	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2016	pp_params.order = ilog2(num_pages);
2017	pp_params.nid = dev_to_node(priv->device);
2018	pp_params.dev = priv->device;
2019	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2020	pp_params.offset = stmmac_rx_offset(priv);
2021	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2022
2023	rx_q->page_pool = page_pool_create(&pp_params);
2024	if (IS_ERR(rx_q->page_pool)) {
2025		ret = PTR_ERR(rx_q->page_pool);
2026		rx_q->page_pool = NULL;
2027		return ret;
2028	}
2029
2030	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2031				 sizeof(*rx_q->buf_pool),
2032				 GFP_KERNEL);
2033	if (!rx_q->buf_pool)
2034		return -ENOMEM;
2035
2036	if (priv->extend_desc) {
2037		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2038						   dma_conf->dma_rx_size *
2039						   sizeof(struct dma_extended_desc),
2040						   &rx_q->dma_rx_phy,
2041						   GFP_KERNEL);
2042		if (!rx_q->dma_erx)
2043			return -ENOMEM;
2044
2045	} else {
2046		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2047						  dma_conf->dma_rx_size *
2048						  sizeof(struct dma_desc),
2049						  &rx_q->dma_rx_phy,
2050						  GFP_KERNEL);
2051		if (!rx_q->dma_rx)
2052			return -ENOMEM;
2053	}
2054
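	/* Register the XDP RxQ against the NAPI context that will process it:
	 * the combined rx/tx NAPI when the queue runs in AF_XDP zero-copy
	 * mode, the plain RX NAPI otherwise.
	 */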
2055	if (stmmac_xdp_is_enabled(priv) &&
2056	    test_bit(queue, priv->af_xdp_zc_qps))
2057		napi_id = ch->rxtx_napi.napi_id;
2058	else
2059		napi_id = ch->rx_napi.napi_id;
2060
2061	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2062			       rx_q->queue_index,
2063			       napi_id);
2064	if (ret) {
2065		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2066		return -EINVAL;
2067	}
2068
2069	return 0;
2070}
2071
2072static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2073				       struct stmmac_dma_conf *dma_conf)
2074{
2075	u32 rx_count = priv->plat->rx_queues_to_use;
2076	u32 queue;
2077	int ret;
2078
2079	/* RX queues buffers and DMA */
2080	for (queue = 0; queue < rx_count; queue++) {
2081		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2082		if (ret)
2083			goto err_dma;
2084	}
2085
2086	return 0;
2087
2088err_dma:
2089	free_dma_rx_desc_resources(priv, dma_conf);
2090
2091	return ret;
2092}
2093
2094/**
2095 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2096 * @priv: private structure
2097 * @dma_conf: structure to take the dma data
2098 * @queue: TX queue index
2099 * Description: according to which descriptor type is used (extended,
2100 * enhanced for TBS, or basic), this function allocates the TX descriptor
2101 * ring and the per-descriptor bookkeeping arrays (tx_skbuff and
2102 * tx_skbuff_dma) for the given TX queue.
2103 */
2104static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2105					 struct stmmac_dma_conf *dma_conf,
2106					 u32 queue)
2107{
2108	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2109	size_t size;
2110	void *addr;
2111
2112	tx_q->queue_index = queue;
2113	tx_q->priv_data = priv;
2114
2115	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2116				      sizeof(*tx_q->tx_skbuff_dma),
2117				      GFP_KERNEL);
2118	if (!tx_q->tx_skbuff_dma)
2119		return -ENOMEM;
2120
2121	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2122				  sizeof(struct sk_buff *),
2123				  GFP_KERNEL);
2124	if (!tx_q->tx_skbuff)
2125		return -ENOMEM;
2126
2127	if (priv->extend_desc)
2128		size = sizeof(struct dma_extended_desc);
2129	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2130		size = sizeof(struct dma_edesc);
2131	else
2132		size = sizeof(struct dma_desc);
2133
2134	size *= dma_conf->dma_tx_size;
2135
2136	addr = dma_alloc_coherent(priv->device, size,
2137				  &tx_q->dma_tx_phy, GFP_KERNEL);
2138	if (!addr)
2139		return -ENOMEM;
2140
2141	if (priv->extend_desc)
2142		tx_q->dma_etx = addr;
2143	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2144		tx_q->dma_entx = addr;
2145	else
2146		tx_q->dma_tx = addr;
2147
2148	return 0;
2149}
2150
2151static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2152				       struct stmmac_dma_conf *dma_conf)
2153{
2154	u32 tx_count = priv->plat->tx_queues_to_use;
2155	u32 queue;
2156	int ret;
2157
2158	/* TX queues buffers and DMA */
2159	for (queue = 0; queue < tx_count; queue++) {
2160		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2161		if (ret)
2162			goto err_dma;
2163	}
2164
2165	return 0;
2166
2167err_dma:
2168	free_dma_tx_desc_resources(priv, dma_conf);
2169	return ret;
2170}
2171
2172/**
2173 * alloc_dma_desc_resources - alloc TX/RX resources.
2174 * @priv: private structure
2175 * @dma_conf: structure to take the dma data
2176 * Description: according to which descriptor type is used (extended or
2177 * basic), this function allocates the resources for the TX and RX paths.
2178 * In case of reception, for example, it pre-allocates the RX socket
2179 * buffers in order to allow the zero-copy mechanism.
2180 */
2181static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2182				    struct stmmac_dma_conf *dma_conf)
2183{
2184	/* RX Allocation */
2185	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2186
2187	if (ret)
2188		return ret;
2189
2190	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2191
2192	return ret;
2193}
2194
2195/**
2196 * free_dma_desc_resources - free dma desc resources
2197 * @priv: private structure
2198 * @dma_conf: structure to take the dma data
2199 */
2200static void free_dma_desc_resources(struct stmmac_priv *priv,
2201				    struct stmmac_dma_conf *dma_conf)
2202{
2203	/* Release the DMA TX socket buffers */
2204	free_dma_tx_desc_resources(priv, dma_conf);
2205
2206	/* Release the DMA RX socket buffers later
2207	 * to ensure all pending XDP_TX buffers are returned.
2208	 */
2209	free_dma_rx_desc_resources(priv, dma_conf);
2210}
2211
2212/**
2213 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2214 *  @priv: driver private structure
2215 *  Description: It is used for enabling the rx queues in the MAC
2216 */
2217static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2218{
2219	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2220	int queue;
2221	u8 mode;
2222
2223	for (queue = 0; queue < rx_queues_count; queue++) {
2224		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2225		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2226	}
2227}
2228
2229/**
2230 * stmmac_start_rx_dma - start RX DMA channel
2231 * @priv: driver private structure
2232 * @chan: RX channel index
2233 * Description:
2234 * This starts a RX DMA channel
2235 */
2236static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2237{
2238	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2239	stmmac_start_rx(priv, priv->ioaddr, chan);
2240}
2241
2242/**
2243 * stmmac_start_tx_dma - start TX DMA channel
2244 * @priv: driver private structure
2245 * @chan: TX channel index
2246 * Description:
2247 * This starts a TX DMA channel
2248 */
2249static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2250{
2251	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2252	stmmac_start_tx(priv, priv->ioaddr, chan);
2253}
2254
2255/**
2256 * stmmac_stop_rx_dma - stop RX DMA channel
2257 * @priv: driver private structure
2258 * @chan: RX channel index
2259 * Description:
2260 * This stops a RX DMA channel
2261 */
2262static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2263{
2264	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2265	stmmac_stop_rx(priv, priv->ioaddr, chan);
2266}
2267
2268/**
2269 * stmmac_stop_tx_dma - stop TX DMA channel
2270 * @priv: driver private structure
2271 * @chan: TX channel index
2272 * Description:
2273 * This stops a TX DMA channel
2274 */
2275static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2276{
2277	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2278	stmmac_stop_tx(priv, priv->ioaddr, chan);
2279}
2280
2281static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2282{
2283	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2284	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2285	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2286	u32 chan;
2287
2288	for (chan = 0; chan < dma_csr_ch; chan++) {
2289		struct stmmac_channel *ch = &priv->channel[chan];
2290		unsigned long flags;
2291
2292		spin_lock_irqsave(&ch->lock, flags);
2293		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2294		spin_unlock_irqrestore(&ch->lock, flags);
2295	}
2296}
2297
2298/**
2299 * stmmac_start_all_dma - start all RX and TX DMA channels
2300 * @priv: driver private structure
2301 * Description:
2302 * This starts all the RX and TX DMA channels
2303 */
2304static void stmmac_start_all_dma(struct stmmac_priv *priv)
2305{
2306	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308	u32 chan = 0;
2309
2310	for (chan = 0; chan < rx_channels_count; chan++)
2311		stmmac_start_rx_dma(priv, chan);
2312
2313	for (chan = 0; chan < tx_channels_count; chan++)
2314		stmmac_start_tx_dma(priv, chan);
2315}
2316
2317/**
2318 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2319 * @priv: driver private structure
2320 * Description:
2321 * This stops the RX and TX DMA channels
2322 */
2323static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2324{
2325	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2326	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2327	u32 chan = 0;
2328
2329	for (chan = 0; chan < rx_channels_count; chan++)
2330		stmmac_stop_rx_dma(priv, chan);
2331
2332	for (chan = 0; chan < tx_channels_count; chan++)
2333		stmmac_stop_tx_dma(priv, chan);
2334}
2335
2336/**
2337 *  stmmac_dma_operation_mode - HW DMA operation mode
2338 *  @priv: driver private structure
2339 *  Description: it is used for configuring the DMA operation mode register in
2340 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2341 */
2342static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2343{
2344	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2345	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2346	int rxfifosz = priv->plat->rx_fifo_size;
2347	int txfifosz = priv->plat->tx_fifo_size;
2348	u32 txmode = 0;
2349	u32 rxmode = 0;
2350	u32 chan = 0;
2351	u8 qmode = 0;
2352
2353	if (rxfifosz == 0)
2354		rxfifosz = priv->dma_cap.rx_fifo_size;
2355	if (txfifosz == 0)
2356		txfifosz = priv->dma_cap.tx_fifo_size;
2357
2358	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2359	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2360		rxfifosz /= rx_channels_count;
2361		txfifosz /= tx_channels_count;
2362	}
2363
2364	if (priv->plat->force_thresh_dma_mode) {
2365		txmode = tc;
2366		rxmode = tc;
2367	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2368		/*
2369		 * In case of GMAC, SF mode can be enabled
2370		 * to perform the TX COE in HW. This depends on:
2371		 * 1) TX COE is actually supported
2372		 * 2) there is no buggy Jumbo frame support
2373		 *    that requires the csum not to be inserted in the TDES.
2374		 */
2375		txmode = SF_DMA_MODE;
2376		rxmode = SF_DMA_MODE;
2377		priv->xstats.threshold = SF_DMA_MODE;
2378	} else {
2379		txmode = tc;
2380		rxmode = SF_DMA_MODE;
2381	}
2382
2383	/* configure all channels */
2384	for (chan = 0; chan < rx_channels_count; chan++) {
2385		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2386		u32 buf_size;
2387
2388		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2389
2390		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2391				rxfifosz, qmode);
2392
2393		if (rx_q->xsk_pool) {
2394			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2395			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2396					      buf_size,
2397					      chan);
2398		} else {
2399			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2400					      priv->dma_conf.dma_buf_sz,
2401					      chan);
2402		}
2403	}
2404
2405	for (chan = 0; chan < tx_channels_count; chan++) {
2406		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2407
2408		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2409				txfifosz, qmode);
2410	}
2411}
2412
2413static void stmmac_xsk_request_timestamp(void *_priv)
2414{
2415	struct stmmac_metadata_request *meta_req = _priv;
2416
2417	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2418	*meta_req->set_ic = true;
2419}
2420
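/* Fetch the TX timestamp for an XSK completion: take it from the descriptor
 * when its timestamp status is set, otherwise fall back to the MAC snapshot
 * register, and compensate for the CDC error before returning it.
 */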
2421static u64 stmmac_xsk_fill_timestamp(void *_priv)
2422{
2423	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2424	struct stmmac_priv *priv = tx_compl->priv;
2425	struct dma_desc *desc = tx_compl->desc;
2426	bool found = false;
2427	u64 ns = 0;
2428
2429	if (!priv->hwts_tx_en)
2430		return 0;
2431
2432	/* check tx tstamp status */
2433	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2434		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2435		found = true;
2436	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2437		found = true;
2438	}
2439
2440	if (found) {
2441		ns -= priv->plat->cdc_error_adj;
2442		return ns_to_ktime(ns);
2443	}
2444
2445	return 0;
2446}
2447
2448static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2449	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2450	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2451};
2452
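/**
 * stmmac_xdp_xmit_zc - transmit pending AF_XDP (zero-copy) frames
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: pulls descriptors from the XSK TX ring and maps them onto the
 * hardware TX ring, which is shared with the slow path. Returns true when
 * the budget was not exhausted and no XSK descriptors are left pending.
 */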
2453static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2454{
2455	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2456	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2457	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2458	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2459	unsigned int entry = tx_q->cur_tx;
2460	struct dma_desc *tx_desc = NULL;
2461	struct xdp_desc xdp_desc;
2462	bool work_done = true;
2463	u32 tx_set_ic_bit = 0;
2464
2465	/* Avoids TX time-out as we are sharing with slow path */
2466	txq_trans_cond_update(nq);
2467
2468	budget = min(budget, stmmac_tx_avail(priv, queue));
2469
2470	while (budget-- > 0) {
2471		struct stmmac_metadata_request meta_req;
2472		struct xsk_tx_metadata *meta = NULL;
2473		dma_addr_t dma_addr;
2474		bool set_ic;
2475
2476		/* The TX ring is shared with the slow path, so stop XSK TX desc
2477		 * submission when the available ring space drops below the threshold.
2478		 */
2479		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2480		    !netif_carrier_ok(priv->dev)) {
2481			work_done = false;
2482			break;
2483		}
2484
2485		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2486			break;
2487
2488		if (priv->est && priv->est->enable &&
2489		    priv->est->max_sdu[queue] &&
2490		    xdp_desc.len > priv->est->max_sdu[queue]) {
2491			priv->xstats.max_sdu_txq_drop[queue]++;
2492			continue;
2493		}
2494
2495		if (likely(priv->extend_desc))
2496			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2497		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2498			tx_desc = &tx_q->dma_entx[entry].basic;
2499		else
2500			tx_desc = tx_q->dma_tx + entry;
2501
2502		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2503		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2504		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2505
2506		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2507
2508		/* To return the XDP buffer to the XSK pool, we simply call
2509		 * xsk_tx_completed(), so we don't need to fill up
2510		 * 'buf' and 'xdpf'.
2511		 */
2512		tx_q->tx_skbuff_dma[entry].buf = 0;
2513		tx_q->xdpf[entry] = NULL;
2514
2515		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2516		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2517		tx_q->tx_skbuff_dma[entry].last_segment = true;
2518		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2519
2520		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2521
2522		tx_q->tx_count_frames++;
2523
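		/* Request the Interrupt-on-Completion bit only once every
		 * tx_coal_frames descriptors to mitigate TX completion IRQs.
		 */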
2524		if (!priv->tx_coal_frames[queue])
2525			set_ic = false;
2526		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2527			set_ic = true;
2528		else
2529			set_ic = false;
2530
2531		meta_req.priv = priv;
2532		meta_req.tx_desc = tx_desc;
2533		meta_req.set_ic = &set_ic;
2534		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2535					&meta_req);
2536		if (set_ic) {
2537			tx_q->tx_count_frames = 0;
2538			stmmac_set_tx_ic(priv, tx_desc);
2539			tx_set_ic_bit++;
2540		}
2541
2542		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2543				       true, priv->mode, true, true,
2544				       xdp_desc.len);
2545
2546		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2547
2548		xsk_tx_metadata_to_compl(meta,
2549					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2550
2551		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2552		entry = tx_q->cur_tx;
2553	}
2554	u64_stats_update_begin(&txq_stats->napi_syncp);
2555	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2556	u64_stats_update_end(&txq_stats->napi_syncp);
2557
2558	if (tx_desc) {
2559		stmmac_flush_tx_descriptors(priv, queue);
2560		xsk_tx_release(pool);
2561	}
2562
2563	/* Return true if both of the following conditions are met
2564	 *  a) TX budget is still available
2565	 *  b) work_done = true when XSK TX desc peek is empty (no more
2566	 *     pending XSK TX for transmission)
2567	 */
2568	return !!budget && work_done;
2569}
2570
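/* Bump the TX DMA threshold (tc) by 64, up to a maximum of 256, and reprogram
 * the DMA operation mode; called when the hardware reports threshold-related
 * TX errors while not running in Store-And-Forward mode.
 */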
2571static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2572{
2573	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2574		tc += 64;
2575
2576		if (priv->plat->force_thresh_dma_mode)
2577			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2578		else
2579			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2580						      chan);
2581
2582		priv->xstats.threshold = tc;
2583	}
2584}
2585
2586/**
2587 * stmmac_tx_clean - to manage the transmission completion
2588 * @priv: driver private structure
2589 * @budget: napi budget limiting this function's packet handling
2590 * @queue: TX queue index
2591 * @pending_packets: signal to arm the TX coal timer
2592 * Description: it reclaims the transmit resources after transmission completes.
2593 * If some packets still need to be handled, due to TX coalescing, set
2594 * pending_packets to true to make NAPI arm the TX coal timer.
2595 */
2596static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2597			   bool *pending_packets)
2598{
2599	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2600	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2601	unsigned int bytes_compl = 0, pkts_compl = 0;
2602	unsigned int entry, xmits = 0, count = 0;
2603	u32 tx_packets = 0, tx_errors = 0;
2604
2605	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2606
2607	tx_q->xsk_frames_done = 0;
2608
2609	entry = tx_q->dirty_tx;
2610
2611	/* Try to clean all completed TX frames in one shot */
2612	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2613		struct xdp_frame *xdpf;
2614		struct sk_buff *skb;
2615		struct dma_desc *p;
2616		int status;
2617
2618		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2619		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2620			xdpf = tx_q->xdpf[entry];
2621			skb = NULL;
2622		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2623			xdpf = NULL;
2624			skb = tx_q->tx_skbuff[entry];
2625		} else {
2626			xdpf = NULL;
2627			skb = NULL;
2628		}
2629
2630		if (priv->extend_desc)
2631			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2632		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2633			p = &tx_q->dma_entx[entry].basic;
2634		else
2635			p = tx_q->dma_tx + entry;
2636
2637		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2638		/* Check if the descriptor is owned by the DMA */
2639		if (unlikely(status & tx_dma_own))
2640			break;
2641
2642		count++;
2643
2644		/* Make sure descriptor fields are read after reading
2645		 * the own bit.
2646		 */
2647		dma_rmb();
2648
2649		/* Just consider the last segment and ...*/
2650		if (likely(!(status & tx_not_ls))) {
2651			/* ... verify the status error condition */
2652			if (unlikely(status & tx_err)) {
2653				tx_errors++;
2654				if (unlikely(status & tx_err_bump_tc))
2655					stmmac_bump_dma_threshold(priv, queue);
2656			} else {
2657				tx_packets++;
2658			}
2659			if (skb) {
2660				stmmac_get_tx_hwtstamp(priv, p, skb);
2661			} else if (tx_q->xsk_pool &&
2662				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2663				struct stmmac_xsk_tx_complete tx_compl = {
2664					.priv = priv,
2665					.desc = p,
2666				};
2667
2668				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2669							 &stmmac_xsk_tx_metadata_ops,
2670							 &tx_compl);
2671			}
2672		}
2673
2674		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2675			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2676			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2677				dma_unmap_page(priv->device,
2678					       tx_q->tx_skbuff_dma[entry].buf,
2679					       tx_q->tx_skbuff_dma[entry].len,
2680					       DMA_TO_DEVICE);
2681			else
2682				dma_unmap_single(priv->device,
2683						 tx_q->tx_skbuff_dma[entry].buf,
2684						 tx_q->tx_skbuff_dma[entry].len,
2685						 DMA_TO_DEVICE);
2686			tx_q->tx_skbuff_dma[entry].buf = 0;
2687			tx_q->tx_skbuff_dma[entry].len = 0;
2688			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2689		}
2690
2691		stmmac_clean_desc3(priv, tx_q, p);
2692
2693		tx_q->tx_skbuff_dma[entry].last_segment = false;
2694		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2695
2696		if (xdpf &&
2697		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2698			xdp_return_frame_rx_napi(xdpf);
2699			tx_q->xdpf[entry] = NULL;
2700		}
2701
2702		if (xdpf &&
2703		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2704			xdp_return_frame(xdpf);
2705			tx_q->xdpf[entry] = NULL;
2706		}
2707
2708		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2709			tx_q->xsk_frames_done++;
2710
2711		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2712			if (likely(skb)) {
2713				pkts_compl++;
2714				bytes_compl += skb->len;
2715				dev_consume_skb_any(skb);
2716				tx_q->tx_skbuff[entry] = NULL;
2717			}
2718		}
2719
2720		stmmac_release_tx_desc(priv, p, priv->mode);
2721
2722		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2723	}
2724	tx_q->dirty_tx = entry;
2725
2726	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2727				  pkts_compl, bytes_compl);
2728
2729	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2730								queue))) &&
2731	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2732
2733		netif_dbg(priv, tx_done, priv->dev,
2734			  "%s: restart transmit\n", __func__);
2735		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2736	}
2737
2738	if (tx_q->xsk_pool) {
2739		bool work_done;
2740
2741		if (tx_q->xsk_frames_done)
2742			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2743
2744		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2745			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2746
2747		/* For XSK TX, we try to send as many as possible.
2748		 * If XSK work done (XSK TX desc empty and budget still
2749		 * available), return "budget - 1" to reenable TX IRQ.
2750		 * Else, return "budget" to make NAPI continue polling.
2751		 */
2752		work_done = stmmac_xdp_xmit_zc(priv, queue,
2753					       STMMAC_XSK_TX_BUDGET_MAX);
2754		if (work_done)
2755			xmits = budget - 1;
2756		else
2757			xmits = budget;
2758	}
2759
2760	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2761	    priv->eee_sw_timer_en) {
2762		if (stmmac_enable_eee_mode(priv))
2763			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2764	}
2765
2766	/* We still have pending packets, let's call for a new scheduling */
2767	if (tx_q->dirty_tx != tx_q->cur_tx)
2768		*pending_packets = true;
2769
2770	u64_stats_update_begin(&txq_stats->napi_syncp);
2771	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2772	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2773	u64_stats_inc(&txq_stats->napi.tx_clean);
2774	u64_stats_update_end(&txq_stats->napi_syncp);
2775
2776	priv->xstats.tx_errors += tx_errors;
2777
2778	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2779
2780	/* Combine decisions from TX clean and XSK TX */
2781	return max(count, xmits);
2782}
2783
2784/**
2785 * stmmac_tx_err - to manage the tx error
2786 * @priv: driver private structure
2787 * @chan: channel index
2788 * Description: it cleans the descriptors and restarts the transmission
2789 * in case of transmission errors.
2790 */
2791static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2792{
2793	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2794
2795	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2796
2797	stmmac_stop_tx_dma(priv, chan);
2798	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2799	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2800	stmmac_reset_tx_queue(priv, chan);
2801	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2802			    tx_q->dma_tx_phy, chan);
2803	stmmac_start_tx_dma(priv, chan);
2804
2805	priv->xstats.tx_errors++;
2806	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2807}
2808
2809/**
2810 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2811 *  @priv: driver private structure
2812 *  @txmode: TX operating mode
2813 *  @rxmode: RX operating mode
2814 *  @chan: channel index
2815 *  Description: it is used for configuring of the DMA operation mode in
2816 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2817 *  mode.
2818 */
2819static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2820					  u32 rxmode, u32 chan)
2821{
2822	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2823	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2824	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2825	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2826	int rxfifosz = priv->plat->rx_fifo_size;
2827	int txfifosz = priv->plat->tx_fifo_size;
2828
2829	if (rxfifosz == 0)
2830		rxfifosz = priv->dma_cap.rx_fifo_size;
2831	if (txfifosz == 0)
2832		txfifosz = priv->dma_cap.tx_fifo_size;
2833
2834	/* Adjust for real per queue fifo size */
2835	rxfifosz /= rx_channels_count;
2836	txfifosz /= tx_channels_count;
2837
2838	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2839	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2840}
2841
2842static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2843{
2844	int ret;
2845
2846	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2847			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2848	if (ret && (ret != -EINVAL)) {
2849		stmmac_global_err(priv);
2850		return true;
2851	}
2852
2853	return false;
2854}
2855
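/* Read the DMA interrupt status for @chan and, for each direction that has
 * work pending, disable the corresponding DMA IRQ and schedule the matching
 * NAPI instance (the combined rx/tx NAPI when the queue uses an XSK pool).
 */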
2856static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2857{
2858	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2859						 &priv->xstats, chan, dir);
2860	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2861	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2862	struct stmmac_channel *ch = &priv->channel[chan];
2863	struct napi_struct *rx_napi;
2864	struct napi_struct *tx_napi;
2865	unsigned long flags;
2866
2867	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2868	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2869
2870	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2871		if (napi_schedule_prep(rx_napi)) {
2872			spin_lock_irqsave(&ch->lock, flags);
2873			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2874			spin_unlock_irqrestore(&ch->lock, flags);
2875			__napi_schedule(rx_napi);
2876		}
2877	}
2878
2879	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2880		if (napi_schedule_prep(tx_napi)) {
2881			spin_lock_irqsave(&ch->lock, flags);
2882			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2883			spin_unlock_irqrestore(&ch->lock, flags);
2884			__napi_schedule(tx_napi);
2885		}
2886	}
2887
2888	return status;
2889}
2890
2891/**
2892 * stmmac_dma_interrupt - DMA ISR
2893 * @priv: driver private structure
2894 * Description: this is the DMA ISR. It is called by the main ISR.
2895 * It calls the dwmac DMA routine and schedules the poll method in case
2896 * some work can be done.
2897 */
2898static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2899{
2900	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2901	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2902	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2903				tx_channel_count : rx_channel_count;
2904	u32 chan;
2905	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2906
2907	/* Make sure we never check beyond our status buffer. */
2908	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2909		channels_to_check = ARRAY_SIZE(status);
2910
2911	for (chan = 0; chan < channels_to_check; chan++)
2912		status[chan] = stmmac_napi_check(priv, chan,
2913						 DMA_DIR_RXTX);
2914
2915	for (chan = 0; chan < tx_channel_count; chan++) {
2916		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2917			/* Try to bump up the dma threshold on this failure */
2918			stmmac_bump_dma_threshold(priv, chan);
2919		} else if (unlikely(status[chan] == tx_hard_error)) {
2920			stmmac_tx_err(priv, chan);
2921		}
2922	}
2923}
2924
2925/**
2926 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2927 * @priv: driver private structure
2928 * Description: this masks the MMC irq; the counters are, in fact, managed in SW.
2929 */
2930static void stmmac_mmc_setup(struct stmmac_priv *priv)
2931{
2932	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2933			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2934
2935	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2936
2937	if (priv->dma_cap.rmon) {
2938		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2939		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2940	} else
2941		netdev_info(priv->dev, "No MAC Management Counters available\n");
2942}
2943
2944/**
2945 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2946 * @priv: driver private structure
2947 * Description:
2948 *  newer GMAC chip generations have a register to indicate the
2949 *  presence of optional features/functions.
2950 *  This can also be used to override the values passed through the
2951 *  platform, which is necessary for old MAC10/100 and GMAC chips.
2952 */
2953static int stmmac_get_hw_features(struct stmmac_priv *priv)
2954{
2955	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2956}
2957
2958/**
2959 * stmmac_check_ether_addr - check if the MAC addr is valid
2960 * @priv: driver private structure
2961 * Description:
2962 * verify that the MAC address is valid; in case it is not, a random
2963 * MAC address is generated
2964 */
2965static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2966{
2967	u8 addr[ETH_ALEN];
2968
2969	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2970		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2971		if (is_valid_ether_addr(addr))
2972			eth_hw_addr_set(priv->dev, addr);
2973		else
2974			eth_hw_addr_random(priv->dev);
2975		dev_info(priv->device, "device MAC address %pM\n",
2976			 priv->dev->dev_addr);
2977	}
2978}
2979
2980/**
2981 * stmmac_init_dma_engine - DMA init.
2982 * @priv: driver private structure
2983 * Description:
2984 * It inits the DMA invoking the specific MAC/GMAC callback.
2985 * Some DMA parameters can be passed from the platform;
2986 * in case these are not passed, a default is kept for the MAC or GMAC.
2987 */
2988static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2989{
2990	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2991	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2992	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2993	struct stmmac_rx_queue *rx_q;
2994	struct stmmac_tx_queue *tx_q;
2995	u32 chan = 0;
2996	int ret = 0;
2997
2998	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2999		dev_err(priv->device, "Invalid DMA configuration\n");
3000		return -EINVAL;
3001	}
3002
3003	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3004		priv->plat->dma_cfg->atds = 1;
3005
3006	ret = stmmac_reset(priv, priv->ioaddr);
3007	if (ret) {
3008		dev_err(priv->device, "Failed to reset the dma\n");
3009		return ret;
3010	}
3011
3012	/* DMA Configuration */
3013	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3014
3015	if (priv->plat->axi)
3016		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3017
3018	/* DMA CSR Channel configuration */
3019	for (chan = 0; chan < dma_csr_ch; chan++) {
3020		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3021		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3022	}
3023
3024	/* DMA RX Channel Configuration */
3025	for (chan = 0; chan < rx_channels_count; chan++) {
3026		rx_q = &priv->dma_conf.rx_queue[chan];
3027
3028		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3029				    rx_q->dma_rx_phy, chan);
3030
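		/* Point the RX tail pointer just past the last descriptor
		 * that has a buffer attached (buf_alloc_num entries).
		 */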
3031		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3032				     (rx_q->buf_alloc_num *
3033				      sizeof(struct dma_desc));
3034		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3035				       rx_q->rx_tail_addr, chan);
3036	}
3037
3038	/* DMA TX Channel Configuration */
3039	for (chan = 0; chan < tx_channels_count; chan++) {
3040		tx_q = &priv->dma_conf.tx_queue[chan];
3041
3042		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3043				    tx_q->dma_tx_phy, chan);
3044
3045		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3046		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3047				       tx_q->tx_tail_addr, chan);
3048	}
3049
3050	return ret;
3051}
3052
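/* (Re)arm the SW TX coalescing timer for @queue, unless its NAPI instance is
 * already scheduled and will rearm it itself.
 */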
3053static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3054{
3055	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3056	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3057	struct stmmac_channel *ch;
3058	struct napi_struct *napi;
3059
3060	if (!tx_coal_timer)
3061		return;
3062
3063	ch = &priv->channel[tx_q->queue_index];
3064	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3065
3066	/* Arm timer only if napi is not already scheduled.
3067	 * If napi is scheduled, try to cancel any pending timer; it will be
3068	 * armed again from the next scheduled napi.
3069	 */
3070	if (unlikely(!napi_is_scheduled(napi)))
3071		hrtimer_start(&tx_q->txtimer,
3072			      STMMAC_COAL_TIMER(tx_coal_timer),
3073			      HRTIMER_MODE_REL);
3074	else
3075		hrtimer_try_to_cancel(&tx_q->txtimer);
3076}
3077
3078/**
3079 * stmmac_tx_timer - mitigation sw timer for tx.
3080 * @t: pointer to the expired hrtimer, embedded in the TX queue
3081 * Description:
3082 * This is the timer handler to directly invoke the stmmac_tx_clean.
3083 */
3084static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3085{
3086	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3087	struct stmmac_priv *priv = tx_q->priv_data;
3088	struct stmmac_channel *ch;
3089	struct napi_struct *napi;
3090
3091	ch = &priv->channel[tx_q->queue_index];
3092	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3093
3094	if (likely(napi_schedule_prep(napi))) {
3095		unsigned long flags;
3096
3097		spin_lock_irqsave(&ch->lock, flags);
3098		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3099		spin_unlock_irqrestore(&ch->lock, flags);
3100		__napi_schedule(napi);
3101	}
3102
3103	return HRTIMER_NORESTART;
3104}
3105
3106/**
3107 * stmmac_init_coalesce - init mitigation options.
3108 * @priv: driver private structure
3109 * Description:
3110 * This inits the coalesce parameters: i.e. timer rate,
3111 * timer handler and default threshold used for enabling the
3112 * interrupt on completion bit.
3113 */
3114static void stmmac_init_coalesce(struct stmmac_priv *priv)
3115{
3116	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3117	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3118	u32 chan;
3119
3120	for (chan = 0; chan < tx_channel_count; chan++) {
3121		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3122
3123		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3124		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3125
3126		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3127		tx_q->txtimer.function = stmmac_tx_timer;
3128	}
3129
3130	for (chan = 0; chan < rx_channel_count; chan++)
3131		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3132}
3133
3134static void stmmac_set_rings_length(struct stmmac_priv *priv)
3135{
3136	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3137	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3138	u32 chan;
3139
3140	/* set TX ring length */
3141	for (chan = 0; chan < tx_channels_count; chan++)
3142		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3143				       (priv->dma_conf.dma_tx_size - 1), chan);
3144
3145	/* set RX ring length */
3146	for (chan = 0; chan < rx_channels_count; chan++)
3147		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3148				       (priv->dma_conf.dma_rx_size - 1), chan);
3149}
3150
3151/**
3152 *  stmmac_set_tx_queue_weight - Set TX queue weight
3153 *  @priv: driver private structure
3154 *  Description: It is used for setting the TX queue weights
3155 */
3156static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3157{
3158	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3159	u32 weight;
3160	u32 queue;
3161
3162	for (queue = 0; queue < tx_queues_count; queue++) {
3163		weight = priv->plat->tx_queues_cfg[queue].weight;
3164		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3165	}
3166}
3167
3168/**
3169 *  stmmac_configure_cbs - Configure CBS in TX queue
3170 *  @priv: driver private structure
3171 *  Description: It is used for configuring CBS in AVB TX queues
3172 */
3173static void stmmac_configure_cbs(struct stmmac_priv *priv)
3174{
3175	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3176	u32 mode_to_use;
3177	u32 queue;
3178
3179	/* queue 0 is reserved for legacy traffic */
3180	for (queue = 1; queue < tx_queues_count; queue++) {
3181		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3182		if (mode_to_use == MTL_QUEUE_DCB)
3183			continue;
3184
3185		stmmac_config_cbs(priv, priv->hw,
3186				priv->plat->tx_queues_cfg[queue].send_slope,
3187				priv->plat->tx_queues_cfg[queue].idle_slope,
3188				priv->plat->tx_queues_cfg[queue].high_credit,
3189				priv->plat->tx_queues_cfg[queue].low_credit,
3190				queue);
3191	}
3192}
3193
3194/**
3195 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3196 *  @priv: driver private structure
3197 *  Description: It is used for mapping RX queues to RX dma channels
3198 */
3199static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3200{
3201	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3202	u32 queue;
3203	u32 chan;
3204
3205	for (queue = 0; queue < rx_queues_count; queue++) {
3206		chan = priv->plat->rx_queues_cfg[queue].chan;
3207		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3208	}
3209}
3210
3211/**
3212 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3213 *  @priv: driver private structure
3214 *  Description: It is used for configuring the RX Queue Priority
3215 */
3216static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3217{
3218	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3219	u32 queue;
3220	u32 prio;
3221
3222	for (queue = 0; queue < rx_queues_count; queue++) {
3223		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3224			continue;
3225
3226		prio = priv->plat->rx_queues_cfg[queue].prio;
3227		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3228	}
3229}
3230
3231/**
3232 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3233 *  @priv: driver private structure
3234 *  Description: It is used for configuring the TX Queue Priority
3235 */
3236static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3237{
3238	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3239	u32 queue;
3240	u32 prio;
3241
3242	for (queue = 0; queue < tx_queues_count; queue++) {
3243		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3244			continue;
3245
3246		prio = priv->plat->tx_queues_cfg[queue].prio;
3247		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3248	}
3249}
3250
3251/**
3252 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3253 *  @priv: driver private structure
3254 *  Description: It is used for configuring the RX queue routing
3255 */
3256static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3257{
3258	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3259	u32 queue;
3260	u8 packet;
3261
3262	for (queue = 0; queue < rx_queues_count; queue++) {
3263		/* no specific packet type routing specified for the queue */
3264		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3265			continue;
3266
3267		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3268		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3269	}
3270}
3271
3272static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3273{
3274	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3275		priv->rss.enable = false;
3276		return;
3277	}
3278
3279	if (priv->dev->features & NETIF_F_RXHASH)
3280		priv->rss.enable = true;
3281	else
3282		priv->rss.enable = false;
3283
3284	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3285			     priv->plat->rx_queues_to_use);
3286}
3287
3288/**
3289 *  stmmac_mtl_configuration - Configure MTL
3290 *  @priv: driver private structure
3291 *  Description: It is used for configuring MTL
3292 */
3293static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3294{
3295	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3296	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3297
3298	if (tx_queues_count > 1)
3299		stmmac_set_tx_queue_weight(priv);
3300
3301	/* Configure MTL RX algorithms */
3302	if (rx_queues_count > 1)
3303		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3304				priv->plat->rx_sched_algorithm);
3305
3306	/* Configure MTL TX algorithms */
3307	if (tx_queues_count > 1)
3308		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3309				priv->plat->tx_sched_algorithm);
3310
3311	/* Configure CBS in AVB TX queues */
3312	if (tx_queues_count > 1)
3313		stmmac_configure_cbs(priv);
3314
3315	/* Map RX MTL to DMA channels */
3316	stmmac_rx_queue_dma_chan_map(priv);
3317
3318	/* Enable MAC RX Queues */
3319	stmmac_mac_enable_rx_queues(priv);
3320
3321	/* Set RX priorities */
3322	if (rx_queues_count > 1)
3323		stmmac_mac_config_rx_queues_prio(priv);
3324
3325	/* Set TX priorities */
3326	if (tx_queues_count > 1)
3327		stmmac_mac_config_tx_queues_prio(priv);
3328
3329	/* Set RX routing */
3330	if (rx_queues_count > 1)
3331		stmmac_mac_config_rx_queues_routing(priv);
3332
3333	/* Receive Side Scaling */
3334	if (rx_queues_count > 1)
3335		stmmac_mac_config_rss(priv);
3336}
3337
3338static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3339{
3340	if (priv->dma_cap.asp) {
3341		netdev_info(priv->dev, "Enabling Safety Features\n");
3342		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3343					  priv->plat->safety_feat_cfg);
3344	} else {
3345		netdev_info(priv->dev, "No Safety Features support found\n");
3346	}
3347}
3348
3349/**
3350 * stmmac_hw_setup - setup mac in a usable state.
3351 *  @dev : pointer to the device structure.
3352 *  @ptp_register: register PTP if set
3353 *  Description:
3354 *  this is the main function to setup the HW in a usable state: the
3355 *  DMA engine is reset, the core registers are configured (e.g. AXI,
3356 *  Checksum features, timers). The DMA is ready to start receiving and
3357 *  transmitting.
3358 *  Return value:
3359 *  0 on success and an appropriate negative error code, as defined in
3360 *  errno.h, on failure.
3361 */
3362static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3363{
3364	struct stmmac_priv *priv = netdev_priv(dev);
3365	u32 rx_cnt = priv->plat->rx_queues_to_use;
3366	u32 tx_cnt = priv->plat->tx_queues_to_use;
3367	bool sph_en;
3368	u32 chan;
3369	int ret;
3370
3371	/* Make sure RX clock is enabled */
3372	if (priv->hw->phylink_pcs)
3373		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3374
3375	/* DMA initialization and SW reset */
3376	ret = stmmac_init_dma_engine(priv);
3377	if (ret < 0) {
3378		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3379			   __func__);
3380		return ret;
3381	}
3382
3383	/* Copy the MAC addr into the HW  */
3384	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3385
3386	/* PS and related bits will be programmed according to the speed */
3387	if (priv->hw->pcs) {
3388		int speed = priv->plat->mac_port_sel_speed;
3389
3390		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3391		    (speed == SPEED_1000)) {
3392			priv->hw->ps = speed;
3393		} else {
3394			dev_warn(priv->device, "invalid port speed\n");
3395			priv->hw->ps = 0;
3396		}
3397	}
3398
3399	/* Initialize the MAC Core */
3400	stmmac_core_init(priv, priv->hw, dev);
3401
3402	/* Initialize MTL*/
3403	stmmac_mtl_configuration(priv);
3404
3405	/* Initialize Safety Features */
3406	stmmac_safety_feat_configuration(priv);
3407
3408	ret = stmmac_rx_ipc(priv, priv->hw);
3409	if (!ret) {
3410		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3411		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3412		priv->hw->rx_csum = 0;
3413	}
3414
3415	/* Enable the MAC Rx/Tx */
3416	stmmac_mac_set(priv, priv->ioaddr, true);
3417
3418	/* Set the HW DMA mode and the COE */
3419	stmmac_dma_operation_mode(priv);
3420
3421	stmmac_mmc_setup(priv);
3422
3423	if (ptp_register) {
3424		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3425		if (ret < 0)
3426			netdev_warn(priv->dev,
3427				    "failed to enable PTP reference clock: %pe\n",
3428				    ERR_PTR(ret));
3429	}
3430
3431	ret = stmmac_init_ptp(priv);
3432	if (ret == -EOPNOTSUPP)
3433		netdev_info(priv->dev, "PTP not supported by HW\n");
3434	else if (ret)
3435		netdev_warn(priv->dev, "PTP init failed\n");
3436	else if (ptp_register)
3437		stmmac_ptp_register(priv);
3438
3439	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3440
3441	/* Convert the timer from msec to usec */
3442	if (!priv->tx_lpi_timer)
3443		priv->tx_lpi_timer = eee_timer * 1000;
3444
3445	if (priv->use_riwt) {
3446		u32 queue;
3447
3448		for (queue = 0; queue < rx_cnt; queue++) {
3449			if (!priv->rx_riwt[queue])
3450				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3451
3452			stmmac_rx_watchdog(priv, priv->ioaddr,
3453					   priv->rx_riwt[queue], queue);
3454		}
3455	}
3456
3457	if (priv->hw->pcs)
3458		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3459
3460	/* set TX and RX rings length */
3461	stmmac_set_rings_length(priv);
3462
3463	/* Enable TSO */
3464	if (priv->tso) {
3465		for (chan = 0; chan < tx_cnt; chan++) {
3466			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3467
3468			/* TSO and TBS cannot co-exist */
3469			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3470				continue;
3471
3472			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3473		}
3474	}
3475
3476	/* Enable Split Header */
3477	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3478	for (chan = 0; chan < rx_cnt; chan++)
3479		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3480
3481
3482	/* VLAN Tag Insertion */
3483	if (priv->dma_cap.vlins)
3484		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3485
3486	/* TBS */
3487	for (chan = 0; chan < tx_cnt; chan++) {
3488		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3489		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3490
3491		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3492	}
3493
3494	/* Configure real RX and TX queues */
3495	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3496	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3497
3498	/* Start the ball rolling... */
3499	stmmac_start_all_dma(priv);
3500
3501	stmmac_set_hw_vlan_mode(priv, priv->hw);
3502
3503	return 0;
3504}
3505
3506static void stmmac_hw_teardown(struct net_device *dev)
3507{
3508	struct stmmac_priv *priv = netdev_priv(dev);
3509
3510	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3511}
3512
3513static void stmmac_free_irq(struct net_device *dev,
3514			    enum request_irq_err irq_err, int irq_idx)
3515{
3516	struct stmmac_priv *priv = netdev_priv(dev);
3517	int j;
3518
3519	switch (irq_err) {
3520	case REQ_IRQ_ERR_ALL:
3521		irq_idx = priv->plat->tx_queues_to_use;
3522		fallthrough;
3523	case REQ_IRQ_ERR_TX:
3524		for (j = irq_idx - 1; j >= 0; j--) {
3525			if (priv->tx_irq[j] > 0) {
3526				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3527				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3528			}
3529		}
3530		irq_idx = priv->plat->rx_queues_to_use;
3531		fallthrough;
3532	case REQ_IRQ_ERR_RX:
3533		for (j = irq_idx - 1; j >= 0; j--) {
3534			if (priv->rx_irq[j] > 0) {
3535				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3536				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3537			}
3538		}
3539
3540		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3541			free_irq(priv->sfty_ue_irq, dev);
3542		fallthrough;
3543	case REQ_IRQ_ERR_SFTY_UE:
3544		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3545			free_irq(priv->sfty_ce_irq, dev);
3546		fallthrough;
3547	case REQ_IRQ_ERR_SFTY_CE:
3548		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3549			free_irq(priv->lpi_irq, dev);
3550		fallthrough;
3551	case REQ_IRQ_ERR_LPI:
3552		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3553			free_irq(priv->wol_irq, dev);
3554		fallthrough;
3555	case REQ_IRQ_ERR_SFTY:
3556		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3557			free_irq(priv->sfty_irq, dev);
3558		fallthrough;
3559	case REQ_IRQ_ERR_WOL:
3560		free_irq(dev->irq, dev);
3561		fallthrough;
3562	case REQ_IRQ_ERR_MAC:
3563	case REQ_IRQ_ERR_NO:
3564		/* If the MAC IRQ request failed, there are no more IRQs to free */
3565		break;
3566	}
3567}
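
/* Editor's note: the switch above relies on the ordering of enum
 * request_irq_err plus fallthrough to unwind, in reverse order, exactly the
 * IRQs that were successfully requested before the failure. A minimal,
 * hedged sketch of the same idiom with hypothetical step names (not driver
 * code):
 */
static inline void stmmac_example_unwind(int completed_steps)
{
	/* completed_steps = number of setup steps that succeeded before the
	 * failure; each case undoes one step and falls through to the next.
	 */
	switch (completed_steps) {
	case 3:
		pr_debug("undo step 3\n");
		fallthrough;
	case 2:
		pr_debug("undo step 2\n");
		fallthrough;
	case 1:
		pr_debug("undo step 1\n");
		fallthrough;
	case 0:
		break;
	}
}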
3568
3569static int stmmac_request_irq_multi_msi(struct net_device *dev)
3570{
3571	struct stmmac_priv *priv = netdev_priv(dev);
3572	enum request_irq_err irq_err;
3573	cpumask_t cpu_mask;
3574	int irq_idx = 0;
3575	char *int_name;
3576	int ret;
3577	int i;
3578
3579	/* For common interrupt */
3580	int_name = priv->int_name_mac;
3581	sprintf(int_name, "%s:%s", dev->name, "mac");
3582	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3583			  0, int_name, dev);
3584	if (unlikely(ret < 0)) {
3585		netdev_err(priv->dev,
3586			   "%s: alloc mac MSI %d (error: %d)\n",
3587			   __func__, dev->irq, ret);
3588		irq_err = REQ_IRQ_ERR_MAC;
3589		goto irq_error;
3590	}
3591
3592	/* Request the WoL IRQ in case a separate interrupt
3593	 * line is used for Wake-on-LAN
3594	 */
3595	priv->wol_irq_disabled = true;
3596	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3597		int_name = priv->int_name_wol;
3598		sprintf(int_name, "%s:%s", dev->name, "wol");
3599		ret = request_irq(priv->wol_irq,
3600				  stmmac_mac_interrupt,
3601				  0, int_name, dev);
3602		if (unlikely(ret < 0)) {
3603			netdev_err(priv->dev,
3604				   "%s: alloc wol MSI %d (error: %d)\n",
3605				   __func__, priv->wol_irq, ret);
3606			irq_err = REQ_IRQ_ERR_WOL;
3607			goto irq_error;
3608		}
3609	}
3610
3611	/* Request the LPI IRQ in case a separate interrupt
3612	 * line is used for LPI
3613	 */
3614	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3615		int_name = priv->int_name_lpi;
3616		sprintf(int_name, "%s:%s", dev->name, "lpi");
3617		ret = request_irq(priv->lpi_irq,
3618				  stmmac_mac_interrupt,
3619				  0, int_name, dev);
3620		if (unlikely(ret < 0)) {
3621			netdev_err(priv->dev,
3622				   "%s: alloc lpi MSI %d (error: %d)\n",
3623				   __func__, priv->lpi_irq, ret);
3624			irq_err = REQ_IRQ_ERR_LPI;
3625			goto irq_error;
3626		}
3627	}
3628
3629	/* Request the common Safety Feature Correctable/Uncorrectable
3630	 * Error line in case a separate line is used
3631	 */
3632	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3633		int_name = priv->int_name_sfty;
3634		sprintf(int_name, "%s:%s", dev->name, "safety");
3635		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3636				  0, int_name, dev);
3637		if (unlikely(ret < 0)) {
3638			netdev_err(priv->dev,
3639				   "%s: alloc sfty MSI %d (error: %d)\n",
3640				   __func__, priv->sfty_irq, ret);
3641			irq_err = REQ_IRQ_ERR_SFTY;
3642			goto irq_error;
3643		}
3644	}
3645
3646	/* Request the Safety Feature Correctable Error line in
3647	 * case a separate line is used
3648	 */
3649	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3650		int_name = priv->int_name_sfty_ce;
3651		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3652		ret = request_irq(priv->sfty_ce_irq,
3653				  stmmac_safety_interrupt,
3654				  0, int_name, dev);
3655		if (unlikely(ret < 0)) {
3656			netdev_err(priv->dev,
3657				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3658				   __func__, priv->sfty_ce_irq, ret);
3659			irq_err = REQ_IRQ_ERR_SFTY_CE;
3660			goto irq_error;
3661		}
3662	}
3663
3664	/* Request the Safety Feature Uncorrectable Error line in
3665	 * case a separate line is used
3666	 */
3667	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3668		int_name = priv->int_name_sfty_ue;
3669		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3670		ret = request_irq(priv->sfty_ue_irq,
3671				  stmmac_safety_interrupt,
3672				  0, int_name, dev);
3673		if (unlikely(ret < 0)) {
3674			netdev_err(priv->dev,
3675				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3676				   __func__, priv->sfty_ue_irq, ret);
3677			irq_err = REQ_IRQ_ERR_SFTY_UE;
3678			goto irq_error;
3679		}
3680	}
3681
3682	/* Request Rx MSI irq */
3683	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3684		if (i >= MTL_MAX_RX_QUEUES)
3685			break;
3686		if (priv->rx_irq[i] == 0)
3687			continue;
3688
3689		int_name = priv->int_name_rx_irq[i];
3690		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3691		ret = request_irq(priv->rx_irq[i],
3692				  stmmac_msi_intr_rx,
3693				  0, int_name, &priv->dma_conf.rx_queue[i]);
3694		if (unlikely(ret < 0)) {
3695			netdev_err(priv->dev,
3696				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3697				   __func__, i, priv->rx_irq[i], ret);
3698			irq_err = REQ_IRQ_ERR_RX;
3699			irq_idx = i;
3700			goto irq_error;
3701		}
3702		cpumask_clear(&cpu_mask);
3703		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3704		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3705	}
3706
3707	/* Request Tx MSI irq */
3708	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3709		if (i >= MTL_MAX_TX_QUEUES)
3710			break;
3711		if (priv->tx_irq[i] == 0)
3712			continue;
3713
3714		int_name = priv->int_name_tx_irq[i];
3715		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3716		ret = request_irq(priv->tx_irq[i],
3717				  stmmac_msi_intr_tx,
3718				  0, int_name, &priv->dma_conf.tx_queue[i]);
3719		if (unlikely(ret < 0)) {
3720			netdev_err(priv->dev,
3721				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3722				   __func__, i, priv->tx_irq[i], ret);
3723			irq_err = REQ_IRQ_ERR_TX;
3724			irq_idx = i;
3725			goto irq_error;
3726		}
3727		cpumask_clear(&cpu_mask);
3728		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3729		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3730	}
3731
3732	return 0;
3733
3734irq_error:
3735	stmmac_free_irq(dev, irq_err, irq_idx);
3736	return ret;
3737}
3738
3739static int stmmac_request_irq_single(struct net_device *dev)
3740{
3741	struct stmmac_priv *priv = netdev_priv(dev);
3742	enum request_irq_err irq_err;
3743	int ret;
3744
3745	ret = request_irq(dev->irq, stmmac_interrupt,
3746			  IRQF_SHARED, dev->name, dev);
3747	if (unlikely(ret < 0)) {
3748		netdev_err(priv->dev,
3749			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3750			   __func__, dev->irq, ret);
3751		irq_err = REQ_IRQ_ERR_MAC;
3752		goto irq_error;
3753	}
3754
3755	/* Request the WoL IRQ in case a separate interrupt
3756	 * line is used for Wake-on-LAN
3757	 */
3758	priv->wol_irq_disabled = true;
3759	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3760		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3761				  IRQF_SHARED, dev->name, dev);
3762		if (unlikely(ret < 0)) {
3763			netdev_err(priv->dev,
3764				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3765				   __func__, priv->wol_irq, ret);
3766			irq_err = REQ_IRQ_ERR_WOL;
3767			goto irq_error;
3768		}
3769	}
3770
3771	/* Request the LPI IRQ in case a separate line is used for LPI */
3772	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3773		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3774				  IRQF_SHARED, dev->name, dev);
3775		if (unlikely(ret < 0)) {
3776			netdev_err(priv->dev,
3777				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3778				   __func__, priv->lpi_irq, ret);
3779			irq_err = REQ_IRQ_ERR_LPI;
3780			goto irq_error;
3781		}
3782	}
3783
3784	/* Request the common Safety Feature Correctable/Uncorrectable
3785	 * Error line in case a separate line is used
3786	 */
3787	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3788		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3789				  IRQF_SHARED, dev->name, dev);
3790		if (unlikely(ret < 0)) {
3791			netdev_err(priv->dev,
3792				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3793				   __func__, priv->sfty_irq, ret);
3794			irq_err = REQ_IRQ_ERR_SFTY;
3795			goto irq_error;
3796		}
3797	}
3798
3799	return 0;
3800
3801irq_error:
3802	stmmac_free_irq(dev, irq_err, 0);
3803	return ret;
3804}
3805
3806static int stmmac_request_irq(struct net_device *dev)
3807{
3808	struct stmmac_priv *priv = netdev_priv(dev);
3809	int ret;
3810
3811	/* Request the IRQ lines */
3812	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3813		ret = stmmac_request_irq_multi_msi(dev);
3814	else
3815		ret = stmmac_request_irq_single(dev);
3816
3817	return ret;
3818}
3819
3820/**
3821 *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3822 *  @priv: driver private structure
3823 *  @mtu: MTU to setup the dma queue and buf with
3824 *  Description: Allocate and populate a dma_conf structure based on the
3825 *  provided MTU; allocate the Tx/Rx DMA queues and initialize them.
3826 *  Return value:
3827 *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3828 */
3829static struct stmmac_dma_conf *
3830stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3831{
3832	struct stmmac_dma_conf *dma_conf;
3833	int chan, bfsize, ret;
3834
3835	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3836	if (!dma_conf) {
3837		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3838			   __func__);
3839		return ERR_PTR(-ENOMEM);
3840	}
3841
3842	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3843	if (bfsize < 0)
3844		bfsize = 0;
3845
3846	if (bfsize < BUF_SIZE_16KiB)
3847		bfsize = stmmac_set_bfsize(mtu, 0);
3848
3849	dma_conf->dma_buf_sz = bfsize;
3850	/* Choose the tx/rx sizes from the ones already defined in the
3851	 * priv struct, if set.
3852	 */
3853	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3854	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3855
3856	if (!dma_conf->dma_tx_size)
3857		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3858	if (!dma_conf->dma_rx_size)
3859		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3860
3861	/* Early check for TBS, before TX descriptor allocation */
3862	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3863		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3864		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3865
3866		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3867		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3868	}
3869
3870	ret = alloc_dma_desc_resources(priv, dma_conf);
3871	if (ret < 0) {
3872		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3873			   __func__);
3874		goto alloc_error;
3875	}
3876
3877	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3878	if (ret < 0) {
3879		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3880			   __func__);
3881		goto init_error;
3882	}
3883
3884	return dma_conf;
3885
3886init_error:
3887	free_dma_desc_resources(priv, dma_conf);
3888alloc_error:
3889	kfree(dma_conf);
3890	return ERR_PTR(ret);
3891}
3892
3893/**
3894 *  __stmmac_open - open entry point of the driver
3895 *  @dev : pointer to the device structure.
3896 *  @dma_conf :  structure to take the dma data
3897 *  Description:
3898 *  This function is the open entry point of the driver.
3899 *  Return value:
3900 *  0 on success and a negative error code (as defined in errno.h)
3901 *  on failure.
3902 */
3903static int __stmmac_open(struct net_device *dev,
3904			 struct stmmac_dma_conf *dma_conf)
3905{
3906	struct stmmac_priv *priv = netdev_priv(dev);
3907	int mode = priv->plat->phy_interface;
3908	u32 chan;
3909	int ret;
3910
3911	ret = pm_runtime_resume_and_get(priv->device);
3912	if (ret < 0)
3913		return ret;
3914
3915	if ((!priv->hw->xpcs ||
3916	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3917		ret = stmmac_init_phy(dev);
3918		if (ret) {
3919			netdev_err(priv->dev,
3920				   "%s: Cannot attach to PHY (error: %d)\n",
3921				   __func__, ret);
3922			goto init_phy_error;
3923		}
3924	}
3925
3926	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3927
3928	buf_sz = dma_conf->dma_buf_sz;
3929	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3930		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3931			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3932	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3933
3934	stmmac_reset_queues_param(priv);
3935
3936	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3937	    priv->plat->serdes_powerup) {
3938		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3939		if (ret < 0) {
3940			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3941				   __func__);
3942			goto init_error;
3943		}
3944	}
3945
3946	ret = stmmac_hw_setup(dev, true);
3947	if (ret < 0) {
3948		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3949		goto init_error;
3950	}
3951
3952	stmmac_init_coalesce(priv);
3953
3954	phylink_start(priv->phylink);
3955	/* We may have called phylink_speed_down before */
3956	phylink_speed_up(priv->phylink);
3957
3958	ret = stmmac_request_irq(dev);
3959	if (ret)
3960		goto irq_error;
3961
3962	stmmac_enable_all_queues(priv);
3963	netif_tx_start_all_queues(priv->dev);
3964	stmmac_enable_all_dma_irq(priv);
3965
3966	return 0;
3967
3968irq_error:
3969	phylink_stop(priv->phylink);
3970
3971	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3972		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3973
3974	stmmac_hw_teardown(dev);
3975init_error:
3976	phylink_disconnect_phy(priv->phylink);
3977init_phy_error:
3978	pm_runtime_put(priv->device);
3979	return ret;
3980}
3981
3982static int stmmac_open(struct net_device *dev)
3983{
3984	struct stmmac_priv *priv = netdev_priv(dev);
3985	struct stmmac_dma_conf *dma_conf;
3986	int ret;
3987
3988	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3989	if (IS_ERR(dma_conf))
3990		return PTR_ERR(dma_conf);
3991
3992	ret = __stmmac_open(dev, dma_conf);
3993	if (ret)
3994		free_dma_desc_resources(priv, dma_conf);
3995
3996	kfree(dma_conf);
3997	return ret;
3998}
3999
4000/**
4001 *  stmmac_release - close entry point of the driver
4002 *  @dev : device pointer.
4003 *  Description:
4004 *  This is the stop entry point of the driver.
4005 */
4006static int stmmac_release(struct net_device *dev)
4007{
4008	struct stmmac_priv *priv = netdev_priv(dev);
4009	u32 chan;
4010
4011	if (device_may_wakeup(priv->device))
4012		phylink_speed_down(priv->phylink, false);
4013	/* Stop and disconnect the PHY */
4014	phylink_stop(priv->phylink);
4015	phylink_disconnect_phy(priv->phylink);
4016
4017	stmmac_disable_all_queues(priv);
4018
4019	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4020		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4021
4022	netif_tx_disable(dev);
4023
4024	/* Free the IRQ lines */
4025	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4026
4027	if (priv->eee_enabled) {
4028		priv->tx_path_in_lpi_mode = false;
4029		del_timer_sync(&priv->eee_ctrl_timer);
4030	}
4031
4032	/* Stop TX/RX DMA and clear the descriptors */
4033	stmmac_stop_all_dma(priv);
4034
4035	/* Release and free the Rx/Tx resources */
4036	free_dma_desc_resources(priv, &priv->dma_conf);
4037
4038	/* Disable the MAC Rx/Tx */
4039	stmmac_mac_set(priv, priv->ioaddr, false);
4040
4041	/* Power down the SERDES if present */
4042	if (priv->plat->serdes_powerdown)
4043		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4044
4045	stmmac_release_ptp(priv);
4046
4047	if (stmmac_fpe_supported(priv))
4048		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4049
4050	pm_runtime_put(priv->device);
4051
4052	return 0;
4053}
4054
4055static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4056			       struct stmmac_tx_queue *tx_q)
4057{
4058	u16 tag = 0x0, inner_tag = 0x0;
4059	u32 inner_type = 0x0;
4060	struct dma_desc *p;
4061
4062	if (!priv->dma_cap.vlins)
4063		return false;
4064	if (!skb_vlan_tag_present(skb))
4065		return false;
4066	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4067		inner_tag = skb_vlan_tag_get(skb);
4068		inner_type = STMMAC_VLAN_INSERT;
4069	}
4070
4071	tag = skb_vlan_tag_get(skb);
4072
4073	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4074		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4075	else
4076		p = &tx_q->dma_tx[tx_q->cur_tx];
4077
4078	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4079		return false;
4080
4081	stmmac_set_tx_owner(priv, p);
4082	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4083	return true;
4084}
4085
4086/**
4087 *  stmmac_tso_allocator - allocate and fill TSO descriptors for a buffer
4088 *  @priv: driver private structure
4089 *  @des: buffer start address
4090 *  @total_len: total length to fill in descriptors
4091 *  @last_segment: condition for the last descriptor
4092 *  @queue: TX queue index
4093 *  Description:
4094 *  This function fills the current descriptor and takes further descriptors
4095 *  as needed, according to the remaining buffer length to map
4096 */
4097static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4098				 int total_len, bool last_segment, u32 queue)
4099{
4100	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4101	struct dma_desc *desc;
4102	u32 buff_size;
4103	int tmp_len;
4104
4105	tmp_len = total_len;
4106
4107	while (tmp_len > 0) {
4108		dma_addr_t curr_addr;
4109
4110		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4111						priv->dma_conf.dma_tx_size);
4112		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4113
4114		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4115			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4116		else
4117			desc = &tx_q->dma_tx[tx_q->cur_tx];
4118
4119		curr_addr = des + (total_len - tmp_len);
4120		if (priv->dma_cap.addr64 <= 32)
4121			desc->des0 = cpu_to_le32(curr_addr);
4122		else
4123			stmmac_set_desc_addr(priv, desc, curr_addr);
4124
4125		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4126			    TSO_MAX_BUFF_SIZE : tmp_len;
4127
4128		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4129				0, 1,
4130				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4131				0, 0);
4132
4133		tmp_len -= TSO_MAX_BUFF_SIZE;
4134	}
4135}
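
/* Editor's note: a minimal sketch (hypothetical helper, not driver code) of
 * the splitting rule implemented by stmmac_tso_allocator() above: a buffer of
 * total_len bytes consumes one descriptor per chunk of at most
 * TSO_MAX_BUFF_SIZE bytes, and only the chunk that exhausts total_len is
 * flagged as the last segment. For example, a 40000-byte payload needs three
 * descriptors (16383 + 16383 + 7234 bytes).
 */
static inline int stmmac_tso_descs_needed_example(int total_len)
{
	int descs = 0;

	while (total_len > 0) {
		descs++;
		total_len -= TSO_MAX_BUFF_SIZE;
	}
	return descs;
}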
4136
4137static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4138{
4139	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4140	int desc_size;
4141
4142	if (likely(priv->extend_desc))
4143		desc_size = sizeof(struct dma_extended_desc);
4144	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4145		desc_size = sizeof(struct dma_edesc);
4146	else
4147		desc_size = sizeof(struct dma_desc);
4148
4149	/* The own bit must be the last thing written when preparing the
4150	 * descriptor, and a barrier is needed to make sure everything is
4151	 * coherent before handing control over to the DMA engine.
4152	 */
4153	wmb();
4154
4155	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4156	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4157}
4158
4159/**
4160 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4161 *  @skb : the socket buffer
4162 *  @dev : device pointer
4163 *  Description: this is the transmit function that is called on TSO frames
4164 *  (support available on GMAC4 and newer chips).
4165 *  The diagram below shows the ring programming in case of TSO frames:
4166 *
4167 *  First Descriptor
4168 *   --------
4169 *   | DES0 |---> buffer1 = L2/L3/L4 header
4170 *   | DES1 |---> TCP Payload (can continue on next descr...)
4171 *   | DES2 |---> buffer 1 and 2 len
4172 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4173 *   --------
4174 *	|
4175 *     ...
4176 *	|
4177 *   --------
4178 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4179 *   | DES1 | --|
4180 *   | DES2 | --> buffer 1 and 2 len
4181 *   | DES3 |
4182 *   --------
4183 *
4184 * The MSS is fixed once TSO is enabled, so the TDES3 context field only needs programming when it changes.
4185 */
4186static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4187{
4188	struct dma_desc *desc, *first, *mss_desc = NULL;
4189	struct stmmac_priv *priv = netdev_priv(dev);
4190	int tmp_pay_len = 0, first_tx, nfrags;
4191	unsigned int first_entry, tx_packets;
4192	struct stmmac_txq_stats *txq_stats;
4193	struct stmmac_tx_queue *tx_q;
4194	u32 pay_len, mss, queue;
4195	dma_addr_t tso_des, des;
4196	u8 proto_hdr_len, hdr;
4197	bool set_ic;
4198	int i;
4199
4200	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4201	 *
4202	 * Never let the HW insert the tag, since segments split by the
4203	 * TSO engine would be left un-tagged by mistake.
4204	 */
4205	if (skb_vlan_tag_present(skb)) {
4206		skb = __vlan_hwaccel_push_inside(skb);
4207		if (unlikely(!skb)) {
4208			priv->xstats.tx_dropped++;
4209			return NETDEV_TX_OK;
4210		}
4211	}
4212
4213	nfrags = skb_shinfo(skb)->nr_frags;
4214	queue = skb_get_queue_mapping(skb);
4215
4216	tx_q = &priv->dma_conf.tx_queue[queue];
4217	txq_stats = &priv->xstats.txq_stats[queue];
4218	first_tx = tx_q->cur_tx;
4219
4220	/* Compute header lengths */
4221	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4222		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4223		hdr = sizeof(struct udphdr);
4224	} else {
4225		proto_hdr_len = skb_tcp_all_headers(skb);
4226		hdr = tcp_hdrlen(skb);
4227	}
4228
4229	/* Desc availability based on the threshold should be safe enough */
4230	if (unlikely(stmmac_tx_avail(priv, queue) <
4231		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4232		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4233			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4234								queue));
4235			/* This is a hard error, log it. */
4236			netdev_err(priv->dev,
4237				   "%s: Tx Ring full when queue awake\n",
4238				   __func__);
4239		}
4240		return NETDEV_TX_BUSY;
4241	}
4242
4243	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4244
4245	mss = skb_shinfo(skb)->gso_size;
4246
4247	/* set new MSS value if needed */
4248	if (mss != tx_q->mss) {
4249		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4250			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4251		else
4252			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4253
4254		stmmac_set_mss(priv, mss_desc, mss);
4255		tx_q->mss = mss;
4256		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4257						priv->dma_conf.dma_tx_size);
4258		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4259	}
4260
4261	if (netif_msg_tx_queued(priv)) {
4262		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4263			__func__, hdr, proto_hdr_len, pay_len, mss);
4264		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4265			skb->data_len);
4266	}
4267
4268	first_entry = tx_q->cur_tx;
4269	WARN_ON(tx_q->tx_skbuff[first_entry]);
4270
4271	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4272		desc = &tx_q->dma_entx[first_entry].basic;
4273	else
4274		desc = &tx_q->dma_tx[first_entry];
4275	first = desc;
4276
4277	/* first descriptor: fill Headers on Buf1 */
4278	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4279			     DMA_TO_DEVICE);
4280	if (dma_mapping_error(priv->device, des))
4281		goto dma_map_err;
4282
4283	if (priv->dma_cap.addr64 <= 32) {
4284		first->des0 = cpu_to_le32(des);
4285
4286		/* Fill start of payload in buff2 of first descriptor */
4287		if (pay_len)
4288			first->des1 = cpu_to_le32(des + proto_hdr_len);
4289
4290		/* If needed take extra descriptors to fill the remaining payload */
4291		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4292		tso_des = des;
4293	} else {
4294		stmmac_set_desc_addr(priv, first, des);
4295		tmp_pay_len = pay_len;
4296		tso_des = des + proto_hdr_len;
4297		pay_len = 0;
4298	}
4299
4300	stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
4301
4302	/* In case two or more DMA transmit descriptors are allocated for this
4303	 * non-paged SKB data, the DMA buffer address should be saved to
4304	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4305	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4306	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4307	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4308	 * sooner or later.
4309	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4310	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4311	 * this DMA buffer right after the DMA engine completely finishes the
4312	 * full buffer transmission.
4313	 */
4314	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4315	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4316	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4317	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4318
4319	/* Prepare fragments */
4320	for (i = 0; i < nfrags; i++) {
4321		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4322
4323		des = skb_frag_dma_map(priv->device, frag, 0,
4324				       skb_frag_size(frag),
4325				       DMA_TO_DEVICE);
4326		if (dma_mapping_error(priv->device, des))
4327			goto dma_map_err;
4328
4329		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4330				     (i == nfrags - 1), queue);
4331
4332		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4333		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4334		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4335		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4336	}
4337
4338	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4339
4340	/* Only the last descriptor gets to point to the skb. */
4341	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4342	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4343
4344	/* Manage tx mitigation */
4345	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4346	tx_q->tx_count_frames += tx_packets;
4347
4348	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4349		set_ic = true;
4350	else if (!priv->tx_coal_frames[queue])
4351		set_ic = false;
4352	else if (tx_packets > priv->tx_coal_frames[queue])
4353		set_ic = true;
4354	else if ((tx_q->tx_count_frames %
4355		  priv->tx_coal_frames[queue]) < tx_packets)
4356		set_ic = true;
4357	else
4358		set_ic = false;
4359
4360	if (set_ic) {
4361		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4362			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4363		else
4364			desc = &tx_q->dma_tx[tx_q->cur_tx];
4365
4366		tx_q->tx_count_frames = 0;
4367		stmmac_set_tx_ic(priv, desc);
4368	}
4369
4370	/* We've used all descriptors we need for this skb, however,
4371	 * advance cur_tx so that it references a fresh descriptor.
4372	 * ndo_start_xmit will fill this descriptor the next time it's
4373	 * called and stmmac_tx_clean may clean up to this descriptor.
4374	 */
4375	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4376
4377	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4378		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4379			  __func__);
4380		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4381	}
4382
4383	u64_stats_update_begin(&txq_stats->q_syncp);
4384	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4385	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4386	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4387	if (set_ic)
4388		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4389	u64_stats_update_end(&txq_stats->q_syncp);
4390
4391	if (priv->sarc_type)
4392		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4393
4394	skb_tx_timestamp(skb);
4395
4396	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4397		     priv->hwts_tx_en)) {
4398		/* declare that device is doing timestamping */
4399		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4400		stmmac_enable_tx_timestamp(priv, first);
4401	}
4402
4403	/* Complete the first descriptor before granting the DMA */
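	/* Editor's note: the descriptor expects the L4 header length in
	 * 4-byte units (hence hdr / 4) and the payload length with all
	 * headers excluded (skb->len - proto_hdr_len).
	 */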
4404	stmmac_prepare_tso_tx_desc(priv, first, 1,
4405			proto_hdr_len,
4406			pay_len,
4407			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4408			hdr / 4, (skb->len - proto_hdr_len));
4409
4410	/* If context desc is used to change MSS */
4411	if (mss_desc) {
4412		/* Make sure that first descriptor has been completely
4413		 * written, including its own bit. This is because MSS is
4414		 * actually before first descriptor, so we need to make
4415		 * sure that MSS's own bit is the last thing written.
4416		 */
4417		dma_wmb();
4418		stmmac_set_tx_owner(priv, mss_desc);
4419	}
4420
4421	if (netif_msg_pktdata(priv)) {
4422		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4423			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4424			tx_q->cur_tx, first, nfrags);
4425		pr_info(">>> frame to be transmitted: ");
4426		print_pkt(skb->data, skb_headlen(skb));
4427	}
4428
4429	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4430
4431	stmmac_flush_tx_descriptors(priv, queue);
4432	stmmac_tx_timer_arm(priv, queue);
4433
4434	return NETDEV_TX_OK;
4435
4436dma_map_err:
4437	dev_err(priv->device, "Tx dma map failed\n");
4438	dev_kfree_skb(skb);
4439	priv->xstats.tx_dropped++;
4440	return NETDEV_TX_OK;
4441}
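
/* Editor's note: hedged sketch (hypothetical helper, not driver code) of the
 * Tx interrupt coalescing decision used above and repeated in stmmac_xmit()
 * below: request an interrupt-on-completion (IC) when HW timestamping needs
 * it, or when the running frame counter crosses the per-queue threshold.
 * count_frames is assumed to already include this frame's tx_packets, as in
 * the driver.
 */
static inline bool stmmac_want_tx_ic_example(bool hw_tstamp, u32 coal_frames,
					     u32 tx_packets, u32 count_frames)
{
	if (hw_tstamp)
		return true;
	if (!coal_frames)
		return false;
	if (tx_packets > coal_frames)
		return true;
	return (count_frames % coal_frames) < tx_packets;
}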
4442
4443/**
4444 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4445 * @skb: socket buffer to check
4446 *
4447 * Check if a packet has an ethertype that will trigger the IP header checks
4448 * and IP/TCP checksum engine of the stmmac core.
4449 *
4450 * Return: true if the ethertype can trigger the checksum engine, false
4451 * otherwise
4452 */
4453static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4454{
4455	int depth = 0;
4456	__be16 proto;
4457
4458	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4459				    &depth);
4460
4461	return (depth <= ETH_HLEN) &&
4462		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4463}
4464
4465/**
4466 *  stmmac_xmit - Tx entry point of the driver
4467 *  @skb : the socket buffer
4468 *  @dev : device pointer
4469 *  Description : this is the tx entry point of the driver.
4470 *  It programs the chain or the ring and supports oversized frames
4471 *  and SG feature.
4472 */
4473static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4474{
4475	unsigned int first_entry, tx_packets, enh_desc;
4476	struct stmmac_priv *priv = netdev_priv(dev);
4477	unsigned int nopaged_len = skb_headlen(skb);
4478	int i, csum_insertion = 0, is_jumbo = 0;
4479	u32 queue = skb_get_queue_mapping(skb);
4480	int nfrags = skb_shinfo(skb)->nr_frags;
4481	int gso = skb_shinfo(skb)->gso_type;
4482	struct stmmac_txq_stats *txq_stats;
4483	struct dma_edesc *tbs_desc = NULL;
4484	struct dma_desc *desc, *first;
4485	struct stmmac_tx_queue *tx_q;
4486	bool has_vlan, set_ic;
4487	int entry, first_tx;
4488	dma_addr_t des;
4489
4490	tx_q = &priv->dma_conf.tx_queue[queue];
4491	txq_stats = &priv->xstats.txq_stats[queue];
4492	first_tx = tx_q->cur_tx;
4493
4494	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4495		stmmac_disable_eee_mode(priv);
4496
4497	/* Manage oversized TCP frames for GMAC4 device */
4498	if (skb_is_gso(skb) && priv->tso) {
4499		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4500			return stmmac_tso_xmit(skb, dev);
4501		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4502			return stmmac_tso_xmit(skb, dev);
4503	}
4504
4505	if (priv->est && priv->est->enable &&
4506	    priv->est->max_sdu[queue] &&
4507	    skb->len > priv->est->max_sdu[queue]) {
4508		priv->xstats.max_sdu_txq_drop[queue]++;
4509		goto max_sdu_err;
4510	}
4511
4512	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4513		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4514			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4515								queue));
4516			/* This is a hard error, log it. */
4517			netdev_err(priv->dev,
4518				   "%s: Tx Ring full when queue awake\n",
4519				   __func__);
4520		}
4521		return NETDEV_TX_BUSY;
4522	}
4523
4524	/* Check if VLAN can be inserted by HW */
4525	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4526
4527	entry = tx_q->cur_tx;
4528	first_entry = entry;
4529	WARN_ON(tx_q->tx_skbuff[first_entry]);
4530
4531	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4532	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4533	 * queues. In that case, checksum offloading for those queues that don't
4534	 * support tx coe needs to fall back to software checksum calculation.
4535	 *
4536	 * Packets that won't trigger the COE, e.g. most DSA-tagged packets, will
4537	 * also have to be checksummed in software.
4538	 */
4539	if (csum_insertion &&
4540	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4541	     !stmmac_has_ip_ethertype(skb))) {
4542		if (unlikely(skb_checksum_help(skb)))
4543			goto dma_map_err;
4544		csum_insertion = !csum_insertion;
4545	}
4546
4547	if (likely(priv->extend_desc))
4548		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4549	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4550		desc = &tx_q->dma_entx[entry].basic;
4551	else
4552		desc = tx_q->dma_tx + entry;
4553
4554	first = desc;
4555
4556	if (has_vlan)
4557		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4558
4559	enh_desc = priv->plat->enh_desc;
4560	/* To program the descriptors according to the size of the frame */
4561	if (enh_desc)
4562		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4563
4564	if (unlikely(is_jumbo)) {
4565		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4566		if (unlikely(entry < 0) && (entry != -EINVAL))
4567			goto dma_map_err;
4568	}
4569
4570	for (i = 0; i < nfrags; i++) {
4571		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4572		int len = skb_frag_size(frag);
4573		bool last_segment = (i == (nfrags - 1));
4574
4575		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4576		WARN_ON(tx_q->tx_skbuff[entry]);
4577
4578		if (likely(priv->extend_desc))
4579			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4580		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4581			desc = &tx_q->dma_entx[entry].basic;
4582		else
4583			desc = tx_q->dma_tx + entry;
4584
4585		des = skb_frag_dma_map(priv->device, frag, 0, len,
4586				       DMA_TO_DEVICE);
4587		if (dma_mapping_error(priv->device, des))
4588			goto dma_map_err; /* should reuse desc w/o issues */
4589
4590		tx_q->tx_skbuff_dma[entry].buf = des;
4591
4592		stmmac_set_desc_addr(priv, desc, des);
4593
4594		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4595		tx_q->tx_skbuff_dma[entry].len = len;
4596		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4597		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4598
4599		/* Prepare the descriptor and set the own bit too */
4600		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4601				priv->mode, 1, last_segment, skb->len);
4602	}
4603
4604	/* Only the last descriptor gets to point to the skb. */
4605	tx_q->tx_skbuff[entry] = skb;
4606	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4607
4608	/* According to the coalesce parameter, the IC bit for the latest
4609	 * segment may be left clear and the timer re-armed to clean the tx
4610	 * status instead. This approach also takes care of the fragments:
4611	 * desc is the first element in case of no SG.
4612	 */
4613	tx_packets = (entry + 1) - first_tx;
4614	tx_q->tx_count_frames += tx_packets;
4615
4616	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4617		set_ic = true;
4618	else if (!priv->tx_coal_frames[queue])
4619		set_ic = false;
4620	else if (tx_packets > priv->tx_coal_frames[queue])
4621		set_ic = true;
4622	else if ((tx_q->tx_count_frames %
4623		  priv->tx_coal_frames[queue]) < tx_packets)
4624		set_ic = true;
4625	else
4626		set_ic = false;
4627
4628	if (set_ic) {
4629		if (likely(priv->extend_desc))
4630			desc = &tx_q->dma_etx[entry].basic;
4631		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4632			desc = &tx_q->dma_entx[entry].basic;
4633		else
4634			desc = &tx_q->dma_tx[entry];
4635
4636		tx_q->tx_count_frames = 0;
4637		stmmac_set_tx_ic(priv, desc);
4638	}
4639
4640	/* We've used all descriptors we need for this skb, however,
4641	 * advance cur_tx so that it references a fresh descriptor.
4642	 * ndo_start_xmit will fill this descriptor the next time it's
4643	 * called and stmmac_tx_clean may clean up to this descriptor.
4644	 */
4645	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4646	tx_q->cur_tx = entry;
4647
4648	if (netif_msg_pktdata(priv)) {
4649		netdev_dbg(priv->dev,
4650			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4651			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4652			   entry, first, nfrags);
4653
4654		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4655		print_pkt(skb->data, skb->len);
4656	}
4657
4658	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4659		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4660			  __func__);
4661		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4662	}
4663
4664	u64_stats_update_begin(&txq_stats->q_syncp);
4665	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4666	if (set_ic)
4667		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4668	u64_stats_update_end(&txq_stats->q_syncp);
4669
4670	if (priv->sarc_type)
4671		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4672
4673	skb_tx_timestamp(skb);
4674
4675	/* Ready to fill the first descriptor and set the OWN bit w/o any
4676	 * problems because all the descriptors are actually ready to be
4677	 * passed to the DMA engine.
4678	 */
4679	if (likely(!is_jumbo)) {
4680		bool last_segment = (nfrags == 0);
4681
4682		des = dma_map_single(priv->device, skb->data,
4683				     nopaged_len, DMA_TO_DEVICE);
4684		if (dma_mapping_error(priv->device, des))
4685			goto dma_map_err;
4686
4687		tx_q->tx_skbuff_dma[first_entry].buf = des;
4688		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4689		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4690
4691		stmmac_set_desc_addr(priv, first, des);
4692
4693		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4694		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4695
4696		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4697			     priv->hwts_tx_en)) {
4698			/* declare that device is doing timestamping */
4699			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4700			stmmac_enable_tx_timestamp(priv, first);
4701		}
4702
4703		/* Prepare the first descriptor setting the OWN bit too */
4704		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4705				csum_insertion, priv->mode, 0, last_segment,
4706				skb->len);
4707	}
4708
4709	if (tx_q->tbs & STMMAC_TBS_EN) {
4710		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4711
4712		tbs_desc = &tx_q->dma_entx[first_entry];
4713		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4714	}
4715
4716	stmmac_set_tx_owner(priv, first);
4717
4718	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4719
4720	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4721
4722	stmmac_flush_tx_descriptors(priv, queue);
4723	stmmac_tx_timer_arm(priv, queue);
4724
4725	return NETDEV_TX_OK;
4726
4727dma_map_err:
4728	netdev_err(priv->dev, "Tx DMA map failed\n");
4729max_sdu_err:
4730	dev_kfree_skb(skb);
4731	priv->xstats.tx_dropped++;
4732	return NETDEV_TX_OK;
4733}
4734
4735static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4736{
4737	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4738	__be16 vlan_proto = veth->h_vlan_proto;
4739	u16 vlanid;
4740
4741	if ((vlan_proto == htons(ETH_P_8021Q) &&
4742	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4743	    (vlan_proto == htons(ETH_P_8021AD) &&
4744	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4745		/* pop the vlan tag */
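		/* Editor's note: the two MAC addresses (12 bytes) are shifted
		 * 4 bytes towards the payload, overwriting the VLAN tag, and
		 * skb_pull() then drops the 4 stale leading bytes so the
		 * inner EtherType immediately follows the addresses again.
		 */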
4746		vlanid = ntohs(veth->h_vlan_TCI);
4747		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4748		skb_pull(skb, VLAN_HLEN);
4749		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4750	}
4751}
4752
4753/**
4754 * stmmac_rx_refill - refill used skb preallocated buffers
4755 * @priv: driver private structure
4756 * @queue: RX queue index
4757 * Description : this is to reallocate the RX buffers for the reception
4758 * process that is based on zero-copy.
4759 */
4760static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4761{
4762	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4763	int dirty = stmmac_rx_dirty(priv, queue);
4764	unsigned int entry = rx_q->dirty_rx;
4765	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4766
4767	if (priv->dma_cap.host_dma_width <= 32)
4768		gfp |= GFP_DMA32;
4769
4770	while (dirty-- > 0) {
4771		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4772		struct dma_desc *p;
4773		bool use_rx_wd;
4774
4775		if (priv->extend_desc)
4776			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4777		else
4778			p = rx_q->dma_rx + entry;
4779
4780		if (!buf->page) {
4781			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4782			if (!buf->page)
4783				break;
4784		}
4785
4786		if (priv->sph && !buf->sec_page) {
4787			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4788			if (!buf->sec_page)
4789				break;
4790
4791			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4792		}
4793
4794		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4795
4796		stmmac_set_desc_addr(priv, p, buf->addr);
4797		if (priv->sph)
4798			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4799		else
4800			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4801		stmmac_refill_desc3(priv, rx_q, p);
4802
4803		rx_q->rx_count_frames++;
4804		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4805		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4806			rx_q->rx_count_frames = 0;
4807
4808		use_rx_wd = !priv->rx_coal_frames[queue];
4809		use_rx_wd |= rx_q->rx_count_frames > 0;
4810		if (!priv->use_riwt)
4811			use_rx_wd = false;
4812
4813		dma_wmb();
4814		stmmac_set_rx_owner(priv, p, use_rx_wd);
4815
4816		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4817	}
4818	rx_q->dirty_rx = entry;
4819	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4820			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4821	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4822}
4823
4824static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4825				       struct dma_desc *p,
4826				       int status, unsigned int len)
4827{
4828	unsigned int plen = 0, hlen = 0;
4829	int coe = priv->hw->rx_csum;
4830
4831	/* Not first descriptor, buffer is always zero */
4832	if (priv->sph && len)
4833		return 0;
4834
4835	/* First descriptor, get split header length */
4836	stmmac_get_rx_header_len(priv, p, &hlen);
4837	if (priv->sph && hlen) {
4838		priv->xstats.rx_split_hdr_pkt_n++;
4839		return hlen;
4840	}
4841
4842	/* First descriptor, not last descriptor and not split header */
4843	if (status & rx_not_ls)
4844		return priv->dma_conf.dma_buf_sz;
4845
4846	plen = stmmac_get_rx_frame_len(priv, p, coe);
4847
4848	/* First descriptor and last descriptor and not split header */
4849	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4850}
4851
4852static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4853				       struct dma_desc *p,
4854				       int status, unsigned int len)
4855{
4856	int coe = priv->hw->rx_csum;
4857	unsigned int plen = 0;
4858
4859	/* Not split header, buffer is not available */
4860	if (!priv->sph)
4861		return 0;
4862
4863	/* Not last descriptor */
4864	if (status & rx_not_ls)
4865		return priv->dma_conf.dma_buf_sz;
4866
4867	plen = stmmac_get_rx_frame_len(priv, p, coe);
4868
4869	/* Last descriptor */
4870	return plen - len;
4871}
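
/* Editor's note: for a split-header (SPH) frame that fits in a single
 * descriptor, the two helpers above complement each other: buf1 carries the
 * parsed headers (hlen) and buf2 carries the rest, so buf1_len + buf2_len
 * equals the frame length reported by the descriptor.
 */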
4872
4873static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4874				struct xdp_frame *xdpf, bool dma_map)
4875{
4876	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4877	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4878	unsigned int entry = tx_q->cur_tx;
4879	struct dma_desc *tx_desc;
4880	dma_addr_t dma_addr;
4881	bool set_ic;
4882
4883	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4884		return STMMAC_XDP_CONSUMED;
4885
4886	if (priv->est && priv->est->enable &&
4887	    priv->est->max_sdu[queue] &&
4888	    xdpf->len > priv->est->max_sdu[queue]) {
4889		priv->xstats.max_sdu_txq_drop[queue]++;
4890		return STMMAC_XDP_CONSUMED;
4891	}
4892
4893	if (likely(priv->extend_desc))
4894		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4895	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4896		tx_desc = &tx_q->dma_entx[entry].basic;
4897	else
4898		tx_desc = tx_q->dma_tx + entry;
4899
4900	if (dma_map) {
4901		dma_addr = dma_map_single(priv->device, xdpf->data,
4902					  xdpf->len, DMA_TO_DEVICE);
4903		if (dma_mapping_error(priv->device, dma_addr))
4904			return STMMAC_XDP_CONSUMED;
4905
4906		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4907	} else {
4908		struct page *page = virt_to_page(xdpf->data);
4909
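		/* Editor's note: for XDP_TX the frame lives in a page_pool
		 * page that is already DMA-mapped; the payload starts right
		 * after the xdp_frame struct and its headroom, so only a
		 * sync for the device is needed, not a new mapping.
		 */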
4910		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4911			   xdpf->headroom;
4912		dma_sync_single_for_device(priv->device, dma_addr,
4913					   xdpf->len, DMA_BIDIRECTIONAL);
4914
4915		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4916	}
4917
4918	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4919	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4920	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4921	tx_q->tx_skbuff_dma[entry].last_segment = true;
4922	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4923
4924	tx_q->xdpf[entry] = xdpf;
4925
4926	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4927
4928	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4929			       true, priv->mode, true, true,
4930			       xdpf->len);
4931
4932	tx_q->tx_count_frames++;
4933
4934	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4935		set_ic = true;
4936	else
4937		set_ic = false;
4938
4939	if (set_ic) {
4940		tx_q->tx_count_frames = 0;
4941		stmmac_set_tx_ic(priv, tx_desc);
4942		u64_stats_update_begin(&txq_stats->q_syncp);
4943		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4944		u64_stats_update_end(&txq_stats->q_syncp);
4945	}
4946
4947	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4948
4949	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4950	tx_q->cur_tx = entry;
4951
4952	return STMMAC_XDP_TX;
4953}
4954
4955static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4956				   int cpu)
4957{
4958	int index = cpu;
4959
4960	if (unlikely(index < 0))
4961		index = 0;
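	/* Editor's note: the loop below folds the CPU number onto the
	 * available Tx queues, i.e. it behaves like
	 * index % priv->plat->tx_queues_to_use.
	 */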
4962
4963	while (index >= priv->plat->tx_queues_to_use)
4964		index -= priv->plat->tx_queues_to_use;
4965
4966	return index;
4967}
4968
4969static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4970				struct xdp_buff *xdp)
4971{
4972	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4973	int cpu = smp_processor_id();
4974	struct netdev_queue *nq;
4975	int queue;
4976	int res;
4977
4978	if (unlikely(!xdpf))
4979		return STMMAC_XDP_CONSUMED;
4980
4981	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4982	nq = netdev_get_tx_queue(priv->dev, queue);
4983
4984	__netif_tx_lock(nq, cpu);
4985	/* Avoids TX time-out as we are sharing with slow path */
4986	txq_trans_cond_update(nq);
4987
4988	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4989	if (res == STMMAC_XDP_TX)
4990		stmmac_flush_tx_descriptors(priv, queue);
4991
4992	__netif_tx_unlock(nq);
4993
4994	return res;
4995}
4996
4997static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4998				 struct bpf_prog *prog,
4999				 struct xdp_buff *xdp)
5000{
5001	u32 act;
5002	int res;
5003
5004	act = bpf_prog_run_xdp(prog, xdp);
5005	switch (act) {
5006	case XDP_PASS:
5007		res = STMMAC_XDP_PASS;
5008		break;
5009	case XDP_TX:
5010		res = stmmac_xdp_xmit_back(priv, xdp);
5011		break;
5012	case XDP_REDIRECT:
5013		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5014			res = STMMAC_XDP_CONSUMED;
5015		else
5016			res = STMMAC_XDP_REDIRECT;
5017		break;
5018	default:
5019		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5020		fallthrough;
5021	case XDP_ABORTED:
5022		trace_xdp_exception(priv->dev, prog, act);
5023		fallthrough;
5024	case XDP_DROP:
5025		res = STMMAC_XDP_CONSUMED;
5026		break;
5027	}
5028
5029	return res;
5030}
5031
5032static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5033					   struct xdp_buff *xdp)
5034{
5035	struct bpf_prog *prog;
5036	int res;
5037
5038	prog = READ_ONCE(priv->xdp_prog);
5039	if (!prog) {
5040		res = STMMAC_XDP_PASS;
5041		goto out;
5042	}
5043
5044	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5045out:
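	/* Editor's note: STMMAC_XDP_PASS is 0, so a PASS verdict comes back
	 * as a NULL pointer here, while non-zero verdicts (TX, REDIRECT,
	 * CONSUMED) reach the caller encoded in an ERR_PTR.
	 */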
5046	return ERR_PTR(-res);
5047}
5048
5049static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5050				   int xdp_status)
5051{
5052	int cpu = smp_processor_id();
5053	int queue;
5054
5055	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5056
5057	if (xdp_status & STMMAC_XDP_TX)
5058		stmmac_tx_timer_arm(priv, queue);
5059
5060	if (xdp_status & STMMAC_XDP_REDIRECT)
5061		xdp_do_flush();
5062}
5063
5064static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5065					       struct xdp_buff *xdp)
5066{
5067	unsigned int metasize = xdp->data - xdp->data_meta;
5068	unsigned int datasize = xdp->data_end - xdp->data;
5069	struct sk_buff *skb;
5070
5071	skb = napi_alloc_skb(&ch->rxtx_napi,
5072			     xdp->data_end - xdp->data_hard_start);
5073	if (unlikely(!skb))
5074		return NULL;
5075
5076	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5077	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5078	if (metasize)
5079		skb_metadata_set(skb, metasize);
5080
5081	return skb;
5082}
5083
5084static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5085				   struct dma_desc *p, struct dma_desc *np,
5086				   struct xdp_buff *xdp)
5087{
5088	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5089	struct stmmac_channel *ch = &priv->channel[queue];
5090	unsigned int len = xdp->data_end - xdp->data;
5091	enum pkt_hash_types hash_type;
5092	int coe = priv->hw->rx_csum;
5093	struct sk_buff *skb;
5094	u32 hash;
5095
5096	skb = stmmac_construct_skb_zc(ch, xdp);
5097	if (!skb) {
5098		priv->xstats.rx_dropped++;
5099		return;
5100	}
5101
5102	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5103	if (priv->hw->hw_vlan_en)
5104		/* MAC level stripping. */
5105		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5106	else
5107		/* Driver level stripping. */
5108		stmmac_rx_vlan(priv->dev, skb);
5109	skb->protocol = eth_type_trans(skb, priv->dev);
5110
5111	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5112		skb_checksum_none_assert(skb);
5113	else
5114		skb->ip_summed = CHECKSUM_UNNECESSARY;
5115
5116	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5117		skb_set_hash(skb, hash, hash_type);
5118
5119	skb_record_rx_queue(skb, queue);
5120	napi_gro_receive(&ch->rxtx_napi, skb);
5121
5122	u64_stats_update_begin(&rxq_stats->napi_syncp);
5123	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5124	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5125	u64_stats_update_end(&rxq_stats->napi_syncp);
5126}
5127
5128static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5129{
5130	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5131	unsigned int entry = rx_q->dirty_rx;
5132	struct dma_desc *rx_desc = NULL;
5133	bool ret = true;
5134
5135	budget = min(budget, stmmac_rx_dirty(priv, queue));
5136
5137	while (budget-- > 0 && entry != rx_q->cur_rx) {
5138		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5139		dma_addr_t dma_addr;
5140		bool use_rx_wd;
5141
5142		if (!buf->xdp) {
5143			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5144			if (!buf->xdp) {
5145				ret = false;
5146				break;
5147			}
5148		}
5149
5150		if (priv->extend_desc)
5151			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5152		else
5153			rx_desc = rx_q->dma_rx + entry;
5154
5155		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5156		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5157		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5158		stmmac_refill_desc3(priv, rx_q, rx_desc);
5159
5160		rx_q->rx_count_frames++;
5161		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5162		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5163			rx_q->rx_count_frames = 0;
5164
5165		use_rx_wd = !priv->rx_coal_frames[queue];
5166		use_rx_wd |= rx_q->rx_count_frames > 0;
5167		if (!priv->use_riwt)
5168			use_rx_wd = false;
5169
5170		dma_wmb();
5171		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5172
5173		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5174	}
5175
5176	if (rx_desc) {
5177		rx_q->dirty_rx = entry;
5178		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5179				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5180		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5181	}
5182
5183	return ret;
5184}
5185
5186static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5187{
5188	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5189	 * to represent incoming packet, whereas cb field in the same structure
5190	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5191	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5192	 */
5193	return (struct stmmac_xdp_buff *)xdp;
5194}
5195
5196static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5197{
5198	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5199	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5200	unsigned int count = 0, error = 0, len = 0;
5201	int dirty = stmmac_rx_dirty(priv, queue);
5202	unsigned int next_entry = rx_q->cur_rx;
5203	u32 rx_errors = 0, rx_dropped = 0;
5204	unsigned int desc_size;
5205	struct bpf_prog *prog;
5206	bool failure = false;
5207	int xdp_status = 0;
5208	int status = 0;
5209
5210	if (netif_msg_rx_status(priv)) {
5211		void *rx_head;
5212
5213		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5214		if (priv->extend_desc) {
5215			rx_head = (void *)rx_q->dma_erx;
5216			desc_size = sizeof(struct dma_extended_desc);
5217		} else {
5218			rx_head = (void *)rx_q->dma_rx;
5219			desc_size = sizeof(struct dma_desc);
5220		}
5221
5222		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5223				    rx_q->dma_rx_phy, desc_size);
5224	}
5225	while (count < limit) {
5226		struct stmmac_rx_buffer *buf;
5227		struct stmmac_xdp_buff *ctx;
5228		unsigned int buf1_len = 0;
5229		struct dma_desc *np, *p;
5230		int entry;
5231		int res;
5232
5233		if (!count && rx_q->state_saved) {
5234			error = rx_q->state.error;
5235			len = rx_q->state.len;
5236		} else {
5237			rx_q->state_saved = false;
5238			error = 0;
5239			len = 0;
5240		}
5241
5242		if (count >= limit)
5243			break;
5244
5245read_again:
5246		buf1_len = 0;
5247		entry = next_entry;
5248		buf = &rx_q->buf_pool[entry];
5249
5250		if (dirty >= STMMAC_RX_FILL_BATCH) {
5251			failure = failure ||
5252				  !stmmac_rx_refill_zc(priv, queue, dirty);
5253			dirty = 0;
5254		}
5255
5256		if (priv->extend_desc)
5257			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5258		else
5259			p = rx_q->dma_rx + entry;
5260
5261		/* read the status of the incoming frame */
5262		status = stmmac_rx_status(priv, &priv->xstats, p);
5263		/* check if managed by the DMA otherwise go ahead */
5264		if (unlikely(status & dma_own))
5265			break;
5266
5267		/* Prefetch the next RX descriptor */
5268		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5269						priv->dma_conf.dma_rx_size);
5270		next_entry = rx_q->cur_rx;
5271
5272		if (priv->extend_desc)
5273			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5274		else
5275			np = rx_q->dma_rx + next_entry;
5276
5277		prefetch(np);
5278
5279		/* Ensure a valid XSK buffer before proceeding */
5280		if (!buf->xdp)
5281			break;
5282
5283		if (priv->extend_desc)
5284			stmmac_rx_extended_status(priv, &priv->xstats,
5285						  rx_q->dma_erx + entry);
5286		if (unlikely(status == discard_frame)) {
5287			xsk_buff_free(buf->xdp);
5288			buf->xdp = NULL;
5289			dirty++;
5290			error = 1;
5291			if (!priv->hwts_rx_en)
5292				rx_errors++;
5293		}
5294
5295		if (unlikely(error && (status & rx_not_ls)))
5296			goto read_again;
5297		if (unlikely(error)) {
5298			count++;
5299			continue;
5300		}
5301
5302		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5303		if (likely(status & rx_not_ls)) {
5304			xsk_buff_free(buf->xdp);
5305			buf->xdp = NULL;
5306			dirty++;
5307			count++;
5308			goto read_again;
5309		}
5310
5311		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5312		ctx->priv = priv;
5313		ctx->desc = p;
5314		ctx->ndesc = np;
5315
5316		/* XDP ZC frames only support primary buffers for now */
5317		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5318		len += buf1_len;
5319
5320		/* ACS is disabled; strip manually. */
5321		if (likely(!(status & rx_not_ls))) {
5322			buf1_len -= ETH_FCS_LEN;
5323			len -= ETH_FCS_LEN;
5324		}
5325
5326		/* RX buffer is good and fits into an XSK pool buffer */
5327		buf->xdp->data_end = buf->xdp->data + buf1_len;
5328		xsk_buff_dma_sync_for_cpu(buf->xdp);
5329
5330		prog = READ_ONCE(priv->xdp_prog);
5331		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5332
5333		switch (res) {
5334		case STMMAC_XDP_PASS:
5335			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5336			xsk_buff_free(buf->xdp);
5337			break;
5338		case STMMAC_XDP_CONSUMED:
5339			xsk_buff_free(buf->xdp);
5340			rx_dropped++;
5341			break;
5342		case STMMAC_XDP_TX:
5343		case STMMAC_XDP_REDIRECT:
5344			xdp_status |= res;
5345			break;
5346		}
5347
5348		buf->xdp = NULL;
5349		dirty++;
5350		count++;
5351	}
5352
5353	if (status & rx_not_ls) {
5354		rx_q->state_saved = true;
5355		rx_q->state.error = error;
5356		rx_q->state.len = len;
5357	}
5358
5359	stmmac_finalize_xdp_rx(priv, xdp_status);
5360
5361	u64_stats_update_begin(&rxq_stats->napi_syncp);
5362	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5363	u64_stats_update_end(&rxq_stats->napi_syncp);
5364
5365	priv->xstats.rx_dropped += rx_dropped;
5366	priv->xstats.rx_errors += rx_errors;
5367
5368	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5369		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5370			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5371		else
5372			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5373
5374		return (int)count;
5375	}
5376
5377	return failure ? limit : (int)count;
5378}
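/* Return-value convention above: when a zero-copy refill failed, the full
 * budget ("limit") is reported even if fewer packets were processed, so the
 * NAPI core keeps polling and the refill is retried on the next run instead
 * of stalling until the next interrupt.
 */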
5379
5380/**
5381 * stmmac_rx - manage the receive process
5382 * @priv: driver private structure
5383 * @limit: napi budget
5384 * @queue: RX queue index.
5385 * Description : this is the function called by the napi poll method.
5386 * It gets all the frames inside the ring.
5387 */
5388static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5389{
5390	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5391	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5392	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5393	struct stmmac_channel *ch = &priv->channel[queue];
5394	unsigned int count = 0, error = 0, len = 0;
5395	int status = 0, coe = priv->hw->rx_csum;
5396	unsigned int next_entry = rx_q->cur_rx;
5397	enum dma_data_direction dma_dir;
5398	unsigned int desc_size;
5399	struct sk_buff *skb = NULL;
5400	struct stmmac_xdp_buff ctx;
5401	int xdp_status = 0;
5402	int buf_sz;
5403
5404	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5405	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5406	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5407
5408	if (netif_msg_rx_status(priv)) {
5409		void *rx_head;
5410
5411		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5412		if (priv->extend_desc) {
5413			rx_head = (void *)rx_q->dma_erx;
5414			desc_size = sizeof(struct dma_extended_desc);
5415		} else {
5416			rx_head = (void *)rx_q->dma_rx;
5417			desc_size = sizeof(struct dma_desc);
5418		}
5419
5420		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5421				    rx_q->dma_rx_phy, desc_size);
5422	}
5423	while (count < limit) {
5424		unsigned int buf1_len = 0, buf2_len = 0;
5425		enum pkt_hash_types hash_type;
5426		struct stmmac_rx_buffer *buf;
5427		struct dma_desc *np, *p;
5428		int entry;
5429		u32 hash;
5430
5431		if (!count && rx_q->state_saved) {
5432			skb = rx_q->state.skb;
5433			error = rx_q->state.error;
5434			len = rx_q->state.len;
5435		} else {
5436			rx_q->state_saved = false;
5437			skb = NULL;
5438			error = 0;
5439			len = 0;
5440		}
5441
5442read_again:
5443		if (count >= limit)
5444			break;
5445
5446		buf1_len = 0;
5447		buf2_len = 0;
5448		entry = next_entry;
5449		buf = &rx_q->buf_pool[entry];
5450
5451		if (priv->extend_desc)
5452			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5453		else
5454			p = rx_q->dma_rx + entry;
5455
5456		/* read the status of the incoming frame */
5457		status = stmmac_rx_status(priv, &priv->xstats, p);
5458		/* check if it is still owned by the DMA; otherwise go ahead */
5459		if (unlikely(status & dma_own))
5460			break;
5461
5462		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5463						priv->dma_conf.dma_rx_size);
5464		next_entry = rx_q->cur_rx;
5465
5466		if (priv->extend_desc)
5467			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5468		else
5469			np = rx_q->dma_rx + next_entry;
5470
5471		prefetch(np);
5472
5473		if (priv->extend_desc)
5474			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5475		if (unlikely(status == discard_frame)) {
5476			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5477			buf->page = NULL;
5478			error = 1;
5479			if (!priv->hwts_rx_en)
5480				rx_errors++;
5481		}
5482
5483		if (unlikely(error && (status & rx_not_ls)))
5484			goto read_again;
5485		if (unlikely(error)) {
5486			dev_kfree_skb(skb);
5487			skb = NULL;
5488			count++;
5489			continue;
5490		}
5491
5492		/* Buffer is good. Go on. */
5493
5494		prefetch(page_address(buf->page) + buf->page_offset);
5495		if (buf->sec_page)
5496			prefetch(page_address(buf->sec_page));
5497
5498		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5499		len += buf1_len;
5500		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5501		len += buf2_len;
5502
5503		/* ACS is disabled; strip manually. */
5504		if (likely(!(status & rx_not_ls))) {
5505			if (buf2_len) {
5506				buf2_len -= ETH_FCS_LEN;
5507				len -= ETH_FCS_LEN;
5508			} else if (buf1_len) {
5509				buf1_len -= ETH_FCS_LEN;
5510				len -= ETH_FCS_LEN;
5511			}
5512		}
5513
5514		if (!skb) {
5515			unsigned int pre_len, sync_len;
5516
5517			dma_sync_single_for_cpu(priv->device, buf->addr,
5518						buf1_len, dma_dir);
5519
5520			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5521			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5522					 buf->page_offset, buf1_len, true);
5523
5524			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5525				  buf->page_offset;
5526
5527			ctx.priv = priv;
5528			ctx.desc = p;
5529			ctx.ndesc = np;
5530
5531			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5532			/* Due to xdp_adjust_tail: the DMA sync for_device must
5533			 * cover the max length the CPU touched
5534			 */
5535			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5536				   buf->page_offset;
5537			sync_len = max(sync_len, pre_len);
5538
5539			/* For non-XDP_PASS verdicts */
5540			if (IS_ERR(skb)) {
5541				unsigned int xdp_res = -PTR_ERR(skb);
5542
5543				if (xdp_res & STMMAC_XDP_CONSUMED) {
5544					page_pool_put_page(rx_q->page_pool,
5545							   virt_to_head_page(ctx.xdp.data),
5546							   sync_len, true);
5547					buf->page = NULL;
5548					rx_dropped++;
5549
5550					/* Clear skb, as it only carried the
5551					 * status set by the XDP program.
5552					 */
5553					skb = NULL;
5554
5555					if (unlikely((status & rx_not_ls)))
5556						goto read_again;
5557
5558					count++;
5559					continue;
5560				} else if (xdp_res & (STMMAC_XDP_TX |
5561						      STMMAC_XDP_REDIRECT)) {
5562					xdp_status |= xdp_res;
5563					buf->page = NULL;
5564					skb = NULL;
5565					count++;
5566					continue;
5567				}
5568			}
5569		}
5570
5571		if (!skb) {
5572			/* XDP program may expand or reduce tail */
5573			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5574
5575			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5576			if (!skb) {
5577				rx_dropped++;
5578				count++;
5579				goto drain_data;
5580			}
5581
5582			/* XDP program may adjust header */
5583			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5584			skb_put(skb, buf1_len);
5585
5586			/* Data payload copied into SKB, page ready for recycle */
5587			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5588			buf->page = NULL;
5589		} else if (buf1_len) {
5590			dma_sync_single_for_cpu(priv->device, buf->addr,
5591						buf1_len, dma_dir);
5592			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5593					buf->page, buf->page_offset, buf1_len,
5594					priv->dma_conf.dma_buf_sz);
5595
5596			/* Data payload appended into SKB */
5597			skb_mark_for_recycle(skb);
5598			buf->page = NULL;
5599		}
5600
5601		if (buf2_len) {
5602			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5603						buf2_len, dma_dir);
5604			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5605					buf->sec_page, 0, buf2_len,
5606					priv->dma_conf.dma_buf_sz);
5607
5608			/* Data payload appended into SKB */
5609			skb_mark_for_recycle(skb);
5610			buf->sec_page = NULL;
5611		}
5612
5613drain_data:
5614		if (likely(status & rx_not_ls))
5615			goto read_again;
5616		if (!skb)
5617			continue;
5618
5619		/* Got entire packet into SKB. Finish it. */
5620
5621		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5622
5623		if (priv->hw->hw_vlan_en)
5624			/* MAC level stripping. */
5625			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5626		else
5627			/* Driver level stripping. */
5628			stmmac_rx_vlan(priv->dev, skb);
5629
5630		skb->protocol = eth_type_trans(skb, priv->dev);
5631
5632		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5633			skb_checksum_none_assert(skb);
5634		else
5635			skb->ip_summed = CHECKSUM_UNNECESSARY;
5636
5637		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5638			skb_set_hash(skb, hash, hash_type);
5639
5640		skb_record_rx_queue(skb, queue);
5641		napi_gro_receive(&ch->rx_napi, skb);
5642		skb = NULL;
5643
5644		rx_packets++;
5645		rx_bytes += len;
5646		count++;
5647	}
5648
5649	if (status & rx_not_ls || skb) {
5650		rx_q->state_saved = true;
5651		rx_q->state.skb = skb;
5652		rx_q->state.error = error;
5653		rx_q->state.len = len;
5654	}
5655
5656	stmmac_finalize_xdp_rx(priv, xdp_status);
5657
5658	stmmac_rx_refill(priv, queue);
5659
5660	u64_stats_update_begin(&rxq_stats->napi_syncp);
5661	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5662	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5663	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5664	u64_stats_update_end(&rxq_stats->napi_syncp);
5665
5666	priv->xstats.rx_dropped += rx_dropped;
5667	priv->xstats.rx_errors += rx_errors;
5668
5669	return count;
5670}
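/* Note on the state_saved handling above: a frame can span several
 * descriptors (rx_not_ls is set on all but the last one).  If the NAPI
 * budget runs out in the middle of such a frame, the partially built skb
 * together with the accumulated error/len state is parked in rx_q->state
 * and picked up again at the top of the loop on the next poll, so no data
 * is lost.
 */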
5671
5672static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5673{
5674	struct stmmac_channel *ch =
5675		container_of(napi, struct stmmac_channel, rx_napi);
5676	struct stmmac_priv *priv = ch->priv_data;
5677	struct stmmac_rxq_stats *rxq_stats;
5678	u32 chan = ch->index;
5679	int work_done;
5680
5681	rxq_stats = &priv->xstats.rxq_stats[chan];
5682	u64_stats_update_begin(&rxq_stats->napi_syncp);
5683	u64_stats_inc(&rxq_stats->napi.poll);
5684	u64_stats_update_end(&rxq_stats->napi_syncp);
5685
5686	work_done = stmmac_rx(priv, budget, chan);
5687	if (work_done < budget && napi_complete_done(napi, work_done)) {
5688		unsigned long flags;
5689
5690		spin_lock_irqsave(&ch->lock, flags);
5691		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5692		spin_unlock_irqrestore(&ch->lock, flags);
5693	}
5694
5695	return work_done;
5696}
5697
5698static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5699{
5700	struct stmmac_channel *ch =
5701		container_of(napi, struct stmmac_channel, tx_napi);
5702	struct stmmac_priv *priv = ch->priv_data;
5703	struct stmmac_txq_stats *txq_stats;
5704	bool pending_packets = false;
5705	u32 chan = ch->index;
5706	int work_done;
5707
5708	txq_stats = &priv->xstats.txq_stats[chan];
5709	u64_stats_update_begin(&txq_stats->napi_syncp);
5710	u64_stats_inc(&txq_stats->napi.poll);
5711	u64_stats_update_end(&txq_stats->napi_syncp);
5712
5713	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5714	work_done = min(work_done, budget);
5715
5716	if (work_done < budget && napi_complete_done(napi, work_done)) {
5717		unsigned long flags;
5718
5719		spin_lock_irqsave(&ch->lock, flags);
5720		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5721		spin_unlock_irqrestore(&ch->lock, flags);
5722	}
5723
5724	/* TX still has packets to handle, check if we need to arm the tx timer */
5725	if (pending_packets)
5726		stmmac_tx_timer_arm(priv, chan);
5727
5728	return work_done;
5729}
5730
5731static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5732{
5733	struct stmmac_channel *ch =
5734		container_of(napi, struct stmmac_channel, rxtx_napi);
5735	struct stmmac_priv *priv = ch->priv_data;
5736	bool tx_pending_packets = false;
5737	int rx_done, tx_done, rxtx_done;
5738	struct stmmac_rxq_stats *rxq_stats;
5739	struct stmmac_txq_stats *txq_stats;
5740	u32 chan = ch->index;
5741
5742	rxq_stats = &priv->xstats.rxq_stats[chan];
5743	u64_stats_update_begin(&rxq_stats->napi_syncp);
5744	u64_stats_inc(&rxq_stats->napi.poll);
5745	u64_stats_update_end(&rxq_stats->napi_syncp);
5746
5747	txq_stats = &priv->xstats.txq_stats[chan];
5748	u64_stats_update_begin(&txq_stats->napi_syncp);
5749	u64_stats_inc(&txq_stats->napi.poll);
5750	u64_stats_update_end(&txq_stats->napi_syncp);
5751
5752	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5753	tx_done = min(tx_done, budget);
5754
5755	rx_done = stmmac_rx_zc(priv, budget, chan);
5756
5757	rxtx_done = max(tx_done, rx_done);
5758
5759	/* If either TX or RX work is not complete, return budget
5760	 * and keep polling
5761	 */
5762	if (rxtx_done >= budget)
5763		return budget;
5764
5765	/* all work done, exit the polling mode */
5766	if (napi_complete_done(napi, rxtx_done)) {
5767		unsigned long flags;
5768
5769		spin_lock_irqsave(&ch->lock, flags);
5770		/* Both RX and TX work are complete,
5771		 * so enable both RX & TX IRQs.
5772		 */
5773		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5774		spin_unlock_irqrestore(&ch->lock, flags);
5775	}
5776
5777	/* TX still has packets to handle, check if we need to arm the tx timer */
5778	if (tx_pending_packets)
5779		stmmac_tx_timer_arm(priv, chan);
5780
5781	return min(rxtx_done, budget - 1);
5782}
5783
5784/**
5785 *  stmmac_tx_timeout
5786 *  @dev : Pointer to net device structure
5787 *  @txqueue: the index of the hanging transmit queue
5788 *  Description: this function is called when a packet transmission fails to
5789 *   complete within a reasonable time. The driver will mark the error in the
5790 *   netdev structure and arrange for the device to be reset to a sane state
5791 *   in order to transmit a new packet.
5792 */
5793static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5794{
5795	struct stmmac_priv *priv = netdev_priv(dev);
5796
5797	stmmac_global_err(priv);
5798}
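/* The recovery itself is asynchronous: stmmac_global_err() (defined earlier
 * in this file) flags a reset request and schedules the service task, which
 * ends up in stmmac_reset_subtask() further below and performs a
 * dev_close()/dev_open() cycle to bring the device back to a sane state.
 */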
5799
5800/**
5801 *  stmmac_set_rx_mode - entry point for multicast addressing
5802 *  @dev : pointer to the device structure
5803 *  Description:
5804 *  This function is a driver entry point which gets called by the kernel
5805 *  whenever multicast addresses must be enabled/disabled.
5806 *  Return value:
5807 *  void.
5808 */
5809static void stmmac_set_rx_mode(struct net_device *dev)
5810{
5811	struct stmmac_priv *priv = netdev_priv(dev);
5812
5813	stmmac_set_filter(priv, priv->hw, dev);
5814}
5815
5816/**
5817 *  stmmac_change_mtu - entry point to change MTU size for the device.
5818 *  @dev : device pointer.
5819 *  @new_mtu : the new MTU size for the device.
5820 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5821 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5822 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5823 *  Return value:
5824 *  0 on success and an appropriate (-)ve integer as defined in errno.h
5825 *  file on failure.
5826 */
5827static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5828{
5829	struct stmmac_priv *priv = netdev_priv(dev);
5830	int txfifosz = priv->plat->tx_fifo_size;
5831	struct stmmac_dma_conf *dma_conf;
5832	const int mtu = new_mtu;
5833	int ret;
5834
5835	if (txfifosz == 0)
5836		txfifosz = priv->dma_cap.tx_fifo_size;
5837
5838	txfifosz /= priv->plat->tx_queues_to_use;
5839
5840	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5841		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5842		return -EINVAL;
5843	}
5844
5845	new_mtu = STMMAC_ALIGN(new_mtu);
5846
5847	/* If condition true, FIFO is too small or MTU too large */
5848	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5849		return -EINVAL;
5850
5851	if (netif_running(dev)) {
5852		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5853		/* Try to allocate the new DMA conf with the new mtu */
5854		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5855		if (IS_ERR(dma_conf)) {
5856			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5857				   mtu);
5858			return PTR_ERR(dma_conf);
5859		}
5860
5861		stmmac_release(dev);
5862
5863		ret = __stmmac_open(dev, dma_conf);
5864		if (ret) {
5865			free_dma_desc_resources(priv, dma_conf);
5866			kfree(dma_conf);
5867			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5868			return ret;
5869		}
5870
5871		kfree(dma_conf);
5872
5873		stmmac_set_rx_mode(dev);
5874	}
5875
5876	WRITE_ONCE(dev->mtu, mtu);
5877	netdev_update_features(dev);
5878
5879	return 0;
5880}
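/* Illustrative usage from user space (interface name and value are examples
 * only): a plain MTU change such as
 *
 *	ip link set dev eth0 mtu 2000
 *
 * goes through the reallocation path above when the interface is running.
 * With an XDP program attached, requests above ETH_DATA_LEN are rejected
 * with -EINVAL, as enforced at the top of stmmac_change_mtu().
 */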
5881
5882static netdev_features_t stmmac_fix_features(struct net_device *dev,
5883					     netdev_features_t features)
5884{
5885	struct stmmac_priv *priv = netdev_priv(dev);
5886
5887	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5888		features &= ~NETIF_F_RXCSUM;
5889
5890	if (!priv->plat->tx_coe)
5891		features &= ~NETIF_F_CSUM_MASK;
5892
5893	/* Some GMAC devices have a bugged Jumbo frame support that
5894	 * needs to have the Tx COE disabled for oversized frames
5895	 * (due to limited buffer sizes). In this case we disable
5896	 * the TX csum insertion in the TDES and not use SF.
5897	 */
5898	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5899		features &= ~NETIF_F_CSUM_MASK;
5900
5901	/* Disable tso if asked by ethtool */
5902	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5903		if (features & NETIF_F_TSO)
5904			priv->tso = true;
5905		else
5906			priv->tso = false;
5907	}
5908
5909	return features;
5910}
5911
5912static int stmmac_set_features(struct net_device *netdev,
5913			       netdev_features_t features)
5914{
5915	struct stmmac_priv *priv = netdev_priv(netdev);
5916
5917	/* Keep the COE Type in case of csum is supporting */
5918	if (features & NETIF_F_RXCSUM)
5919		priv->hw->rx_csum = priv->plat->rx_coe;
5920	else
5921		priv->hw->rx_csum = 0;
5922	/* No check needed because rx_coe has been set before and it will be
5923	 * fixed in case of issue.
5924	 */
5925	stmmac_rx_ipc(priv, priv->hw);
5926
5927	if (priv->sph_cap) {
5928		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5929		u32 chan;
5930
5931		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5932			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5933	}
5934
5935	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5936		priv->hw->hw_vlan_en = true;
5937	else
5938		priv->hw->hw_vlan_en = false;
5939
5940	stmmac_set_hw_vlan_mode(priv, priv->hw);
5941
5942	return 0;
5943}
5944
5945static void stmmac_common_interrupt(struct stmmac_priv *priv)
5946{
5947	u32 rx_cnt = priv->plat->rx_queues_to_use;
5948	u32 tx_cnt = priv->plat->tx_queues_to_use;
5949	u32 queues_count;
5950	u32 queue;
5951	bool xmac;
5952
5953	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5954	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5955
5956	if (priv->irq_wake)
5957		pm_wakeup_event(priv->device, 0);
5958
5959	if (priv->dma_cap.estsel)
5960		stmmac_est_irq_status(priv, priv, priv->dev,
5961				      &priv->xstats, tx_cnt);
5962
5963	if (stmmac_fpe_supported(priv))
5964		stmmac_fpe_irq_status(priv);
5965
5966	/* To handle the GMAC's own interrupts */
5967	if ((priv->plat->has_gmac) || xmac) {
5968		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5969
5970		if (unlikely(status)) {
5971			/* For LPI we need to save the tx status */
5972			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5973				priv->tx_path_in_lpi_mode = true;
5974			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5975				priv->tx_path_in_lpi_mode = false;
5976		}
5977
5978		for (queue = 0; queue < queues_count; queue++)
5979			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
5980
5981		/* PCS link status */
5982		if (priv->hw->pcs &&
5983		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5984			if (priv->xstats.pcs_link)
5985				netif_carrier_on(priv->dev);
5986			else
5987				netif_carrier_off(priv->dev);
5988		}
5989
5990		stmmac_timestamp_interrupt(priv, priv);
5991	}
5992}
5993
5994/**
5995 *  stmmac_interrupt - main ISR
5996 *  @irq: interrupt number.
5997 *  @dev_id: to pass the net device pointer.
5998 *  Description: this is the main driver interrupt service routine.
5999 *  It can call:
6000 *  o DMA service routine (to manage incoming frame reception and transmission
6001 *    status)
6002 *  o Core interrupts to manage: remote wake-up, management counter, LPI
6003 *    interrupts.
6004 */
6005static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6006{
6007	struct net_device *dev = (struct net_device *)dev_id;
6008	struct stmmac_priv *priv = netdev_priv(dev);
6009
6010	/* Check if adapter is up */
6011	if (test_bit(STMMAC_DOWN, &priv->state))
6012		return IRQ_HANDLED;
6013
6014	/* Check ASP error if it isn't delivered via an individual IRQ */
6015	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6016		return IRQ_HANDLED;
6017
6018	/* To handle Common interrupts */
6019	stmmac_common_interrupt(priv);
6020
6021	/* To handle DMA interrupts */
6022	stmmac_dma_interrupt(priv);
6023
6024	return IRQ_HANDLED;
6025}
6026
6027static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6028{
6029	struct net_device *dev = (struct net_device *)dev_id;
6030	struct stmmac_priv *priv = netdev_priv(dev);
6031
6032	/* Check if adapter is up */
6033	if (test_bit(STMMAC_DOWN, &priv->state))
6034		return IRQ_HANDLED;
6035
6036	/* To handle Common interrupts */
6037	stmmac_common_interrupt(priv);
6038
6039	return IRQ_HANDLED;
6040}
6041
6042static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6043{
6044	struct net_device *dev = (struct net_device *)dev_id;
6045	struct stmmac_priv *priv = netdev_priv(dev);
6046
6047	/* Check if adapter is up */
6048	if (test_bit(STMMAC_DOWN, &priv->state))
6049		return IRQ_HANDLED;
6050
6051	/* Check if a fatal error happened */
6052	stmmac_safety_feat_interrupt(priv);
6053
6054	return IRQ_HANDLED;
6055}
6056
6057static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6058{
6059	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6060	struct stmmac_dma_conf *dma_conf;
6061	int chan = tx_q->queue_index;
6062	struct stmmac_priv *priv;
6063	int status;
6064
6065	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6066	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6067
6068	/* Check if adapter is up */
6069	if (test_bit(STMMAC_DOWN, &priv->state))
6070		return IRQ_HANDLED;
6071
6072	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6073
6074	if (unlikely(status & tx_hard_error_bump_tc)) {
6075		/* Try to bump up the dma threshold on this failure */
6076		stmmac_bump_dma_threshold(priv, chan);
6077	} else if (unlikely(status == tx_hard_error)) {
6078		stmmac_tx_err(priv, chan);
6079	}
6080
6081	return IRQ_HANDLED;
6082}
6083
6084static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6085{
6086	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6087	struct stmmac_dma_conf *dma_conf;
6088	int chan = rx_q->queue_index;
6089	struct stmmac_priv *priv;
6090
6091	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6092	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6093
6094	/* Check if adapter is up */
6095	if (test_bit(STMMAC_DOWN, &priv->state))
6096		return IRQ_HANDLED;
6097
6098	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6099
6100	return IRQ_HANDLED;
6101}
6102
6103/**
6104 *  stmmac_ioctl - Entry point for the Ioctl
6105 *  @dev: Device pointer.
6106 *  @rq: An IOCTL-specific structure that can contain a pointer to
6107 *  a proprietary structure used to pass information to the driver.
6108 *  @cmd: IOCTL command
6109 *  Description:
6110 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6111 */
6112static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6113{
6114	struct stmmac_priv *priv = netdev_priv(dev);
6115	int ret = -EOPNOTSUPP;
6116
6117	if (!netif_running(dev))
6118		return -EINVAL;
6119
6120	switch (cmd) {
6121	case SIOCGMIIPHY:
6122	case SIOCGMIIREG:
6123	case SIOCSMIIREG:
6124		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6125		break;
6126	case SIOCSHWTSTAMP:
6127		ret = stmmac_hwtstamp_set(dev, rq);
6128		break;
6129	case SIOCGHWTSTAMP:
6130		ret = stmmac_hwtstamp_get(dev, rq);
6131		break;
6132	default:
6133		break;
6134	}
6135
6136	return ret;
6137}
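/* A minimal user-space sketch of the SIOCSHWTSTAMP path handled above
 * (illustrative only; error handling omitted, "eth0" and the socket fd are
 * placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The request is forwarded to stmmac_hwtstamp_set() by the switch above.
 */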
6138
6139static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6140				    void *cb_priv)
6141{
6142	struct stmmac_priv *priv = cb_priv;
6143	int ret = -EOPNOTSUPP;
6144
6145	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6146		return ret;
6147
6148	__stmmac_disable_all_queues(priv);
6149
6150	switch (type) {
6151	case TC_SETUP_CLSU32:
6152		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6153		break;
6154	case TC_SETUP_CLSFLOWER:
6155		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6156		break;
6157	default:
6158		break;
6159	}
6160
6161	stmmac_enable_all_queues(priv);
6162	return ret;
6163}
6164
6165static LIST_HEAD(stmmac_block_cb_list);
6166
6167static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6168			   void *type_data)
6169{
6170	struct stmmac_priv *priv = netdev_priv(ndev);
6171
6172	switch (type) {
6173	case TC_QUERY_CAPS:
6174		return stmmac_tc_query_caps(priv, priv, type_data);
6175	case TC_SETUP_QDISC_MQPRIO:
6176		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6177	case TC_SETUP_BLOCK:
6178		return flow_block_cb_setup_simple(type_data,
6179						  &stmmac_block_cb_list,
6180						  stmmac_setup_tc_block_cb,
6181						  priv, priv, true);
6182	case TC_SETUP_QDISC_CBS:
6183		return stmmac_tc_setup_cbs(priv, priv, type_data);
6184	case TC_SETUP_QDISC_TAPRIO:
6185		return stmmac_tc_setup_taprio(priv, priv, type_data);
6186	case TC_SETUP_QDISC_ETF:
6187		return stmmac_tc_setup_etf(priv, priv, type_data);
6188	default:
6189		return -EOPNOTSUPP;
6190	}
6191}
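/* Illustrative tc usage for one of the offloads dispatched above (CBS); the
 * numbers are placeholders and assume an mqprio root qdisc with handle 100:
 * is already installed -- see tc-cbs(8) and tc-mqprio(8) for details:
 *
 *	tc qdisc replace dev eth0 parent 100:1 cbs \
 *		idleslope 20000 sendslope -980000 \
 *		hicredit 30 locredit -1470 offload 1
 *
 * With "offload 1" the request reaches TC_SETUP_QDISC_CBS and is programmed
 * into the hardware by stmmac_tc_setup_cbs().
 */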
6192
6193static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6194			       struct net_device *sb_dev)
6195{
6196	int gso = skb_shinfo(skb)->gso_type;
6197
6198	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6199		/*
6200		 * There is no way to determine the number of TSO/USO
6201		 * capable Queues. Let's always use Queue 0
6202		 * because if TSO/USO is supported then at least this
6203		 * one will be capable.
6204		 */
6205		return 0;
6206	}
6207
6208	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6209}
6210
6211static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6212{
6213	struct stmmac_priv *priv = netdev_priv(ndev);
6214	int ret = 0;
6215
6216	ret = pm_runtime_resume_and_get(priv->device);
6217	if (ret < 0)
6218		return ret;
6219
6220	ret = eth_mac_addr(ndev, addr);
6221	if (ret)
6222		goto set_mac_error;
6223
6224	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6225
6226set_mac_error:
6227	pm_runtime_put(priv->device);
6228
6229	return ret;
6230}
6231
6232#ifdef CONFIG_DEBUG_FS
6233static struct dentry *stmmac_fs_dir;
6234
6235static void sysfs_display_ring(void *head, int size, int extend_desc,
6236			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6237{
6238	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6239	struct dma_desc *p = (struct dma_desc *)head;
6240	unsigned int desc_size;
6241	dma_addr_t dma_addr;
6242	int i;
6243
6244	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6245	for (i = 0; i < size; i++) {
6246		dma_addr = dma_phy_addr + i * desc_size;
6247		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6248				i, &dma_addr,
6249				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6250				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6251		if (extend_desc)
6252			p = &(++ep)->basic;
6253		else
6254			p++;
6255	}
6256}
6257
6258static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6259{
6260	struct net_device *dev = seq->private;
6261	struct stmmac_priv *priv = netdev_priv(dev);
6262	u32 rx_count = priv->plat->rx_queues_to_use;
6263	u32 tx_count = priv->plat->tx_queues_to_use;
6264	u32 queue;
6265
6266	if ((dev->flags & IFF_UP) == 0)
6267		return 0;
6268
6269	for (queue = 0; queue < rx_count; queue++) {
6270		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6271
6272		seq_printf(seq, "RX Queue %d:\n", queue);
6273
6274		if (priv->extend_desc) {
6275			seq_printf(seq, "Extended descriptor ring:\n");
6276			sysfs_display_ring((void *)rx_q->dma_erx,
6277					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6278		} else {
6279			seq_printf(seq, "Descriptor ring:\n");
6280			sysfs_display_ring((void *)rx_q->dma_rx,
6281					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6282		}
6283	}
6284
6285	for (queue = 0; queue < tx_count; queue++) {
6286		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6287
6288		seq_printf(seq, "TX Queue %d:\n", queue);
6289
6290		if (priv->extend_desc) {
6291			seq_printf(seq, "Extended descriptor ring:\n");
6292			sysfs_display_ring((void *)tx_q->dma_etx,
6293					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6294		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6295			seq_printf(seq, "Descriptor ring:\n");
6296			sysfs_display_ring((void *)tx_q->dma_tx,
6297					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6298		}
6299	}
6300
6301	return 0;
6302}
6303DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6304
6305static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6306{
6307	static const char * const dwxgmac_timestamp_source[] = {
6308		"None",
6309		"Internal",
6310		"External",
6311		"Both",
6312	};
6313	static const char * const dwxgmac_safety_feature_desc[] = {
6314		"No",
6315		"All Safety Features with ECC and Parity",
6316		"All Safety Features without ECC or Parity",
6317		"All Safety Features with Parity Only",
6318		"ECC Only",
6319		"UNDEFINED",
6320		"UNDEFINED",
6321		"UNDEFINED",
6322	};
6323	struct net_device *dev = seq->private;
6324	struct stmmac_priv *priv = netdev_priv(dev);
6325
6326	if (!priv->hw_cap_support) {
6327		seq_printf(seq, "DMA HW features not supported\n");
6328		return 0;
6329	}
6330
6331	seq_printf(seq, "==============================\n");
6332	seq_printf(seq, "\tDMA HW features\n");
6333	seq_printf(seq, "==============================\n");
6334
6335	seq_printf(seq, "\t10/100 Mbps: %s\n",
6336		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6337	seq_printf(seq, "\t1000 Mbps: %s\n",
6338		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6339	seq_printf(seq, "\tHalf duplex: %s\n",
6340		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6341	if (priv->plat->has_xgmac) {
6342		seq_printf(seq,
6343			   "\tNumber of Additional MAC address registers: %d\n",
6344			   priv->dma_cap.multi_addr);
6345	} else {
6346		seq_printf(seq, "\tHash Filter: %s\n",
6347			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6348		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6349			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6350	}
6351	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6352		   (priv->dma_cap.pcs) ? "Y" : "N");
6353	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6354		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6355	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6356		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6357	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6358		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6359	seq_printf(seq, "\tRMON module: %s\n",
6360		   (priv->dma_cap.rmon) ? "Y" : "N");
6361	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6362		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6363	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6364		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6365	if (priv->plat->has_xgmac)
6366		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6367			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6368	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6369		   (priv->dma_cap.eee) ? "Y" : "N");
6370	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6371	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6372		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6373	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6374	    priv->plat->has_xgmac) {
6375		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6376			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6377	} else {
6378		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6379			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6380		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6381			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6382		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6383			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6384	}
6385	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6386		   priv->dma_cap.number_rx_channel);
6387	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6388		   priv->dma_cap.number_tx_channel);
6389	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6390		   priv->dma_cap.number_rx_queues);
6391	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6392		   priv->dma_cap.number_tx_queues);
6393	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6394		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6395	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6396	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6397	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6398		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6399	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6400	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6401		   priv->dma_cap.pps_out_num);
6402	seq_printf(seq, "\tSafety Features: %s\n",
6403		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6404	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6405		   priv->dma_cap.frpsel ? "Y" : "N");
6406	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6407		   priv->dma_cap.host_dma_width);
6408	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6409		   priv->dma_cap.rssen ? "Y" : "N");
6410	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6411		   priv->dma_cap.vlhash ? "Y" : "N");
6412	seq_printf(seq, "\tSplit Header: %s\n",
6413		   priv->dma_cap.sphen ? "Y" : "N");
6414	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6415		   priv->dma_cap.vlins ? "Y" : "N");
6416	seq_printf(seq, "\tDouble VLAN: %s\n",
6417		   priv->dma_cap.dvlan ? "Y" : "N");
6418	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6419		   priv->dma_cap.l3l4fnum);
6420	seq_printf(seq, "\tARP Offloading: %s\n",
6421		   priv->dma_cap.arpoffsel ? "Y" : "N");
6422	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6423		   priv->dma_cap.estsel ? "Y" : "N");
6424	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6425		   priv->dma_cap.fpesel ? "Y" : "N");
6426	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6427		   priv->dma_cap.tbssel ? "Y" : "N");
6428	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6429		   priv->dma_cap.tbs_ch_num);
6430	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6431		   priv->dma_cap.sgfsel ? "Y" : "N");
6432	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6433		   BIT(priv->dma_cap.ttsfd) >> 1);
6434	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6435		   priv->dma_cap.numtc);
6436	seq_printf(seq, "\tDCB Feature: %s\n",
6437		   priv->dma_cap.dcben ? "Y" : "N");
6438	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6439		   priv->dma_cap.advthword ? "Y" : "N");
6440	seq_printf(seq, "\tPTP Offload: %s\n",
6441		   priv->dma_cap.ptoen ? "Y" : "N");
6442	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6443		   priv->dma_cap.osten ? "Y" : "N");
6444	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6445		   priv->dma_cap.pfcen ? "Y" : "N");
6446	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6447		   BIT(priv->dma_cap.frpes) << 6);
6448	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6449		   BIT(priv->dma_cap.frpbs) << 6);
6450	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6451		   priv->dma_cap.frppipe_num);
6452	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6453		   priv->dma_cap.nrvf_num ?
6454		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6455	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6456		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6457	seq_printf(seq, "\tDepth of GCL: %lu\n",
6458		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6459	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6460		   priv->dma_cap.cbtisel ? "Y" : "N");
6461	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6462		   priv->dma_cap.aux_snapshot_n);
6463	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6464		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6465	seq_printf(seq, "\tEnhanced DMA: %s\n",
6466		   priv->dma_cap.edma ? "Y" : "N");
6467	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6468		   priv->dma_cap.ediffc ? "Y" : "N");
6469	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6470		   priv->dma_cap.vxn ? "Y" : "N");
6471	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6472		   priv->dma_cap.dbgmem ? "Y" : "N");
6473	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6474		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6475	return 0;
6476}
6477DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6478
6479/* Use network device events to rename debugfs file entries.
6480 */
6481static int stmmac_device_event(struct notifier_block *unused,
6482			       unsigned long event, void *ptr)
6483{
6484	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6485	struct stmmac_priv *priv = netdev_priv(dev);
6486
6487	if (dev->netdev_ops != &stmmac_netdev_ops)
6488		goto done;
6489
6490	switch (event) {
6491	case NETDEV_CHANGENAME:
6492		if (priv->dbgfs_dir)
6493			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6494							 priv->dbgfs_dir,
6495							 stmmac_fs_dir,
6496							 dev->name);
6497		break;
6498	}
6499done:
6500	return NOTIFY_DONE;
6501}
6502
6503static struct notifier_block stmmac_notifier = {
6504	.notifier_call = stmmac_device_event,
6505};
6506
6507static void stmmac_init_fs(struct net_device *dev)
6508{
6509	struct stmmac_priv *priv = netdev_priv(dev);
6510
6511	rtnl_lock();
6512
6513	/* Create per netdev entries */
6514	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6515
6516	/* Entry to report DMA RX/TX rings */
6517	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6518			    &stmmac_rings_status_fops);
6519
6520	/* Entry to report the DMA HW features */
6521	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6522			    &stmmac_dma_cap_fops);
6523
6524	rtnl_unlock();
6525}
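/* With debugfs mounted, the entries created above can be read directly;
 * assuming the usual mount point and the driver's debugfs root directory
 * (typically "stmmaceth"), e.g.:
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 *
 * which dump the RX/TX descriptor rings and the DMA HW feature report
 * produced by stmmac_rings_status_show() and stmmac_dma_cap_show().
 */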
6526
6527static void stmmac_exit_fs(struct net_device *dev)
6528{
6529	struct stmmac_priv *priv = netdev_priv(dev);
6530
6531	debugfs_remove_recursive(priv->dbgfs_dir);
6532}
6533#endif /* CONFIG_DEBUG_FS */
6534
6535static u32 stmmac_vid_crc32_le(__le16 vid_le)
6536{
6537	unsigned char *data = (unsigned char *)&vid_le;
6538	unsigned char data_byte = 0;
6539	u32 crc = ~0x0;
6540	u32 temp = 0;
6541	int i, bits;
6542
6543	bits = get_bitmask_order(VLAN_VID_MASK);
6544	for (i = 0; i < bits; i++) {
6545		if ((i % 8) == 0)
6546			data_byte = data[i / 8];
6547
6548		temp = ((crc & 1) ^ data_byte) & 1;
6549		crc >>= 1;
6550		data_byte >>= 1;
6551
6552		if (temp)
6553			crc ^= 0xedb88320;
6554	}
6555
6556	return crc;
6557}
6558
6559static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6560{
6561	u32 crc, hash = 0;
6562	u16 pmatch = 0;
6563	int count = 0;
6564	u16 vid = 0;
6565
6566	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6567		__le16 vid_le = cpu_to_le16(vid);
6568		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6569		hash |= (1 << crc);
6570		count++;
6571	}
6572
6573	if (!priv->dma_cap.vlhash) {
6574		if (count > 2) /* VID = 0 always passes filter */
6575			return -EOPNOTSUPP;
6576
6577		pmatch = vid;
6578		hash = 0;
6579	}
6580
6581	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6582}
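/* A short worked example of the hash built above: for each active VID,
 * stmmac_vid_crc32_le() computes a CRC-32 (reflected polynomial 0xedb88320)
 * over the 12 VID bits; bitrev32(~crc) >> 28 keeps the top four bits, which
 * select one of 16 hash bins, i.e. hash |= BIT(bin).  When the hardware has
 * no VLAN hash filter (dma_cap.vlhash == 0), the code falls back to a single
 * perfect-match entry, which is why at most one VID besides VID 0 is
 * accepted before returning -EOPNOTSUPP.
 */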
6583
6584static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6585{
6586	struct stmmac_priv *priv = netdev_priv(ndev);
6587	bool is_double = false;
6588	int ret;
6589
6590	ret = pm_runtime_resume_and_get(priv->device);
6591	if (ret < 0)
6592		return ret;
6593
6594	if (be16_to_cpu(proto) == ETH_P_8021AD)
6595		is_double = true;
6596
6597	set_bit(vid, priv->active_vlans);
6598	ret = stmmac_vlan_update(priv, is_double);
6599	if (ret) {
6600		clear_bit(vid, priv->active_vlans);
6601		goto err_pm_put;
6602	}
6603
6604	if (priv->hw->num_vlan) {
6605		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6606		if (ret)
6607			goto err_pm_put;
6608	}
6609err_pm_put:
6610	pm_runtime_put(priv->device);
6611
6612	return ret;
6613}
6614
6615static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6616{
6617	struct stmmac_priv *priv = netdev_priv(ndev);
6618	bool is_double = false;
6619	int ret;
6620
6621	ret = pm_runtime_resume_and_get(priv->device);
6622	if (ret < 0)
6623		return ret;
6624
6625	if (be16_to_cpu(proto) == ETH_P_8021AD)
6626		is_double = true;
6627
6628	clear_bit(vid, priv->active_vlans);
6629
6630	if (priv->hw->num_vlan) {
6631		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6632		if (ret)
6633			goto del_vlan_error;
6634	}
6635
6636	ret = stmmac_vlan_update(priv, is_double);
6637
6638del_vlan_error:
6639	pm_runtime_put(priv->device);
6640
6641	return ret;
6642}
6643
6644static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6645{
6646	struct stmmac_priv *priv = netdev_priv(dev);
6647
6648	switch (bpf->command) {
6649	case XDP_SETUP_PROG:
6650		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6651	case XDP_SETUP_XSK_POOL:
6652		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6653					     bpf->xsk.queue_id);
6654	default:
6655		return -EOPNOTSUPP;
6656	}
6657}
6658
6659static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6660			   struct xdp_frame **frames, u32 flags)
6661{
6662	struct stmmac_priv *priv = netdev_priv(dev);
6663	int cpu = smp_processor_id();
6664	struct netdev_queue *nq;
6665	int i, nxmit = 0;
6666	int queue;
6667
6668	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6669		return -ENETDOWN;
6670
6671	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6672		return -EINVAL;
6673
6674	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6675	nq = netdev_get_tx_queue(priv->dev, queue);
6676
6677	__netif_tx_lock(nq, cpu);
6678	/* Avoids TX time-out as we are sharing with slow path */
6679	txq_trans_cond_update(nq);
6680
6681	for (i = 0; i < num_frames; i++) {
6682		int res;
6683
6684		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6685		if (res == STMMAC_XDP_CONSUMED)
6686			break;
6687
6688		nxmit++;
6689	}
6690
6691	if (flags & XDP_XMIT_FLUSH) {
6692		stmmac_flush_tx_descriptors(priv, queue);
6693		stmmac_tx_timer_arm(priv, queue);
6694	}
6695
6696	__netif_tx_unlock(nq);
6697
6698	return nxmit;
6699}
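/* Note on the return value: as for other ndo_xdp_xmit() implementations,
 * only the number of frames actually queued is returned; frames from nxmit
 * onwards are not consumed here, and the XDP core caller is expected to
 * free them.
 */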
6700
6701void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6702{
6703	struct stmmac_channel *ch = &priv->channel[queue];
6704	unsigned long flags;
6705
6706	spin_lock_irqsave(&ch->lock, flags);
6707	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6708	spin_unlock_irqrestore(&ch->lock, flags);
6709
6710	stmmac_stop_rx_dma(priv, queue);
6711	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6712}
6713
6714void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6715{
6716	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6717	struct stmmac_channel *ch = &priv->channel[queue];
6718	unsigned long flags;
6719	u32 buf_size;
6720	int ret;
6721
6722	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6723	if (ret) {
6724		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6725		return;
6726	}
6727
6728	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6729	if (ret) {
6730		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6731		netdev_err(priv->dev, "Failed to init RX desc.\n");
6732		return;
6733	}
6734
6735	stmmac_reset_rx_queue(priv, queue);
6736	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6737
6738	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6739			    rx_q->dma_rx_phy, rx_q->queue_index);
6740
6741	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6742			     sizeof(struct dma_desc));
6743	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6744			       rx_q->rx_tail_addr, rx_q->queue_index);
6745
6746	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6747		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6748		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6749				      buf_size,
6750				      rx_q->queue_index);
6751	} else {
6752		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6753				      priv->dma_conf.dma_buf_sz,
6754				      rx_q->queue_index);
6755	}
6756
6757	stmmac_start_rx_dma(priv, queue);
6758
6759	spin_lock_irqsave(&ch->lock, flags);
6760	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6761	spin_unlock_irqrestore(&ch->lock, flags);
6762}
6763
6764void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6765{
6766	struct stmmac_channel *ch = &priv->channel[queue];
6767	unsigned long flags;
6768
6769	spin_lock_irqsave(&ch->lock, flags);
6770	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6771	spin_unlock_irqrestore(&ch->lock, flags);
6772
6773	stmmac_stop_tx_dma(priv, queue);
6774	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6775}
6776
6777void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6778{
6779	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6780	struct stmmac_channel *ch = &priv->channel[queue];
6781	unsigned long flags;
6782	int ret;
6783
6784	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6785	if (ret) {
6786		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6787		return;
6788	}
6789
6790	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6791	if (ret) {
6792		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6793		netdev_err(priv->dev, "Failed to init TX desc.\n");
6794		return;
6795	}
6796
6797	stmmac_reset_tx_queue(priv, queue);
6798	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6799
6800	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6801			    tx_q->dma_tx_phy, tx_q->queue_index);
6802
6803	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6804		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6805
6806	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6807	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6808			       tx_q->tx_tail_addr, tx_q->queue_index);
6809
6810	stmmac_start_tx_dma(priv, queue);
6811
6812	spin_lock_irqsave(&ch->lock, flags);
6813	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6814	spin_unlock_irqrestore(&ch->lock, flags);
6815}
6816
6817void stmmac_xdp_release(struct net_device *dev)
6818{
6819	struct stmmac_priv *priv = netdev_priv(dev);
6820	u32 chan;
6821
6822	/* Ensure tx function is not running */
6823	netif_tx_disable(dev);
6824
6825	/* Disable NAPI process */
6826	stmmac_disable_all_queues(priv);
6827
6828	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6829		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6830
6831	/* Free the IRQ lines */
6832	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6833
6834	/* Stop TX/RX DMA channels */
6835	stmmac_stop_all_dma(priv);
6836
6837	/* Release and free the Rx/Tx resources */
6838	free_dma_desc_resources(priv, &priv->dma_conf);
6839
6840	/* Disable the MAC Rx/Tx */
6841	stmmac_mac_set(priv, priv->ioaddr, false);
6842
6843	/* set trans_start so we don't get spurious
6844	 * watchdogs during reset
6845	 */
6846	netif_trans_update(dev);
6847	netif_carrier_off(dev);
6848}
6849
6850int stmmac_xdp_open(struct net_device *dev)
6851{
6852	struct stmmac_priv *priv = netdev_priv(dev);
6853	u32 rx_cnt = priv->plat->rx_queues_to_use;
6854	u32 tx_cnt = priv->plat->tx_queues_to_use;
6855	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6856	struct stmmac_rx_queue *rx_q;
6857	struct stmmac_tx_queue *tx_q;
6858	u32 buf_size;
6859	bool sph_en;
6860	u32 chan;
6861	int ret;
6862
6863	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6864	if (ret < 0) {
6865		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6866			   __func__);
6867		goto dma_desc_error;
6868	}
6869
6870	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6871	if (ret < 0) {
6872		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6873			   __func__);
6874		goto init_error;
6875	}
6876
6877	stmmac_reset_queues_param(priv);
6878
6879	/* DMA CSR Channel configuration */
6880	for (chan = 0; chan < dma_csr_ch; chan++) {
6881		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6882		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6883	}
6884
6885	/* Adjust Split header */
6886	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6887
6888	/* DMA RX Channel Configuration */
6889	for (chan = 0; chan < rx_cnt; chan++) {
6890		rx_q = &priv->dma_conf.rx_queue[chan];
6891
6892		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6893				    rx_q->dma_rx_phy, chan);
6894
6895		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6896				     (rx_q->buf_alloc_num *
6897				      sizeof(struct dma_desc));
6898		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6899				       rx_q->rx_tail_addr, chan);
6900
6901		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6902			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6903			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6904					      buf_size,
6905					      rx_q->queue_index);
6906		} else {
6907			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6908					      priv->dma_conf.dma_buf_sz,
6909					      rx_q->queue_index);
6910		}
6911
6912		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6913	}
6914
6915	/* DMA TX Channel Configuration */
6916	for (chan = 0; chan < tx_cnt; chan++) {
6917		tx_q = &priv->dma_conf.tx_queue[chan];
6918
6919		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6920				    tx_q->dma_tx_phy, chan);
6921
6922		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6923		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6924				       tx_q->tx_tail_addr, chan);
6925
6926		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6927		tx_q->txtimer.function = stmmac_tx_timer;
6928	}
6929
6930	/* Enable the MAC Rx/Tx */
6931	stmmac_mac_set(priv, priv->ioaddr, true);
6932
6933	/* Start Rx & Tx DMA Channels */
6934	stmmac_start_all_dma(priv);
6935
6936	ret = stmmac_request_irq(dev);
6937	if (ret)
6938		goto irq_error;
6939
6940	/* Enable NAPI process */
6941	stmmac_enable_all_queues(priv);
6942	netif_carrier_on(dev);
6943	netif_tx_start_all_queues(dev);
6944	stmmac_enable_all_dma_irq(priv);
6945
6946	return 0;
6947
6948irq_error:
6949	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6950		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6951
6952	stmmac_hw_teardown(dev);
6953init_error:
6954	free_dma_desc_resources(priv, &priv->dma_conf);
6955dma_desc_error:
6956	return ret;
6957}
6958
6959int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6960{
6961	struct stmmac_priv *priv = netdev_priv(dev);
6962	struct stmmac_rx_queue *rx_q;
6963	struct stmmac_tx_queue *tx_q;
6964	struct stmmac_channel *ch;
6965
6966	if (test_bit(STMMAC_DOWN, &priv->state) ||
6967	    !netif_carrier_ok(priv->dev))
6968		return -ENETDOWN;
6969
6970	if (!stmmac_xdp_is_enabled(priv))
6971		return -EINVAL;
6972
6973	if (queue >= priv->plat->rx_queues_to_use ||
6974	    queue >= priv->plat->tx_queues_to_use)
6975		return -EINVAL;
6976
6977	rx_q = &priv->dma_conf.rx_queue[queue];
6978	tx_q = &priv->dma_conf.tx_queue[queue];
6979	ch = &priv->channel[queue];
6980
6981	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6982		return -EINVAL;
6983
6984	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6985		/* EQoS does not have per-DMA channel SW interrupt,
6986		 * so we schedule RX Napi straight-away.
6987		 */
6988		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6989			__napi_schedule(&ch->rxtx_napi);
6990	}
6991
6992	return 0;
6993}
6994
6995static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6996{
6997	struct stmmac_priv *priv = netdev_priv(dev);
6998	u32 tx_cnt = priv->plat->tx_queues_to_use;
6999	u32 rx_cnt = priv->plat->rx_queues_to_use;
7000	unsigned int start;
7001	int q;
7002
7003	for (q = 0; q < tx_cnt; q++) {
7004		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7005		u64 tx_packets;
7006		u64 tx_bytes;
7007
7008		do {
7009			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7010			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7011		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7012		do {
7013			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7014			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7015		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7016
7017		stats->tx_packets += tx_packets;
7018		stats->tx_bytes += tx_bytes;
7019	}
7020
7021	for (q = 0; q < rx_cnt; q++) {
7022		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7023		u64 rx_packets;
7024		u64 rx_bytes;
7025
7026		do {
7027			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7028			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7029			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7030		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7031
7032		stats->rx_packets += rx_packets;
7033		stats->rx_bytes += rx_bytes;
7034	}
7035
7036	stats->rx_dropped = priv->xstats.rx_dropped;
7037	stats->rx_errors = priv->xstats.rx_errors;
7038	stats->tx_dropped = priv->xstats.tx_dropped;
7039	stats->tx_errors = priv->xstats.tx_errors;
7040	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7041	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7042	stats->rx_length_errors = priv->xstats.rx_length;
7043	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7044	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7045	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7046}
7047
7048static const struct net_device_ops stmmac_netdev_ops = {
7049	.ndo_open = stmmac_open,
7050	.ndo_start_xmit = stmmac_xmit,
7051	.ndo_stop = stmmac_release,
7052	.ndo_change_mtu = stmmac_change_mtu,
7053	.ndo_fix_features = stmmac_fix_features,
7054	.ndo_set_features = stmmac_set_features,
7055	.ndo_set_rx_mode = stmmac_set_rx_mode,
7056	.ndo_tx_timeout = stmmac_tx_timeout,
7057	.ndo_eth_ioctl = stmmac_ioctl,
7058	.ndo_get_stats64 = stmmac_get_stats64,
7059	.ndo_setup_tc = stmmac_setup_tc,
7060	.ndo_select_queue = stmmac_select_queue,
7061	.ndo_set_mac_address = stmmac_set_mac_address,
7062	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7063	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7064	.ndo_bpf = stmmac_bpf,
7065	.ndo_xdp_xmit = stmmac_xdp_xmit,
7066	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7067};
7068
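/* Driver reset path: once STMMAC_RESET_REQUESTED has been set (e.g.
 * after a TX timeout), the service task runs stmmac_reset_subtask(),
 * which simply closes and reopens the device under the RTNL lock,
 * guarded by the STMMAC_DOWN and STMMAC_RESETING state bits.
 */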
7069static void stmmac_reset_subtask(struct stmmac_priv *priv)
7070{
7071	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7072		return;
7073	if (test_bit(STMMAC_DOWN, &priv->state))
7074		return;
7075
7076	netdev_err(priv->dev, "Reset adapter.\n");
7077
7078	rtnl_lock();
7079	netif_trans_update(priv->dev);
7080	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7081		usleep_range(1000, 2000);
7082
7083	set_bit(STMMAC_DOWN, &priv->state);
7084	dev_close(priv->dev);
7085	dev_open(priv->dev, NULL);
7086	clear_bit(STMMAC_DOWN, &priv->state);
7087	clear_bit(STMMAC_RESETING, &priv->state);
7088	rtnl_unlock();
7089}
7090
7091static void stmmac_service_task(struct work_struct *work)
7092{
7093	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7094			service_task);
7095
7096	stmmac_reset_subtask(priv);
7097	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7098}
7099
7100/**
7101 *  stmmac_hw_init - Init the MAC device
7102 *  @priv: driver private structure
7103 *  Description: configure the MAC device according to the platform
7104 *  parameters and the HW capability register. It prepares the driver
7105 *  to use either ring or chain mode and to set up either enhanced or
7106 *  normal descriptors.
7107 */
7108static int stmmac_hw_init(struct stmmac_priv *priv)
7109{
7110	int ret;
 
7111
7112	/* dwmac-sun8i only works in chain mode */
7113	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7114		chain_mode = 1;
7115	priv->chain_mode = chain_mode;
7116
7117	/* Initialize HW Interface */
7118	ret = stmmac_hwif_init(priv);
7119	if (ret)
7120		return ret;
7121
7122	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7123	priv->hw_cap_support = stmmac_get_hw_features(priv);
7124	if (priv->hw_cap_support) {
7125		dev_info(priv->device, "DMA HW capability register supported\n");
7126
7127		/* Some GMAC/DMA configuration fields passed through the
7128		 * platform data (e.g. enh_desc, tx_coe) can be overridden
7129		 * with the values from the HW capability register, if
7130		 * supported.
7131		 */
7132		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7133		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7134				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7135		priv->hw->pmt = priv->plat->pmt;
7136		if (priv->dma_cap.hash_tb_sz) {
7137			priv->hw->multicast_filter_bins =
7138					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7139			priv->hw->mcast_bits_log2 =
7140					ilog2(priv->hw->multicast_filter_bins);
7141		}
7142
7143		/* TXCOE doesn't work in thresh DMA mode */
7144		if (priv->plat->force_thresh_dma_mode)
7145			priv->plat->tx_coe = 0;
7146		else
7147			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7148
7149		/* In case of GMAC4 rx_coe is from HW cap register. */
7150		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7151
7152		if (priv->dma_cap.rx_coe_type2)
7153			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7154		else if (priv->dma_cap.rx_coe_type1)
7155			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7156
7157	} else {
7158		dev_info(priv->device, "No HW DMA feature register supported\n");
7159	}
7160
7161	if (priv->plat->rx_coe) {
7162		priv->hw->rx_csum = priv->plat->rx_coe;
7163		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7164		if (priv->synopsys_id < DWMAC_CORE_4_00)
7165			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7166	}
7167	if (priv->plat->tx_coe)
7168		dev_info(priv->device, "TX Checksum insertion supported\n");
7169
7170	if (priv->plat->pmt) {
7171		dev_info(priv->device, "Wake-Up On Lan supported\n");
7172		device_set_wakeup_capable(priv->device, 1);
7173	}
7174
7175	if (priv->dma_cap.tsoen)
7176		dev_info(priv->device, "TSO supported\n");
7177
7178	if (priv->dma_cap.number_rx_queues &&
7179	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7180		dev_warn(priv->device,
7181			 "Number of Rx queues (%u) exceeds dma capability\n",
7182			 priv->plat->rx_queues_to_use);
7183		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7184	}
7185	if (priv->dma_cap.number_tx_queues &&
7186	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7187		dev_warn(priv->device,
7188			 "Number of Tx queues (%u) exceeds dma capability\n",
7189			 priv->plat->tx_queues_to_use);
7190		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7191	}
7192
7193	if (priv->dma_cap.rx_fifo_size &&
7194	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7195		dev_warn(priv->device,
7196			 "Rx FIFO size (%u) exceeds dma capability\n",
7197			 priv->plat->rx_fifo_size);
7198		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7199	}
7200	if (priv->dma_cap.tx_fifo_size &&
7201	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7202		dev_warn(priv->device,
7203			 "Tx FIFO size (%u) exceeds dma capability\n",
7204			 priv->plat->tx_fifo_size);
7205		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7206	}
7207
7208	priv->hw->vlan_fail_q_en =
7209		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7210	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7211
7212	/* Run HW quirks, if any */
7213	if (priv->hwif_quirks) {
7214		ret = priv->hwif_quirks(priv);
7215		if (ret)
7216			return ret;
7217	}
7218
7219	/* Rx Watchdog is available in cores newer than 3.40.
7220	 * In some cases, for example on buggy HW, this feature
7221	 * has to be disabled; this can be done by passing the
7222	 * riwt_off field from the platform.
7223	 */
7224	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7225	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7226		priv->use_riwt = 1;
7227		dev_info(priv->device,
7228			 "Enable RX Mitigation via HW Watchdog Timer\n");
7229	}
7230
7231	return 0;
7232}
7233
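/* Register the NAPI contexts for each DMA channel: an RX instance, a TX
 * instance and, when the channel has both an RX and a TX queue, a
 * combined rxtx instance (used for the XDP/XSK zero-copy path).
 * stmmac_napi_del() below undoes this.
 */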
7234static void stmmac_napi_add(struct net_device *dev)
7235{
7236	struct stmmac_priv *priv = netdev_priv(dev);
7237	u32 queue, maxq;
7238
7239	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7240
7241	for (queue = 0; queue < maxq; queue++) {
7242		struct stmmac_channel *ch = &priv->channel[queue];
7243
7244		ch->priv_data = priv;
7245		ch->index = queue;
7246		spin_lock_init(&ch->lock);
7247
7248		if (queue < priv->plat->rx_queues_to_use) {
7249			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7250		}
7251		if (queue < priv->plat->tx_queues_to_use) {
7252			netif_napi_add_tx(dev, &ch->tx_napi,
7253					  stmmac_napi_poll_tx);
7254		}
7255		if (queue < priv->plat->rx_queues_to_use &&
7256		    queue < priv->plat->tx_queues_to_use) {
7257			netif_napi_add(dev, &ch->rxtx_napi,
7258				       stmmac_napi_poll_rxtx);
7259		}
7260	}
7261}
7262
7263static void stmmac_napi_del(struct net_device *dev)
7264{
7265	struct stmmac_priv *priv = netdev_priv(dev);
7266	u32 queue, maxq;
7267
7268	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7269
7270	for (queue = 0; queue < maxq; queue++) {
7271		struct stmmac_channel *ch = &priv->channel[queue];
7272
7273		if (queue < priv->plat->rx_queues_to_use)
7274			netif_napi_del(&ch->rx_napi);
7275		if (queue < priv->plat->tx_queues_to_use)
7276			netif_napi_del(&ch->tx_napi);
7277		if (queue < priv->plat->rx_queues_to_use &&
7278		    queue < priv->plat->tx_queues_to_use) {
7279			netif_napi_del(&ch->rxtx_napi);
7280		}
7281	}
7282}
7283
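/* Apply a new RX/TX queue count (e.g. from the ethtool channels path):
 * release the interface if it is running, rebuild the NAPI contexts for
 * the new counts, refresh the default RSS table unless the user has
 * explicitly configured one, and reopen the interface.
 */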
7284int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7285{
7286	struct stmmac_priv *priv = netdev_priv(dev);
7287	int ret = 0, i;
7288
7289	if (netif_running(dev))
7290		stmmac_release(dev);
7291
7292	stmmac_napi_del(dev);
7293
7294	priv->plat->rx_queues_to_use = rx_cnt;
7295	priv->plat->tx_queues_to_use = tx_cnt;
7296	if (!netif_is_rxfh_configured(dev))
7297		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7298			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7299									rx_cnt);
7300
7301	stmmac_napi_add(dev);
7302
7303	if (netif_running(dev))
7304		ret = stmmac_open(dev);
7305
7306	return ret;
7307}
7308
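/* Apply new RX/TX descriptor ring sizes (e.g. from the ethtool ring
 * parameters path): close the interface if it is running, record the
 * new sizes in dma_conf and reopen so the rings are reallocated with
 * the requested length.
 */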
7309int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7310{
7311	struct stmmac_priv *priv = netdev_priv(dev);
7312	int ret = 0;
7313
7314	if (netif_running(dev))
7315		stmmac_release(dev);
7316
7317	priv->dma_conf.dma_rx_size = rx_size;
7318	priv->dma_conf.dma_tx_size = tx_size;
7319
7320	if (netif_running(dev))
7321		ret = stmmac_open(dev);
7322
7323	return ret;
7324}
7325
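/* XDP RX metadata hook: report the hardware RX timestamp of the frame
 * described by the stmmac_xdp_buff context, so that XDP/AF_XDP programs
 * can retrieve it via the bpf_xdp_metadata_rx_timestamp() kfunc.
 */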
7326static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7327{
7328	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7329	struct dma_desc *desc_contains_ts = ctx->desc;
7330	struct stmmac_priv *priv = ctx->priv;
7331	struct dma_desc *ndesc = ctx->ndesc;
7332	struct dma_desc *desc = ctx->desc;
7333	u64 ns = 0;
7334
7335	if (!priv->hwts_rx_en)
7336		return -ENODATA;
7337
7338	/* For GMAC4, the valid timestamp is held in the context (next) descriptor. */
7339	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7340		desc_contains_ts = ndesc;
7341
7342	/* Check if timestamp is available */
7343	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7344		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7345		ns -= priv->plat->cdc_error_adj;
7346		*timestamp = ns_to_ktime(ns);
7347		return 0;
7348	}
7349
7350	return -ENODATA;
7351}
7352
7353static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7354	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7355};
7356
7357/**
7358 * stmmac_dvr_probe
7359 * @device: device pointer
7360 * @plat_dat: platform data pointer
7361 * @res: stmmac resource pointer
7362 * Description: this is the main probe function. It allocates the
7363 * net_device, sets up the private structure and registers the device.
7364 * Return:
7365 * 0 on success, otherwise a negative errno.
7366 */
7367int stmmac_dvr_probe(struct device *device,
7368		     struct plat_stmmacenet_data *plat_dat,
7369		     struct stmmac_resources *res)
7370{
 
7371	struct net_device *ndev = NULL;
7372	struct stmmac_priv *priv;
7373	u32 rxq;
7374	int i, ret = 0;
7375
7376	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7377				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7378	if (!ndev)
7379		return -ENOMEM;
7380
7381	SET_NETDEV_DEV(ndev, device);
7382
7383	priv = netdev_priv(ndev);
7384	priv->device = device;
7385	priv->dev = ndev;
7386
7387	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7388		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7389	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7390		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7391		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7392	}
7393
7394	priv->xstats.pcpu_stats =
7395		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7396	if (!priv->xstats.pcpu_stats)
7397		return -ENOMEM;
7398
7399	stmmac_set_ethtool_ops(ndev);
7400	priv->pause = pause;
7401	priv->plat = plat_dat;
7402	priv->ioaddr = res->addr;
7403	priv->dev->base_addr = (unsigned long)res->addr;
7404	priv->plat->dma_cfg->multi_msi_en =
7405		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7406
7407	priv->dev->irq = res->irq;
7408	priv->wol_irq = res->wol_irq;
7409	priv->lpi_irq = res->lpi_irq;
7410	priv->sfty_irq = res->sfty_irq;
7411	priv->sfty_ce_irq = res->sfty_ce_irq;
7412	priv->sfty_ue_irq = res->sfty_ue_irq;
7413	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7414		priv->rx_irq[i] = res->rx_irq[i];
7415	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7416		priv->tx_irq[i] = res->tx_irq[i];
7417
7418	if (!is_zero_ether_addr(res->mac))
7419		eth_hw_addr_set(priv->dev, res->mac);
7420
7421	dev_set_drvdata(device, priv->dev);
7422
7423	/* Verify driver arguments */
7424	stmmac_verify_args();
7425
7426	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7427	if (!priv->af_xdp_zc_qps)
7428		return -ENOMEM;
7429
7430	/* Allocate workqueue */
7431	priv->wq = create_singlethread_workqueue("stmmac_wq");
7432	if (!priv->wq) {
7433		dev_err(priv->device, "failed to create workqueue\n");
7434		ret = -ENOMEM;
7435		goto error_wq_init;
7436	}
7437
7438	INIT_WORK(&priv->service_task, stmmac_service_task);
7439
7440	/* Override with kernel parameters if supplied XXX CRS XXX
7441	 * this needs to have multiple instances
7442	 */
7443	if ((phyaddr >= 0) && (phyaddr <= 31))
7444		priv->plat->phy_addr = phyaddr;
7445
7446	if (priv->plat->stmmac_rst) {
7447		ret = reset_control_assert(priv->plat->stmmac_rst);
7448		reset_control_deassert(priv->plat->stmmac_rst);
7449		/* Some reset controllers provide only a reset callback
7450		 * instead of an assert + deassert callback pair.
7451		 */
7452		if (ret == -ENOTSUPP)
7453			reset_control_reset(priv->plat->stmmac_rst);
7454	}
7455
7456	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7457	if (ret == -ENOTSUPP)
7458		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7459			ERR_PTR(ret));
7460
7461	/* Wait a bit for the reset to take effect */
7462	udelay(10);
7463
7464	/* Init MAC and get the capabilities */
7465	ret = stmmac_hw_init(priv);
7466	if (ret)
7467		goto error_hw_init;
7468
7469	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7470	 */
7471	if (priv->synopsys_id < DWMAC_CORE_5_20)
7472		priv->plat->dma_cfg->dche = false;
7473
7474	stmmac_check_ether_addr(priv);
7475
7476	ndev->netdev_ops = &stmmac_netdev_ops;
7477
7478	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7479	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7480
7481	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7482			    NETIF_F_RXCSUM;
7483	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7484			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7485
7486	ret = stmmac_tc_init(priv, priv);
7487	if (!ret) {
7488		ndev->hw_features |= NETIF_F_HW_TC;
7489	}
7490
7491	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7492		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7493		if (priv->plat->has_gmac4)
7494			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7495		priv->tso = true;
7496		dev_info(priv->device, "TSO feature enabled\n");
7497	}
7498
7499	if (priv->dma_cap.sphen &&
7500	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7501		ndev->hw_features |= NETIF_F_GRO;
7502		priv->sph_cap = true;
7503		priv->sph = priv->sph_cap;
7504		dev_info(priv->device, "SPH feature enabled\n");
7505	}
7506
7507	/* Ideally our host DMA address width is the same as for the
7508	 * device. However, it may differ and then we have to use our
7509	 * host DMA width for allocation and the device DMA width for
7510	 * register handling.
7511	 */
7512	if (priv->plat->host_dma_width)
7513		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7514	else
7515		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7516
7517	if (priv->dma_cap.host_dma_width) {
7518		ret = dma_set_mask_and_coherent(device,
7519				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7520		if (!ret) {
7521			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7522				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7523
7524			/*
7525			 * If more than 32 bits can be addressed, make sure to
7526			 * enable enhanced addressing mode.
7527			 */
7528			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7529				priv->plat->dma_cfg->eame = true;
7530		} else {
7531			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7532			if (ret) {
7533				dev_err(priv->device, "Failed to set DMA Mask\n");
7534				goto error_hw_init;
7535			}
7536
7537			priv->dma_cap.host_dma_width = 32;
7538		}
7539	}
7540
7541	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7542	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7543#ifdef STMMAC_VLAN_TAG_USED
7544	/* Both mac100 and gmac support receive VLAN tag detection */
7545	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7546	if (priv->plat->has_gmac4) {
7547		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7548		priv->hw->hw_vlan_en = true;
7549	}
7550	if (priv->dma_cap.vlhash) {
7551		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7552		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7553	}
7554	if (priv->dma_cap.vlins) {
7555		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7556		if (priv->dma_cap.dvlan)
7557			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7558	}
7559#endif
7560	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7561
7562	priv->xstats.threshold = tc;
7563
7564	/* Initialize RSS */
7565	rxq = priv->plat->rx_queues_to_use;
7566	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7567	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7568		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7569
7570	if (priv->dma_cap.rssen && priv->plat->rss_en)
7571		ndev->features |= NETIF_F_RXHASH;
7572
7573	ndev->vlan_features |= ndev->features;
7574
7575	/* MTU range: 46 - hw-specific max */
7576	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7577	if (priv->plat->has_xgmac)
7578		ndev->max_mtu = XGMAC_JUMBO_LEN;
7579	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7580		ndev->max_mtu = JUMBO_LEN;
7581	else
7582		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7583	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7584	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7585	 */
7586	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7587	    (priv->plat->maxmtu >= ndev->min_mtu))
7588		ndev->max_mtu = priv->plat->maxmtu;
7589	else if (priv->plat->maxmtu < ndev->min_mtu)
7590		dev_warn(priv->device,
7591			 "%s: warning: maxmtu having invalid value (%d)\n",
7592			 __func__, priv->plat->maxmtu);
7593
7594	if (flow_ctrl)
7595		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7596
7597	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7598
7599	/* Setup channels NAPI */
7600	stmmac_napi_add(ndev);
7601
7602	mutex_init(&priv->lock);
 
7603
7604	stmmac_fpe_init(priv);
7605
7606	/* If a specific clk_csr value is passed from the platform,
7607	 * the CSR Clock Range selection cannot be changed at
7608	 * run-time and is fixed. Otherwise, the driver will try to
7609	 * set the MDC clock dynamically according to the actual
7610	 * CSR clock input.
7611	 */
7612	if (priv->plat->clk_csr >= 0)
7613		priv->clk_csr = priv->plat->clk_csr;
7614	else
7615		stmmac_clk_csr_set(priv);
7616
7617	stmmac_check_pcs_mode(priv);
7618
7619	pm_runtime_get_noresume(device);
7620	pm_runtime_set_active(device);
7621	if (!pm_runtime_enabled(device))
7622		pm_runtime_enable(device);
7623
7624	ret = stmmac_mdio_register(ndev);
7625	if (ret < 0) {
7626		dev_err_probe(priv->device, ret,
7627			      "MDIO bus (id: %d) registration failed\n",
7628			      priv->plat->bus_id);
7629		goto error_mdio_register;
7630	}
7631
7632	if (priv->plat->speed_mode_2500)
7633		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7634
7635	ret = stmmac_pcs_setup(ndev);
7636	if (ret)
7637		goto error_pcs_setup;
7638
7639	ret = stmmac_phy_setup(priv);
7640	if (ret) {
7641		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7642		goto error_phy_setup;
7643	}
7644
7645	ret = register_netdev(ndev);
7646	if (ret) {
7647		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7648			__func__, ret);
7649		goto error_netdev_register;
7650	}
7651
7652#ifdef CONFIG_DEBUG_FS
7653	stmmac_init_fs(ndev);
7654#endif
7655
7656	if (priv->plat->dump_debug_regs)
7657		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7658
7659	/* Let pm_runtime_put() disable the clocks.
7660	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7661	 */
7662	pm_runtime_put(device);
7663
7664	return ret;
7665
7666error_netdev_register:
7667	phylink_destroy(priv->phylink);
7668error_phy_setup:
7669	stmmac_pcs_clean(ndev);
7670error_pcs_setup:
7671	stmmac_mdio_unregister(ndev);
7672error_mdio_register:
7673	stmmac_napi_del(ndev);
7674error_hw_init:
7675	destroy_workqueue(priv->wq);
7676error_wq_init:
7677	bitmap_free(priv->af_xdp_zc_qps);
7678
7679	return ret;
7680}
7681EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7682
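/* A minimal sketch of how a platform glue driver is expected to use the
 * probe/remove pair exported here (illustrative only; foo_dwmac_probe is
 * a made-up name, and real glue drivers in dwmac-*.c fill in many more
 * plat_stmmacenet_data fields):
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat;
 *		struct stmmac_resources res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &res);
 *		if (ret)
 *			return ret;
 *
 *		plat = devm_stmmac_probe_config_dt(pdev, res.mac);
 *		if (IS_ERR(plat))
 *			return PTR_ERR(plat);
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat, &res);
 *	}
 */
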
7683/**
7684 * stmmac_dvr_remove
7685 * @dev: device pointer
7686 * Description: this function resets the TX/RX processes, disables the MAC
7687 * RX/TX, changes the link status and releases the DMA descriptor rings.
7688 */
7689void stmmac_dvr_remove(struct device *dev)
7690{
7691	struct net_device *ndev = dev_get_drvdata(dev);
7692	struct stmmac_priv *priv = netdev_priv(ndev);
7693
7694	netdev_info(priv->dev, "%s: removing driver", __func__);
7695
7696	pm_runtime_get_sync(dev);
 
7697
7698	stmmac_stop_all_dma(priv);
7699	stmmac_mac_set(priv, priv->ioaddr, false);
7700	unregister_netdev(ndev);
7701
7702#ifdef CONFIG_DEBUG_FS
7703	stmmac_exit_fs(ndev);
7704#endif
7705	phylink_destroy(priv->phylink);
7706	if (priv->plat->stmmac_rst)
7707		reset_control_assert(priv->plat->stmmac_rst);
7708	reset_control_assert(priv->plat->stmmac_ahb_rst);
7709
7710	stmmac_pcs_clean(ndev);
7711	stmmac_mdio_unregister(ndev);
7712
7713	destroy_workqueue(priv->wq);
7714	mutex_destroy(&priv->lock);
7715	bitmap_free(priv->af_xdp_zc_qps);
7716
7717	pm_runtime_disable(dev);
7718	pm_runtime_put_noidle(dev);
7719}
7720EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7721
7722/**
7723 * stmmac_suspend - suspend callback
7724 * @dev: device pointer
7725 * Description: this is the function to suspend the device; it is called
7726 * by the platform driver to stop the network queue, release the resources,
7727 * program the PMT register (for WoL) and clean up the driver state.
7728 */
7729int stmmac_suspend(struct device *dev)
7730{
7731	struct net_device *ndev = dev_get_drvdata(dev);
7732	struct stmmac_priv *priv = netdev_priv(ndev);
7733	u32 chan;
7734
7735	if (!ndev || !netif_running(ndev))
7736		return 0;
7737
7738	mutex_lock(&priv->lock);
 
7739
7740	netif_device_detach(ndev);
7741
7742	stmmac_disable_all_queues(priv);
7743
7744	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7745		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7746
7747	if (priv->eee_enabled) {
7748		priv->tx_path_in_lpi_mode = false;
7749		del_timer_sync(&priv->eee_ctrl_timer);
7750	}
7751
7752	/* Stop TX/RX DMA */
7753	stmmac_stop_all_dma(priv);
 
7754
7755	if (priv->plat->serdes_powerdown)
7756		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7757
7758	/* Enable Power down mode by programming the PMT regs */
7759	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7760		stmmac_pmt(priv, priv->hw, priv->wolopts);
7761		priv->irq_wake = 1;
7762	} else {
7763		stmmac_mac_set(priv, priv->ioaddr, false);
7764		pinctrl_pm_select_sleep_state(priv->device);
7765	}
7766
7767	mutex_unlock(&priv->lock);
7768
7769	rtnl_lock();
7770	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7771		phylink_suspend(priv->phylink, true);
7772	} else {
7773		if (device_may_wakeup(priv->device))
7774			phylink_speed_down(priv->phylink, false);
7775		phylink_suspend(priv->phylink, false);
7776	}
7777	rtnl_unlock();
7778
7779	if (stmmac_fpe_supported(priv))
7780		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7781
7782	priv->speed = SPEED_UNKNOWN;
7783	return 0;
7784}
7785EXPORT_SYMBOL_GPL(stmmac_suspend);
7786
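/* Helpers used on resume: reset the software ring indices (and, for TX,
 * the MSS and BQL bookkeeping) so the descriptor rings can be reused
 * from a clean state after stmmac_hw_setup().
 */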
7787static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7788{
7789	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7790
7791	rx_q->cur_rx = 0;
7792	rx_q->dirty_rx = 0;
7793}
7794
7795static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7796{
7797	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7798
7799	tx_q->cur_tx = 0;
7800	tx_q->dirty_tx = 0;
7801	tx_q->mss = 0;
7802
7803	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7804}
7805
7806/**
7807 * stmmac_reset_queues_param - reset queue parameters
7808 * @priv: device pointer
7809 */
7810static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7811{
7812	u32 rx_cnt = priv->plat->rx_queues_to_use;
7813	u32 tx_cnt = priv->plat->tx_queues_to_use;
7814	u32 queue;
7815
7816	for (queue = 0; queue < rx_cnt; queue++)
7817		stmmac_reset_rx_queue(priv, queue);
7818
7819	for (queue = 0; queue < tx_cnt; queue++)
7820		stmmac_reset_tx_queue(priv, queue);
7821}
7822
7823/**
7824 * stmmac_resume - resume callback
7825 * @dev: device pointer
7826 * Description: on resume this function is invoked to set up the DMA and CORE
7827 * in a usable state.
7828 */
7829int stmmac_resume(struct device *dev)
7830{
7831	struct net_device *ndev = dev_get_drvdata(dev);
7832	struct stmmac_priv *priv = netdev_priv(ndev);
7833	int ret;
7834
7835	if (!netif_running(ndev))
7836		return 0;
7837
7838	/* The Power Down bit in the PMT register is cleared
7839	 * automatically as soon as a magic packet or a Wake-up frame
7840	 * is received. Still, it's better to clear this bit manually
7841	 * because it can cause problems when resuming from other
7842	 * devices (e.g. a serial console).
7843	 */
7844	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7845		mutex_lock(&priv->lock);
7846		stmmac_pmt(priv, priv->hw, 0);
7847		mutex_unlock(&priv->lock);
7848		priv->irq_wake = 0;
7849	} else {
7850		pinctrl_pm_select_default_state(priv->device);
7851		/* reset the phy so that it's ready */
7852		if (priv->mii)
7853			stmmac_mdio_reset(priv->mii);
7854	}
7855
7856	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7857	    priv->plat->serdes_powerup) {
7858		ret = priv->plat->serdes_powerup(ndev,
7859						 priv->plat->bsp_priv);
7860
7861		if (ret < 0)
7862			return ret;
7863	}
7864
7865	rtnl_lock();
7866	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7867		phylink_resume(priv->phylink);
7868	} else {
7869		phylink_resume(priv->phylink);
7870		if (device_may_wakeup(priv->device))
7871			phylink_speed_up(priv->phylink);
7872	}
7873	rtnl_unlock();
7874
7875	rtnl_lock();
7876	mutex_lock(&priv->lock);
7877
7878	stmmac_reset_queues_param(priv);
7879
7880	stmmac_free_tx_skbufs(priv);
7881	stmmac_clear_descriptors(priv, &priv->dma_conf);
7882
7883	stmmac_hw_setup(ndev, false);
7884	stmmac_init_coalesce(priv);
7885	stmmac_set_rx_mode(ndev);
7886
7887	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7888
7889	stmmac_enable_all_queues(priv);
7890	stmmac_enable_all_dma_irq(priv);
7891
7892	mutex_unlock(&priv->lock);
7893	rtnl_unlock();
 
7894
7895	netif_device_attach(ndev);
7896
7897	return 0;
7898}
7899EXPORT_SYMBOL_GPL(stmmac_resume);
7900
7901#ifndef MODULE
7902static int __init stmmac_cmdline_opt(char *str)
7903{
7904	char *opt;
7905
7906	if (!str || !*str)
7907		return 1;
7908	while ((opt = strsep(&str, ",")) != NULL) {
7909		if (!strncmp(opt, "debug:", 6)) {
7910			if (kstrtoint(opt + 6, 0, &debug))
7911				goto err;
7912		} else if (!strncmp(opt, "phyaddr:", 8)) {
7913			if (kstrtoint(opt + 8, 0, &phyaddr))
7914				goto err;
7915		} else if (!strncmp(opt, "buf_sz:", 7)) {
7916			if (kstrtoint(opt + 7, 0, &buf_sz))
7917				goto err;
7918		} else if (!strncmp(opt, "tc:", 3)) {
7919			if (kstrtoint(opt + 3, 0, &tc))
7920				goto err;
7921		} else if (!strncmp(opt, "watchdog:", 9)) {
7922			if (kstrtoint(opt + 9, 0, &watchdog))
7923				goto err;
7924		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7925			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7926				goto err;
7927		} else if (!strncmp(opt, "pause:", 6)) {
7928			if (kstrtoint(opt + 6, 0, &pause))
7929				goto err;
7930		} else if (!strncmp(opt, "eee_timer:", 10)) {
7931			if (kstrtoint(opt + 10, 0, &eee_timer))
7932				goto err;
7933		} else if (!strncmp(opt, "chain_mode:", 11)) {
7934			if (kstrtoint(opt + 11, 0, &chain_mode))
7935				goto err;
7936		}
7937	}
7938	return 1;
7939
7940err:
7941	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7942	return 1;
7943}
7944
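/* When the driver is built in, the options above can be passed on the
 * kernel command line as a comma-separated list, for example
 * (illustrative values only):
 *
 *	stmmaceth=debug:16,watchdog:4000,chain_mode:1
 */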
7945__setup("stmmaceth=", stmmac_cmdline_opt);
7946#endif /* MODULE */
7947
7948static int __init stmmac_init(void)
7949{
7950#ifdef CONFIG_DEBUG_FS
7951	/* Create debugfs main directory if it doesn't exist yet */
7952	if (!stmmac_fs_dir)
7953		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7954	register_netdevice_notifier(&stmmac_notifier);
7955#endif
7956
7957	return 0;
7958}
7959
7960static void __exit stmmac_exit(void)
7961{
7962#ifdef CONFIG_DEBUG_FS
7963	unregister_netdevice_notifier(&stmmac_notifier);
7964	debugfs_remove_recursive(stmmac_fs_dir);
7965#endif
7966}
7967
7968module_init(stmmac_init)
7969module_exit(stmmac_exit)
7970
7971MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7972MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7973MODULE_LICENSE("GPL");
v3.15
 
   1/*******************************************************************************
   2  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
   3  ST Ethernet IPs are built around a Synopsys IP Core.
   4
   5	Copyright(C) 2007-2011 STMicroelectronics Ltd
   6
   7  This program is free software; you can redistribute it and/or modify it
   8  under the terms and conditions of the GNU General Public License,
   9  version 2, as published by the Free Software Foundation.
  10
  11  This program is distributed in the hope it will be useful, but WITHOUT
  12  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14  more details.
  15
  16  You should have received a copy of the GNU General Public License along with
  17  this program; if not, write to the Free Software Foundation, Inc.,
  18  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  19
  20  The full GNU General Public License is included in this distribution in
  21  the file called "COPYING".
  22
  23  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
  24
  25  Documentation available at:
  26	http://www.stlinux.com
  27  Support available at:
  28	https://bugzilla.stlinux.com/
  29*******************************************************************************/
  30
  31#include <linux/clk.h>
  32#include <linux/kernel.h>
  33#include <linux/interrupt.h>
  34#include <linux/ip.h>
  35#include <linux/tcp.h>
  36#include <linux/skbuff.h>
  37#include <linux/ethtool.h>
  38#include <linux/if_ether.h>
  39#include <linux/crc32.h>
  40#include <linux/mii.h>
  41#include <linux/if.h>
  42#include <linux/if_vlan.h>
  43#include <linux/dma-mapping.h>
  44#include <linux/slab.h>
 
  45#include <linux/prefetch.h>
  46#include <linux/pinctrl/consumer.h>
  47#ifdef CONFIG_STMMAC_DEBUG_FS
  48#include <linux/debugfs.h>
  49#include <linux/seq_file.h>
  50#endif /* CONFIG_STMMAC_DEBUG_FS */
  51#include <linux/net_tstamp.h>
 
 
 
 
 
 
  52#include "stmmac_ptp.h"
 
  53#include "stmmac.h"
 
  54#include <linux/reset.h>
 
 
 
 
 
 
 
 
 
 
 
  55
  56#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
 
  57
  58/* Module parameters */
  59#define TX_TIMEO	5000
  60static int watchdog = TX_TIMEO;
  61module_param(watchdog, int, S_IRUGO | S_IWUSR);
  62MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
  63
  64static int debug = -1;
  65module_param(debug, int, S_IRUGO | S_IWUSR);
  66MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
  67
  68static int phyaddr = -1;
  69module_param(phyaddr, int, S_IRUGO);
  70MODULE_PARM_DESC(phyaddr, "Physical device address");
  71
  72#define DMA_TX_SIZE 256
  73static int dma_txsize = DMA_TX_SIZE;
  74module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
  75MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
  76
  77#define DMA_RX_SIZE 256
  78static int dma_rxsize = DMA_RX_SIZE;
  79module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
  80MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
 
 
 
  81
  82static int flow_ctrl = FLOW_OFF;
  83module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
  84MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
  85
  86static int pause = PAUSE_TIME;
  87module_param(pause, int, S_IRUGO | S_IWUSR);
  88MODULE_PARM_DESC(pause, "Flow Control Pause Time");
  89
  90#define TC_DEFAULT 64
  91static int tc = TC_DEFAULT;
  92module_param(tc, int, S_IRUGO | S_IWUSR);
  93MODULE_PARM_DESC(tc, "DMA threshold control value");
  94
  95#define	DEFAULT_BUFSIZE	1536
  96static int buf_sz = DEFAULT_BUFSIZE;
  97module_param(buf_sz, int, S_IRUGO | S_IWUSR);
  98MODULE_PARM_DESC(buf_sz, "DMA buffer size");
  99
 
 
 100static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 101				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
 102				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
 103
 104#define STMMAC_DEFAULT_LPI_TIMER	1000
 105static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 106module_param(eee_timer, int, S_IRUGO | S_IWUSR);
 107MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
 108#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
 109
 110/* By default the driver will use the ring mode to manage tx and rx descriptors
 111 * but passing this value so user can force to use the chain instead of the ring
 112 */
 113static unsigned int chain_mode;
 114module_param(chain_mode, int, S_IRUGO);
 115MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
 116
 117static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 118
 119#ifdef CONFIG_STMMAC_DEBUG_FS
 120static int stmmac_init_fs(struct net_device *dev);
 121static void stmmac_exit_fs(void);
 122#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 123
 124#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
 
 
 125
 126/**
 127 * stmmac_verify_args - verify the driver parameters.
 128 * Description: it verifies if some wrong parameter is passed to the driver.
 129 * Note that wrong parameters are replaced with the default values.
 130 */
 131static void stmmac_verify_args(void)
 132{
 133	if (unlikely(watchdog < 0))
 134		watchdog = TX_TIMEO;
 135	if (unlikely(dma_rxsize < 0))
 136		dma_rxsize = DMA_RX_SIZE;
 137	if (unlikely(dma_txsize < 0))
 138		dma_txsize = DMA_TX_SIZE;
 139	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
 140		buf_sz = DEFAULT_BUFSIZE;
 141	if (unlikely(flow_ctrl > 1))
 142		flow_ctrl = FLOW_AUTO;
 143	else if (likely(flow_ctrl < 0))
 144		flow_ctrl = FLOW_OFF;
 145	if (unlikely((pause < 0) || (pause > 0xffff)))
 146		pause = PAUSE_TIME;
 147	if (eee_timer < 0)
 148		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 149}
 150
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 151/**
 152 * stmmac_clk_csr_set - dynamically set the MDC clock
 153 * @priv: driver private structure
 154 * Description: this is to dynamically set the MDC clock according to the csr
 155 * clock input.
 156 * Note:
 157 *	If a specific clk_csr value is passed from the platform
 158 *	this means that the CSR Clock Range selection cannot be
 159 *	changed at run-time and it is fixed (as reported in the driver
 160 *	documentation). Viceversa the driver will try to set the MDC
 161 *	clock dynamically according to the actual clock input.
 162 */
 163static void stmmac_clk_csr_set(struct stmmac_priv *priv)
 164{
 165	u32 clk_rate;
 166
 167	clk_rate = clk_get_rate(priv->stmmac_clk);
 168
 169	/* Platform provided default clk_csr would be assumed valid
 170	 * for all other cases except for the below mentioned ones.
 171	 * For values higher than the IEEE 802.3 specified frequency
 172	 * we can not estimate the proper divider as it is not known
 173	 * the frequency of clk_csr_i. So we do not change the default
 174	 * divider.
 175	 */
 176	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
 177		if (clk_rate < CSR_F_35M)
 178			priv->clk_csr = STMMAC_CSR_20_35M;
 179		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
 180			priv->clk_csr = STMMAC_CSR_35_60M;
 181		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
 182			priv->clk_csr = STMMAC_CSR_60_100M;
 183		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
 184			priv->clk_csr = STMMAC_CSR_100_150M;
 185		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
 186			priv->clk_csr = STMMAC_CSR_150_250M;
 187		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
 188			priv->clk_csr = STMMAC_CSR_250_300M;
 189	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 190}
 191
 192static void print_pkt(unsigned char *buf, int len)
 193{
 194	int j;
 195	pr_debug("len = %d byte, buf addr: 0x%p", len, buf);
 196	for (j = 0; j < len; j++) {
 197		if ((j % 16) == 0)
 198			pr_debug("\n %03x:", j);
 199		pr_debug(" %02x", buf[j]);
 200	}
 201	pr_debug("\n");
 202}
 203
 204/* minimum number of free TX descriptors required to wake up TX process */
 205#define STMMAC_TX_THRESH(x)	(x->dma_tx_size/4)
 
 
 206
 207static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
 208{
 209	return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
 
 
 
 210}
 211
 212/**
 213 * stmmac_hw_fix_mac_speed: callback for speed selection
 214 * @priv: driver private structure
 215 * Description: on some platforms (e.g. ST), some HW system configuraton
 216 * registers have to be set according to the link speed negotiated.
 217 */
 218static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
 
 
 
 
 
 
 
 
 
 
 
 
 
 219{
 220	struct phy_device *phydev = priv->phydev;
 221
 222	if (likely(priv->plat->fix_mac_speed))
 223		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
 
 
 224}
 225
 226/**
 227 * stmmac_enable_eee_mode: Check and enter in LPI mode
 228 * @priv: driver private structure
 229 * Description: this function is to verify and enter in LPI mode for EEE.
 
 230 */
 231static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
 232{
 
 
 
 
 
 
 
 
 
 
 
 233	/* Check and enter in LPI mode */
 234	if ((priv->dirty_tx == priv->cur_tx) &&
 235	    (priv->tx_path_in_lpi_mode == false))
 236		priv->hw->mac->set_eee_mode(priv->ioaddr);
 
 237}
 238
 239/**
 240 * stmmac_disable_eee_mode: disable/exit from EEE
 241 * @priv: driver private structure
 242 * Description: this function is to exit and disable EEE in case of
 243 * LPI state is true. This is called by the xmit.
 244 */
 245void stmmac_disable_eee_mode(struct stmmac_priv *priv)
 246{
 247	priv->hw->mac->reset_eee_mode(priv->ioaddr);
 
 
 
 
 
 248	del_timer_sync(&priv->eee_ctrl_timer);
 249	priv->tx_path_in_lpi_mode = false;
 250}
 251
 252/**
 253 * stmmac_eee_ctrl_timer: EEE TX SW timer.
 254 * @arg : data hook
 255 * Description:
 256 *  if there is no data transfer and if we are not in LPI state,
 257 *  then MAC Transmitter can be moved to LPI state.
 258 */
 259static void stmmac_eee_ctrl_timer(unsigned long arg)
 260{
 261	struct stmmac_priv *priv = (struct stmmac_priv *)arg;
 262
 263	stmmac_enable_eee_mode(priv);
 264	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
 265}
 266
 267/**
 268 * stmmac_eee_init: init EEE
 269 * @priv: driver private structure
 270 * Description:
 271 *  If the EEE support has been enabled while configuring the driver,
 272 *  if the GMAC actually supports the EEE (from the HW cap reg) and the
 273 *  phy can also manage EEE, so enable the LPI state and start the timer
 274 *  to verify if the tx path can enter in LPI state.
 275 */
 276bool stmmac_eee_init(struct stmmac_priv *priv)
 277{
 278	bool ret = false;
 279
 280	/* Using PCS we cannot dial with the phy registers at this stage
 281	 * so we do not support extra feature like EEE.
 282	 */
 283	if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
 284	    (priv->pcs == STMMAC_PCS_RTBI))
 285		goto out;
 286
 287	/* MAC core supports the EEE feature. */
 288	if (priv->dma_cap.eee) {
 289		int tx_lpi_timer = priv->tx_lpi_timer;
 290
 291		/* Check if the PHY supports EEE */
 292		if (phy_init_eee(priv->phydev, 1)) {
 293			/* To manage at run-time if the EEE cannot be supported
 294			 * anymore (for example because the lp caps have been
 295			 * changed).
 296			 * In that case the driver disable own timers.
 297			 */
 298			if (priv->eee_active) {
 299				pr_debug("stmmac: disable EEE\n");
 300				del_timer_sync(&priv->eee_ctrl_timer);
 301				priv->hw->mac->set_eee_timer(priv->ioaddr, 0,
 302							     tx_lpi_timer);
 303			}
 304			priv->eee_active = 0;
 305			goto out;
 306		}
 307		/* Activate the EEE and start timers */
 308		if (!priv->eee_active) {
 309			priv->eee_active = 1;
 310			init_timer(&priv->eee_ctrl_timer);
 311			priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
 312			priv->eee_ctrl_timer.data = (unsigned long)priv;
 313			priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
 314			add_timer(&priv->eee_ctrl_timer);
 315
 316			priv->hw->mac->set_eee_timer(priv->ioaddr,
 317						     STMMAC_DEFAULT_LIT_LS,
 318						     tx_lpi_timer);
 319		} else
 320			/* Set HW EEE according to the speed */
 321			priv->hw->mac->set_eee_pls(priv->ioaddr,
 322						   priv->phydev->link);
 323
 324		pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
 
 
 
 
 
 
 
 
 325
 326		ret = true;
 
 
 
 
 
 
 
 327	}
 328out:
 329	return ret;
 
 
 330}
 331
 332/* stmmac_get_tx_hwtstamp: get HW TX timestamps
 333 * @priv: driver private structure
 334 * @entry : descriptor index to be used.
 335 * @skb : the socket buffer
 336 * Description :
 337 * This function will read timestamp from the descriptor & pass it to stack.
 338 * and also perform some sanity checks.
 339 */
 340static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
 341				   unsigned int entry, struct sk_buff *skb)
 342{
 343	struct skb_shared_hwtstamps shhwtstamp;
 344	u64 ns;
 345	void *desc = NULL;
 346
 347	if (!priv->hwts_tx_en)
 348		return;
 349
 350	/* exit if skb doesn't support hw tstamp */
 351	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
 352		return;
 353
 354	if (priv->adv_ts)
 355		desc = (priv->dma_etx + entry);
 356	else
 357		desc = (priv->dma_tx + entry);
 358
 359	/* check tx tstamp status */
 360	if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
 361		return;
 
 
 
 
 362
 363	/* get the valid tstamp */
 364	ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
 365
 366	memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 367	shhwtstamp.hwtstamp = ns_to_ktime(ns);
 368	/* pass tstamp to stack */
 369	skb_tstamp_tx(skb, &shhwtstamp);
 370
 371	return;
 
 
 
 372}
 373
 374/* stmmac_get_rx_hwtstamp: get HW RX timestamps
 375 * @priv: driver private structure
 376 * @entry : descriptor index to be used.
 
 377 * @skb : the socket buffer
 378 * Description :
 379 * This function will read received packet's timestamp from the descriptor
 380 * and pass it to stack. It also perform some sanity checks.
 381 */
 382static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
 383				   unsigned int entry, struct sk_buff *skb)
 384{
 385	struct skb_shared_hwtstamps *shhwtstamp = NULL;
 386	u64 ns;
 387	void *desc = NULL;
 388
 389	if (!priv->hwts_rx_en)
 390		return;
 
 
 
 391
 392	if (priv->adv_ts)
 393		desc = (priv->dma_erx + entry);
 394	else
 395		desc = (priv->dma_rx + entry);
 396
 397	/* exit if rx tstamp is not valid */
 398	if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
 399		return;
 400
 401	/* get valid tstamp */
 402	ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
 403	shhwtstamp = skb_hwtstamps(skb);
 404	memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 405	shhwtstamp->hwtstamp = ns_to_ktime(ns);
 
 
 406}
 407
 408/**
 409 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 410 *  @dev: device pointer.
 411 *  @ifr: An IOCTL specefic structure, that can contain a pointer to
 412 *  a proprietary structure used to pass information to the driver.
 413 *  Description:
 414 *  This function configures the MAC to enable/disable both outgoing(TX)
 415 *  and incoming(RX) packets time stamping based on user input.
 416 *  Return Value:
 417 *  0 on success and an appropriate -ve integer on failure.
 418 */
 419static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 420{
 421	struct stmmac_priv *priv = netdev_priv(dev);
 422	struct hwtstamp_config config;
 423	struct timespec now;
 424	u64 temp = 0;
 425	u32 ptp_v2 = 0;
 426	u32 tstamp_all = 0;
 427	u32 ptp_over_ipv4_udp = 0;
 428	u32 ptp_over_ipv6_udp = 0;
 429	u32 ptp_over_ethernet = 0;
 430	u32 snap_type_sel = 0;
 431	u32 ts_master_en = 0;
 432	u32 ts_event_en = 0;
 433	u32 value = 0;
 434
 435	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
 436		netdev_alert(priv->dev, "No support for HW time stamping\n");
 437		priv->hwts_tx_en = 0;
 438		priv->hwts_rx_en = 0;
 439
 440		return -EOPNOTSUPP;
 441	}
 442
 443	if (copy_from_user(&config, ifr->ifr_data,
 444			   sizeof(struct hwtstamp_config)))
 445		return -EFAULT;
 446
 447	pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
 448		 __func__, config.flags, config.tx_type, config.rx_filter);
 449
 450	/* reserved for future extensions */
 451	if (config.flags)
 452		return -EINVAL;
 453
 454	if (config.tx_type != HWTSTAMP_TX_OFF &&
 455	    config.tx_type != HWTSTAMP_TX_ON)
 456		return -ERANGE;
 457
 458	if (priv->adv_ts) {
 459		switch (config.rx_filter) {
 460		case HWTSTAMP_FILTER_NONE:
 461			/* time stamp no incoming packet at all */
 462			config.rx_filter = HWTSTAMP_FILTER_NONE;
 463			break;
 464
 465		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 466			/* PTP v1, UDP, any kind of event packet */
 467			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 468			/* take time stamp for all event messages */
 
 
 
 
 
 469			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 470
 471			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 472			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 473			break;
 474
 475		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 476			/* PTP v1, UDP, Sync packet */
 477			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
 478			/* take time stamp for SYNC messages only */
 479			ts_event_en = PTP_TCR_TSEVNTENA;
 480
 481			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 482			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 483			break;
 484
 485		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 486			/* PTP v1, UDP, Delay_req packet */
 487			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
 488			/* take time stamp for Delay_Req messages only */
 489			ts_master_en = PTP_TCR_TSMSTRENA;
 490			ts_event_en = PTP_TCR_TSEVNTENA;
 491
 492			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 493			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 494			break;
 495
 496		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 497			/* PTP v2, UDP, any kind of event packet */
 498			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
 499			ptp_v2 = PTP_TCR_TSVER2ENA;
 500			/* take time stamp for all event messages */
 501			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 502
 503			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 504			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 505			break;
 506
 507		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
 508			/* PTP v2, UDP, Sync packet */
 509			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
 510			ptp_v2 = PTP_TCR_TSVER2ENA;
 511			/* take time stamp for SYNC messages only */
 512			ts_event_en = PTP_TCR_TSEVNTENA;
 513
 514			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 515			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 516			break;
 517
 518		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 519			/* PTP v2, UDP, Delay_req packet */
 520			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
 521			ptp_v2 = PTP_TCR_TSVER2ENA;
 522			/* take time stamp for Delay_Req messages only */
 523			ts_master_en = PTP_TCR_TSMSTRENA;
 524			ts_event_en = PTP_TCR_TSEVNTENA;
 525
 526			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 527			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 528			break;
 529
 530		case HWTSTAMP_FILTER_PTP_V2_EVENT:
 531			/* PTP v2/802.AS1 any layer, any kind of event packet */
 532			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 533			ptp_v2 = PTP_TCR_TSVER2ENA;
 534			/* take time stamp for all event messages */
 535			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 536
 
 537			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 538			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 539			ptp_over_ethernet = PTP_TCR_TSIPENA;
 540			break;
 541
 542		case HWTSTAMP_FILTER_PTP_V2_SYNC:
 543			/* PTP v2/802.AS1, any layer, Sync packet */
 544			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
 545			ptp_v2 = PTP_TCR_TSVER2ENA;
 546			/* take time stamp for SYNC messages only */
 547			ts_event_en = PTP_TCR_TSEVNTENA;
 548
 549			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 550			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 551			ptp_over_ethernet = PTP_TCR_TSIPENA;
 552			break;
 553
 554		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 555			/* PTP v2/802.AS1, any layer, Delay_req packet */
 556			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
 557			ptp_v2 = PTP_TCR_TSVER2ENA;
 558			/* take time stamp for Delay_Req messages only */
 559			ts_master_en = PTP_TCR_TSMSTRENA;
 560			ts_event_en = PTP_TCR_TSEVNTENA;
 561
 562			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 563			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 564			ptp_over_ethernet = PTP_TCR_TSIPENA;
 565			break;
 566
 
 567		case HWTSTAMP_FILTER_ALL:
 568			/* time stamp any incoming packet */
 569			config.rx_filter = HWTSTAMP_FILTER_ALL;
 570			tstamp_all = PTP_TCR_TSENALL;
 571			break;
 572
 573		default:
 574			return -ERANGE;
 575		}
 576	} else {
 577		switch (config.rx_filter) {
 578		case HWTSTAMP_FILTER_NONE:
 579			config.rx_filter = HWTSTAMP_FILTER_NONE;
 580			break;
 581		default:
 582			/* PTP v1, UDP, any kind of event packet */
 583			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 584			break;
 585		}
 586	}
 587	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
 588	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
 589
 590	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
 591		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
 592	else {
 593		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
 594			 tstamp_all | ptp_v2 | ptp_over_ethernet |
 595			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
 596			 ts_master_en | snap_type_sel);
 597
 598		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
 599
 600		/* program Sub Second Increment reg */
 601		priv->hw->ptp->config_sub_second_increment(priv->ioaddr);
 602
 603		/* calculate default added value:
 604		 * formula is :
 605		 * addend = (2^32)/freq_div_ratio;
 606		 * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
 607		 * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
 608		 * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
 609		 *       achive 20ns accuracy.
 610		 *
 611		 * 2^x * y == (y << x), hence
 612		 * 2^32 * 50000000 ==> (50000000 << 32)
 613		 */
 614		temp = (u64) (50000000ULL << 32);
 615		priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
 616		priv->hw->ptp->config_addend(priv->ioaddr,
 617					     priv->default_addend);
 618
 619		/* initialize system time */
 620		getnstimeofday(&now);
 621		priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
 622					    now.tv_nsec);
 623	}
 624
 
 
 
 
 625	return copy_to_user(ifr->ifr_data, &config,
 626			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 627}
 628
 629/**
 630 * stmmac_init_ptp: init PTP
 631 * @priv: driver private structure
 632 * Description: this is to verify if the HW supports the PTPv1 or v2.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 633 * This is done by looking at the HW cap. register.
 634 * Also it registers the ptp driver.
 635 */
 636static int stmmac_init_ptp(struct stmmac_priv *priv)
 637{
 638	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
 639		return -EOPNOTSUPP;
 
 
 
 
 
 
 
 640
 641	priv->adv_ts = 0;
 642	if (priv->dma_cap.atime_stamp && priv->extend_desc)
 
 
 
 
 643		priv->adv_ts = 1;
 644
 645	if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
 646		pr_debug("IEEE 1588-2002 Time Stamp supported\n");
 647
 648	if (netif_msg_hw(priv) && priv->adv_ts)
 649		pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
 
 650
 651	priv->hw->ptp = &stmmac_ptp;
 652	priv->hwts_tx_en = 0;
 653	priv->hwts_rx_en = 0;
 654
 655	return stmmac_ptp_register(priv);
 
 
 
 656}
 657
 658static void stmmac_release_ptp(struct stmmac_priv *priv)
 659{
 
 660	stmmac_ptp_unregister(priv);
 661}
 662
 663/**
 664 * stmmac_adjust_link
 665 * @dev: net device structure
 666 * Description: it adjusts the link parameters.
 
 667 */
 668static void stmmac_adjust_link(struct net_device *dev)
 669{
 670	struct stmmac_priv *priv = netdev_priv(dev);
 671	struct phy_device *phydev = priv->phydev;
 672	unsigned long flags;
 673	int new_state = 0;
 674	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
 675
 676	if (phydev == NULL)
 677		return;
 
 
 678
 679	spin_lock_irqsave(&priv->lock, flags);
 
 680
 681	if (phydev->link) {
 682		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
 683
 684		/* Now we make sure that we can be in full duplex mode.
 685		 * If not, we operate in half-duplex mode. */
 686		if (phydev->duplex != priv->oldduplex) {
 687			new_state = 1;
 688			if (!(phydev->duplex))
 689				ctrl &= ~priv->hw->link.duplex;
 690			else
 691				ctrl |= priv->hw->link.duplex;
 692			priv->oldduplex = phydev->duplex;
 693		}
 694		/* Flow Control operation */
 695		if (phydev->pause)
 696			priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
 697						 fc, pause_time);
 698
 699		if (phydev->speed != priv->speed) {
 700			new_state = 1;
 701			switch (phydev->speed) {
 702			case 1000:
 703				if (likely(priv->plat->has_gmac))
 704					ctrl &= ~priv->hw->link.port;
 705				stmmac_hw_fix_mac_speed(priv);
 706				break;
 707			case 100:
 708			case 10:
 709				if (priv->plat->has_gmac) {
 710					ctrl |= priv->hw->link.port;
 711					if (phydev->speed == SPEED_100) {
 712						ctrl |= priv->hw->link.speed;
 713					} else {
 714						ctrl &= ~(priv->hw->link.speed);
 715					}
 716				} else {
 717					ctrl &= ~priv->hw->link.port;
 718				}
 719				stmmac_hw_fix_mac_speed(priv);
 720				break;
 721			default:
 722				if (netif_msg_link(priv))
 723					pr_warn("%s: Speed (%d) not 10/100\n",
 724						dev->name, phydev->speed);
 725				break;
 726			}
 727
 728			priv->speed = phydev->speed;
 729		}
 730
 731		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
 
 
 
 
 732
 733		if (!priv->oldlink) {
 734			new_state = 1;
 735			priv->oldlink = 1;
 736		}
 737	} else if (priv->oldlink) {
 738		new_state = 1;
 739		priv->oldlink = 0;
 740		priv->speed = 0;
 741		priv->oldduplex = -1;
 742	}
 743
 744	if (new_state && netif_msg_link(priv))
 745		phy_print_status(phydev);
 746
 747	/* At this stage, it could be needed to setup the EEE or adjust some
 748	 * MAC related HW registers.
 749	 */
 
 
 
 
 
 
 
 
 
 
 
 750	priv->eee_enabled = stmmac_eee_init(priv);
 
 751
 752	spin_unlock_irqrestore(&priv->lock, flags);
 
 753}
 754
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 755/**
 756 * stmmac_check_pcs_mode: verify if RGMII/SGMII is supported
 757 * @priv: driver private structure
 758 * Description: this is to verify if the HW supports the PCS.
 759 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 760 * configured for the TBI, RTBI, or SGMII PHY interface.
 761 */
 762static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
 763{
 764	int interface = priv->plat->interface;
 765
 766	if (priv->dma_cap.pcs) {
 767		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
 768		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
 769		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
 770		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
 771			pr_debug("STMMAC: PCS RGMII support enable\n");
 772			priv->pcs = STMMAC_PCS_RGMII;
 773		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
 774			pr_debug("STMMAC: PCS SGMII support enable\n");
 775			priv->pcs = STMMAC_PCS_SGMII;
 776		}
 777	}
 778}
 779
 780/**
 781 * stmmac_init_phy - PHY initialization
 782 * @dev: net device structure
 783 * Description: it initializes the driver's PHY state, and attaches the PHY
 784 * to the mac driver.
 785 *  Return value:
 786 *  0 on success
 787 */
 788static int stmmac_init_phy(struct net_device *dev)
 789{
 790	struct stmmac_priv *priv = netdev_priv(dev);
 791	struct phy_device *phydev;
 792	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
 793	char bus_id[MII_BUS_ID_SIZE];
 794	int interface = priv->plat->interface;
 795	int max_speed = priv->plat->max_speed;
 796	priv->oldlink = 0;
 797	priv->speed = 0;
 798	priv->oldduplex = -1;
 799
 800	if (priv->plat->phy_bus_name)
 801		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
 802			 priv->plat->phy_bus_name, priv->plat->bus_id);
 803	else
 804		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
 805			 priv->plat->bus_id);
 806
 807	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
 808		 priv->plat->phy_addr);
 809	pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id_fmt);
 810
 811	phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
 812
 813	if (IS_ERR(phydev)) {
 814		pr_err("%s: Could not attach to PHY\n", dev->name);
 815		return PTR_ERR(phydev);
 
 816	}
 817
 818	/* Stop Advertising 1000BASE Capability if interface is not GMII */
 819	if ((interface == PHY_INTERFACE_MODE_MII) ||
 820	    (interface == PHY_INTERFACE_MODE_RMII) ||
 821		(max_speed < 1000 &&  max_speed > 0))
 822		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
 823					 SUPPORTED_1000baseT_Full);
 824
 825	/*
 826	 * Broken HW is sometimes missing the pull-up resistor on the
 827	 * MDIO line, which results in reads to non-existent devices returning
 828	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
 829	 * device as well.
 830	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
 831	 */
 832	if (phydev->phy_id == 0) {
 833		phy_disconnect(phydev);
 834		return -ENODEV;
 835	}
 836	pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
 837		 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
 838
 839	priv->phydev = phydev;
 840
 
 841	return 0;
 842}
 843
 844/**
 845 * stmmac_display_ring: display ring
 846 * @head: pointer to the head of the ring passed.
 847 * @size: size of the ring.
 848 * @extend_desc: to verify if extended descriptors are used.
 849 * Description: display the control/status and buffer descriptors.
 850 */
 851static void stmmac_display_ring(void *head, int size, int extend_desc)
 852{
 853	int i;
 854	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
 855	struct dma_desc *p = (struct dma_desc *)head;
 856
 857	for (i = 0; i < size; i++) {
 858		u64 x;
 859		if (extend_desc) {
 860			x = *(u64 *) ep;
 861			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
 862				i, (unsigned int)virt_to_phys(ep),
 863				(unsigned int)x, (unsigned int)(x >> 32),
 864				ep->basic.des2, ep->basic.des3);
 865			ep++;
 866		} else {
 867			x = *(u64 *) p;
 868			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
 869				i, (unsigned int)virt_to_phys(p),
 870				(unsigned int)x, (unsigned int)(x >> 32),
 871				p->des2, p->des3);
 872			p++;
 873		}
 874		pr_info("\n");
 
 
 
 875	}
 876}
 877
 878static void stmmac_display_rings(struct stmmac_priv *priv)
 
 879{
 880	unsigned int txsize = priv->dma_tx_size;
 881	unsigned int rxsize = priv->dma_rx_size;
 
 
 882
 883	if (priv->extend_desc) {
 884		pr_info("Extended RX descriptor ring:\n");
 885		stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
 886		pr_info("Extended TX descriptor ring:\n");
 887		stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
 888	} else {
 889		pr_info("RX descriptor ring:\n");
 890		stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
 891		pr_info("TX descriptor ring:\n");
 892		stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
 893	}
 894}
 895
 896static int stmmac_set_bfsize(int mtu, int bufsize)
 897{
 898	int ret = bufsize;
 899
 900	if (mtu >= BUF_SIZE_4KiB)
 901		ret = BUF_SIZE_8KiB;
 902	else if (mtu >= BUF_SIZE_2KiB)
 903		ret = BUF_SIZE_4KiB;
 904	else if (mtu > DEFAULT_BUFSIZE)
 905		ret = BUF_SIZE_2KiB;
 906	else
 907		ret = DEFAULT_BUFSIZE;
 908
 909	return ret;
 910}
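
/* Editor's note (worked example, not in the original source): the mapping
 * implemented by stmmac_set_bfsize() for a few common MTU values, assuming
 * the usual constants DEFAULT_BUFSIZE = 1536, BUF_SIZE_2KiB = 2048,
 * BUF_SIZE_4KiB = 4096 and BUF_SIZE_8KiB = 8192:
 *
 *   mtu = 1500  (<= DEFAULT_BUFSIZE)  -> DEFAULT_BUFSIZE
 *   mtu = 2000  (> DEFAULT_BUFSIZE)   -> BUF_SIZE_2KiB
 *   mtu = 3000  (>= BUF_SIZE_2KiB)    -> BUF_SIZE_4KiB
 *   mtu = 7000  (>= BUF_SIZE_4KiB)    -> BUF_SIZE_8KiB
 *
 * Note that every branch overwrites ret, so the bufsize argument is
 * effectively unused by the current code.
 */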
 911
 912/**
 913 * stmmac_clear_descriptors: clear descriptors
 914 * @priv: driver private structure
 915 * Description: this function is called to clear the tx and rx descriptors
 916 * in both the basic and the extended descriptor cases.
 917 */
 918static void stmmac_clear_descriptors(struct stmmac_priv *priv)
 919{
 
 920	int i;
 921	unsigned int txsize = priv->dma_tx_size;
 922	unsigned int rxsize = priv->dma_rx_size;
 923
 924	/* Clear the Rx/Tx descriptors */
 925	for (i = 0; i < rxsize; i++)
 926		if (priv->extend_desc)
 927			priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
 928						     priv->use_riwt, priv->mode,
 929						     (i == rxsize - 1));
 930		else
 931			priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
 932						     priv->use_riwt, priv->mode,
 933						     (i == rxsize - 1));
 934	for (i = 0; i < txsize; i++)
 935		if (priv->extend_desc)
 936			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
 937						     priv->mode,
 938						     (i == txsize - 1));
 939		else
 940			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
 941						     priv->mode,
 942						     (i == txsize - 1));
 
 943}
 944
 945static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 946				  int i)
 947{
 948	struct sk_buff *skb;
 949
 950	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
 951				 GFP_KERNEL);
 952	if (!skb) {
 953		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
 954		return -ENOMEM;
 955	}
 956	skb_reserve(skb, NET_IP_ALIGN);
 957	priv->rx_skbuff[i] = skb;
 958	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
 959						priv->dma_buf_sz,
 960						DMA_FROM_DEVICE);
 961	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
 962		pr_err("%s: DMA mapping error\n", __func__);
 963		dev_kfree_skb_any(skb);
 964		return -EINVAL;
 965	}
 966
 967	p->des2 = priv->rx_skbuff_dma[i];
 968
 969	if ((priv->hw->mode->init_desc3) &&
 970	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
 971		priv->hw->mode->init_desc3(p);
 972
 973	return 0;
 974}
 975
 976static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
 977{
 978	if (priv->rx_skbuff[i]) {
 979		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
 980				 priv->dma_buf_sz, DMA_FROM_DEVICE);
 981		dev_kfree_skb_any(priv->rx_skbuff[i]);
 982	}
 983	priv->rx_skbuff[i] = NULL;
 984}
 985
 986/**
 987 * init_dma_desc_rings - init the RX/TX descriptor rings
 988 * @dev: net device structure
 989 * Description:  this function initializes the DMA RX/TX descriptors
 991 * and allocates the socket buffers. It supports the chained and ring
 991 * modes.
 992 */
 993static int init_dma_desc_rings(struct net_device *dev)
 994{
 
 995	int i;
 996	struct stmmac_priv *priv = netdev_priv(dev);
 997	unsigned int txsize = priv->dma_tx_size;
 998	unsigned int rxsize = priv->dma_rx_size;
 999	unsigned int bfsize = 0;
1000	int ret = -ENOMEM;
1001
1002	if (priv->hw->mode->set_16kib_bfsize)
1003		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1004
1005	if (bfsize < BUF_SIZE_16KiB)
1006		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1007
1008	priv->dma_buf_sz = bfsize;
1009
1010	if (netif_msg_probe(priv))
1011		pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
1012			 txsize, rxsize, bfsize);
1013
1014	if (netif_msg_probe(priv)) {
1015		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
1016			 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
1017
1018		/* RX INITIALIZATION */
1019		pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
1020	}
1021	for (i = 0; i < rxsize; i++) {
1022		struct dma_desc *p;
 
1023		if (priv->extend_desc)
1024			p = &((priv->dma_erx + i)->basic);
1025		else
1026			p = priv->dma_rx + i;
 
 
1027
1028		ret = stmmac_init_rx_buffers(priv, p, i);
1029		if (ret)
1030			goto err_init_rx_buffers;
 
 
 
1031
1032		if (netif_msg_probe(priv))
1033			pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
1034				 priv->rx_skbuff[i]->data,
1035				 (unsigned int)priv->rx_skbuff_dma[i]);
1036	}
1037	priv->cur_rx = 0;
1038	priv->dirty_rx = (unsigned int)(i - rxsize);
1039	buf_sz = bfsize;
1040
1041	/* Setup the chained descriptor addresses */
1042	if (priv->mode == STMMAC_CHAIN_MODE) {
1043		if (priv->extend_desc) {
1044			priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
1045					     rxsize, 1);
1046			priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
1047					     txsize, 1);
1048		} else {
1049			priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
1050					     rxsize, 0);
1051			priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
1052					     txsize, 0);
1053		}
1054	}
1055
1056	/* TX INITIALIZATION */
1057	for (i = 0; i < txsize; i++) {
 
1058		struct dma_desc *p;
 
1059		if (priv->extend_desc)
1060			p = &((priv->dma_etx + i)->basic);
 
 
1061		else
1062			p = priv->dma_tx + i;
1063		p->des2 = 0;
1064		priv->tx_skbuff_dma[i] = 0;
1065		priv->tx_skbuff[i] = NULL;
1066	}
1067
1068	priv->dirty_tx = 0;
1069	priv->cur_tx = 0;
1070
1071	stmmac_clear_descriptors(priv);
1072
1073	if (netif_msg_hw(priv))
1074		stmmac_display_rings(priv);
1075
1076	return 0;
1077err_init_rx_buffers:
1078	while (--i >= 0)
1079		stmmac_free_rx_buffers(priv, i);
1080	return ret;
1081}
1082
1083static void dma_free_rx_skbufs(struct stmmac_priv *priv)
1084{
 
1085	int i;
1086
1087	for (i = 0; i < priv->dma_rx_size; i++)
1088		stmmac_free_rx_buffers(priv, i);
1089}
1090
1091static void dma_free_tx_skbufs(struct stmmac_priv *priv)
1092{
1093	int i;
1094
1095	for (i = 0; i < priv->dma_tx_size; i++) {
1096		struct dma_desc *p;
 
 
1097
1098		if (priv->extend_desc)
1099			p = &((priv->dma_etx + i)->basic);
1100		else
1101			p = priv->dma_tx + i;
1102
1103		if (priv->tx_skbuff_dma[i]) {
1104			dma_unmap_single(priv->device,
1105					 priv->tx_skbuff_dma[i],
1106					 priv->hw->desc->get_tx_len(p),
1107					 DMA_TO_DEVICE);
1108			priv->tx_skbuff_dma[i] = 0;
1109		}
1110
1111		if (priv->tx_skbuff[i] != NULL) {
1112			dev_kfree_skb_any(priv->tx_skbuff[i]);
1113			priv->tx_skbuff[i] = NULL;
1114		}
1115	}
1116}
1117
1118static int alloc_dma_desc_resources(struct stmmac_priv *priv)
 
1119{
1120	unsigned int txsize = priv->dma_tx_size;
1121	unsigned int rxsize = priv->dma_rx_size;
1122	int ret = -ENOMEM;
1123
1124	priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
1125					    GFP_KERNEL);
1126	if (!priv->rx_skbuff_dma)
1127		return -ENOMEM;
1128
1129	priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
1130					GFP_KERNEL);
1131	if (!priv->rx_skbuff)
1132		goto err_rx_skbuff;
1133
1134	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
1135					    GFP_KERNEL);
1136	if (!priv->tx_skbuff_dma)
1137		goto err_tx_skbuff_dma;
1138
1139	priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
1140					GFP_KERNEL);
1141	if (!priv->tx_skbuff)
1142		goto err_tx_skbuff;
1143
1144	if (priv->extend_desc) {
1145		priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
1146						   sizeof(struct
1147							  dma_extended_desc),
1148						   &priv->dma_rx_phy,
1149						   GFP_KERNEL);
1150		if (!priv->dma_erx)
1151			goto err_dma;
1152
1153		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
1154						   sizeof(struct
1155							  dma_extended_desc),
1156						   &priv->dma_tx_phy,
1157						   GFP_KERNEL);
1158		if (!priv->dma_etx) {
1159			dma_free_coherent(priv->device, priv->dma_rx_size *
1160					sizeof(struct dma_extended_desc),
1161					priv->dma_erx, priv->dma_rx_phy);
1162			goto err_dma;
1163		}
1164	} else {
1165		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
 
1166						  sizeof(struct dma_desc),
1167						  &priv->dma_rx_phy,
1168						  GFP_KERNEL);
1169		if (!priv->dma_rx)
1170			goto err_dma;
1171
1172		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
1173						  sizeof(struct dma_desc),
1174						  &priv->dma_tx_phy,
1175						  GFP_KERNEL);
1176		if (!priv->dma_tx) {
1177			dma_free_coherent(priv->device, priv->dma_rx_size *
1178					sizeof(struct dma_desc),
1179					priv->dma_rx, priv->dma_rx_phy);
1180			goto err_dma;
1181		}
1182	}
1183
1184	return 0;
1185
1186err_dma:
1187	kfree(priv->tx_skbuff);
1188err_tx_skbuff:
1189	kfree(priv->tx_skbuff_dma);
1190err_tx_skbuff_dma:
1191	kfree(priv->rx_skbuff);
1192err_rx_skbuff:
1193	kfree(priv->rx_skbuff_dma);
1194	return ret;
1195}
1196
1197static void free_dma_desc_resources(struct stmmac_priv *priv)
1198{
1199	/* Release the DMA TX/RX socket buffers */
1200	dma_free_rx_skbufs(priv);
1201	dma_free_tx_skbufs(priv);
1202
1203	/* Free DMA regions of consistent memory previously allocated */
1204	if (!priv->extend_desc) {
1205		dma_free_coherent(priv->device,
1206				  priv->dma_tx_size * sizeof(struct dma_desc),
1207				  priv->dma_tx, priv->dma_tx_phy);
1208		dma_free_coherent(priv->device,
1209				  priv->dma_rx_size * sizeof(struct dma_desc),
1210				  priv->dma_rx, priv->dma_rx_phy);
1211	} else {
1212		dma_free_coherent(priv->device, priv->dma_tx_size *
1213				  sizeof(struct dma_extended_desc),
1214				  priv->dma_etx, priv->dma_tx_phy);
1215		dma_free_coherent(priv->device, priv->dma_rx_size *
1216				  sizeof(struct dma_extended_desc),
1217				  priv->dma_erx, priv->dma_rx_phy);
1218	}
1219	kfree(priv->rx_skbuff_dma);
1220	kfree(priv->rx_skbuff);
1221	kfree(priv->tx_skbuff_dma);
1222	kfree(priv->tx_skbuff);
1223}
1224
1225/**
1226 *  stmmac_dma_operation_mode - HW DMA operation mode
1227 *  @priv: driver private structure
1228 *  Description: it sets the DMA operation mode: tx/rx DMA thresholds
1229 *  or Store-And-Forward capability.
1230 */
1231static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1232{
1233	if (priv->plat->force_thresh_dma_mode)
1234		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
1235	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1236		/*
1237		 * In case of GMAC, SF mode can be enabled
1238		 * to perform the TX COE in HW. This depends on:
 1239		 * 1) TX COE being actually supported
 1240		 * 2) there being no bugged Jumbo frame support
 1241		 *    that requires csum not to be inserted in the TDES.
1242		 */
1243		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
1244		tc = SF_DMA_MODE;
1245	} else
1246		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
1247}
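
/* Editor's note (illustrative summary, not in the original source): the
 * three cases handled above, where "tc" is the module-level threshold:
 *
 *   force_thresh_dma_mode        -> dma_mode(ioaddr, tc, tc)
 *   force_sf_dma_mode or tx_coe  -> dma_mode(ioaddr, SF_DMA_MODE,
 *                                   SF_DMA_MODE) and tc = SF_DMA_MODE
 *   otherwise                    -> dma_mode(ioaddr, tc, SF_DMA_MODE)
 *
 * i.e. RX always runs store-and-forward unless thresholds are forced, while
 * TX uses store-and-forward whenever the TX checksum engine is in use.
 */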
1248
1249/**
1250 * stmmac_tx_clean:
1251 * @priv: driver private structure
1252 * Description: it reclaims resources after transmission completes.
1253 */
1254static void stmmac_tx_clean(struct stmmac_priv *priv)
1255{
1256	unsigned int txsize = priv->dma_tx_size;
 
1257
1258	spin_lock(&priv->tx_lock);
1259
1260	priv->xstats.tx_clean++;
 
 
1261
1262	while (priv->dirty_tx != priv->cur_tx) {
1263		int last;
1264		unsigned int entry = priv->dirty_tx % txsize;
1265		struct sk_buff *skb = priv->tx_skbuff[entry];
1266		struct dma_desc *p;
1267
1268		if (priv->extend_desc)
1269			p = (struct dma_desc *)(priv->dma_etx + entry);
 
 
1270		else
1271			p = priv->dma_tx + entry;
1272
1273		/* Check if the descriptor is owned by the DMA. */
1274		if (priv->hw->desc->get_tx_owner(p))
 
1275			break;
1276
1277		/* Verify tx error by looking at the last segment. */
1278		last = priv->hw->desc->get_tx_ls(p);
1279		if (likely(last)) {
1280			int tx_error =
1281			    priv->hw->desc->tx_status(&priv->dev->stats,
1282						      &priv->xstats, p,
1283						      priv->ioaddr);
1284			if (likely(tx_error == 0)) {
1285				priv->dev->stats.tx_packets++;
1286				priv->xstats.tx_pkt_n++;
1287			} else
1288				priv->dev->stats.tx_errors++;
1289
1290			stmmac_get_tx_hwtstamp(priv, entry, skb);
1291		}
1292		if (netif_msg_tx_done(priv))
1293			pr_debug("%s: curr %d, dirty %d\n", __func__,
1294				 priv->cur_tx, priv->dirty_tx);
1295
1296		if (likely(priv->tx_skbuff_dma[entry])) {
1297			dma_unmap_single(priv->device,
1298					 priv->tx_skbuff_dma[entry],
1299					 priv->hw->desc->get_tx_len(p),
1300					 DMA_TO_DEVICE);
1301			priv->tx_skbuff_dma[entry] = 0;
1302		}
1303		priv->hw->mode->clean_desc3(priv, p);
1304
1305		if (likely(skb != NULL)) {
1306			dev_consume_skb_any(skb);
1307			priv->tx_skbuff[entry] = NULL;
1308		}
1309
1310		priv->hw->desc->release_tx_desc(p, priv->mode);
1311
1312		priv->dirty_tx++;
1313	}
1314	if (unlikely(netif_queue_stopped(priv->dev) &&
1315		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
1316		netif_tx_lock(priv->dev);
1317		if (netif_queue_stopped(priv->dev) &&
1318		    stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
1319			if (netif_msg_tx_done(priv))
1320				pr_debug("%s: restart transmit\n", __func__);
1321			netif_wake_queue(priv->dev);
1322		}
1323		netif_tx_unlock(priv->dev);
1324	}
1325
1326	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1327		stmmac_enable_eee_mode(priv);
1328		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
 
1329	}
1330	spin_unlock(&priv->tx_lock);
1331}
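
/* Editor's note (illustrative, not in the original source): cur_tx and
 * dirty_tx are free-running counters; only "counter % txsize" is used as a
 * ring index. For example, with txsize = 256, cur_tx = 260 and
 * dirty_tx = 258, the loop above reclaims ring entries 2 and 3
 * (258 % 256 and 259 % 256) and then stops when dirty_tx reaches cur_tx,
 * provided the DMA has released ownership of those descriptors.
 */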
1332
1333static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
1334{
1335	priv->hw->dma->enable_dma_irq(priv->ioaddr);
1336}
1337
1338static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
1339{
1340	priv->hw->dma->disable_dma_irq(priv->ioaddr);
1341}
1342
1343/**
 1344 * stmmac_tx_err: irq tx error management function
1345 * @priv: driver private structure
1346 * Description: it cleans the descriptors and restarts the transmission
1347 * in case of errors.
1348 */
1349static void stmmac_tx_err(struct stmmac_priv *priv)
1350{
1351	int i;
1352	int txsize = priv->dma_tx_size;
1353	netif_stop_queue(priv->dev);
1354
1355	priv->hw->dma->stop_tx(priv->ioaddr);
1356	dma_free_tx_skbufs(priv);
1357	for (i = 0; i < txsize; i++)
1358		if (priv->extend_desc)
1359			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1360						     priv->mode,
1361						     (i == txsize - 1));
1362		else
1363			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1364						     priv->mode,
1365						     (i == txsize - 1));
1366	priv->dirty_tx = 0;
1367	priv->cur_tx = 0;
1368	priv->hw->dma->start_tx(priv->ioaddr);
1369
1370	priv->dev->stats.tx_errors++;
1371	netif_wake_queue(priv->dev);
1372}
1373
1374/**
1375 * stmmac_dma_interrupt: DMA ISR
1376 * @priv: driver private structure
1377 * Description: this is the DMA ISR. It is called by the main ISR.
1378 * It calls the dwmac dma routine to understand which type of interrupt
 1379 * happened. If a Normal interrupt with either a TX or an RX event is
 1380 * reported, the NAPI is scheduled.
1381 */
1382static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1383{
1384	int status;
1385
1386	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
1387	if (likely((status & handle_rx)) || (status & handle_tx)) {
1388		if (likely(napi_schedule_prep(&priv->napi))) {
1389			stmmac_disable_dma_irq(priv);
1390			__napi_schedule(&priv->napi);
1391		}
1392	}
1393	if (unlikely(status & tx_hard_error_bump_tc)) {
1394		/* Try to bump up the dma threshold on this failure */
1395		if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
1396			tc += 64;
1397			priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
1398			priv->xstats.threshold = tc;
1399		}
1400	} else if (unlikely(status == tx_hard_error))
1401		stmmac_tx_err(priv);
1402}
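
/* Editor's note (illustrative, not in the original source): on a
 * tx_hard_error_bump_tc event the threshold "tc" is bumped by 64 for as long
 * as its current value is <= 256 and SF mode is not in use (e.g.
 * 64 -> 128 -> 192 -> 256); the new value is programmed via dma_mode() and
 * recorded in xstats.threshold. A plain tx_hard_error status instead
 * triggers stmmac_tx_err() to reset the TX ring.
 */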
1403
1404/**
1405 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
1406 * @priv: driver private structure
 1407 * Description: this masks the MMC irq since the counters are managed in SW.
1408 */
1409static void stmmac_mmc_setup(struct stmmac_priv *priv)
1410{
1411	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1412	    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1413
1414	dwmac_mmc_intr_all_mask(priv->ioaddr);
1415
1416	if (priv->dma_cap.rmon) {
1417		dwmac_mmc_ctrl(priv->ioaddr, mode);
1418		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1419	} else
1420		pr_info(" No MAC Management Counters available\n");
1421}
1422
1423static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
1424{
1425	u32 hwid = priv->hw->synopsys_uid;
1426
1427	/* Check Synopsys Id (not available on old chips) */
1428	if (likely(hwid)) {
1429		u32 uid = ((hwid & 0x0000ff00) >> 8);
1430		u32 synid = (hwid & 0x000000ff);
1431
1432		pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
1433			uid, synid);
1434
1435		return synid;
1436	}
1437	return 0;
1438}
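
/* Editor's note (worked example, not in the original source): for a
 * hypothetical synopsys_uid value of 0x1037, the code above reports
 * user ID 0x10 (bits 15:8) and Synopsys ID 0x37 (bits 7:0) and returns
 * 0x37, which is then compared against constants such as DWMAC_CORE_3_50
 * elsewhere in the driver.
 */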
1439
1440/**
 1441 * stmmac_selec_desc_mode: to select among normal/alternate/extended descriptors
 1442 * @priv: driver private structure
 1443 * Description: select the Enhanced/Alternate or Normal descriptors.
 1444 * In case of Enhanced/Alternate, it checks whether the extended
 1445 * descriptors are supported by the HW capability register.
1446 */
1447static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1448{
1449	if (priv->plat->enh_desc) {
1450		pr_info(" Enhanced/Alternate descriptors\n");
1451
1452		/* GMAC older than 3.50 has no extended descriptors */
1453		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1454			pr_info("\tEnabled extended descriptors\n");
1455			priv->extend_desc = 1;
1456		} else
1457			pr_warn("Extended descriptors not supported\n");
1458
1459		priv->hw->desc = &enh_desc_ops;
1460	} else {
1461		pr_info(" Normal descriptors\n");
1462		priv->hw->desc = &ndesc_ops;
1463	}
1464}
1465
1466/**
1467 * stmmac_get_hw_features: get MAC capabilities from the HW cap. register.
1468 * @priv: driver private structure
1469 * Description:
 1470 *  new GMAC chip generations have a register to indicate the
 1471 *  presence of the optional features/functions.
 1472 *  This can also be used to override the values passed through the
 1473 *  platform, which is necessary for old MAC10/100 and GMAC chips.
1474 */
1475static int stmmac_get_hw_features(struct stmmac_priv *priv)
1476{
1477	u32 hw_cap = 0;
1478
1479	if (priv->hw->dma->get_hw_feature) {
1480		hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
1481
1482		priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
1483		priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
1484		priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
1485		priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
1486		priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
1487		priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
1488		priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
1489		priv->dma_cap.pmt_remote_wake_up =
1490		    (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
1491		priv->dma_cap.pmt_magic_frame =
1492		    (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
1493		/* MMC */
1494		priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
1495		/* IEEE 1588-2002 */
1496		priv->dma_cap.time_stamp =
1497		    (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
1498		/* IEEE 1588-2008 */
1499		priv->dma_cap.atime_stamp =
1500		    (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
1501		/* 802.3az - Energy-Efficient Ethernet (EEE) */
1502		priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
1503		priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
1504		/* TX and RX csum */
1505		priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
1506		priv->dma_cap.rx_coe_type1 =
1507		    (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
1508		priv->dma_cap.rx_coe_type2 =
1509		    (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
1510		priv->dma_cap.rxfifo_over_2048 =
1511		    (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
1512		/* TX and RX number of channels */
1513		priv->dma_cap.number_rx_channel =
1514		    (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
1515		priv->dma_cap.number_tx_channel =
1516		    (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
1517		/* Alternate (enhanced) DESC mode */
1518		priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
1519	}
1520
1521	return hw_cap;
1522}
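
/* Editor's note (illustrative, not in the original source): each dma_cap
 * field above is a single feature bit shifted down to bit 0, so it ends up
 * holding 0 or 1. For a hypothetical hw_cap with DMA_HW_FEAT_MMCSEL (bit 11)
 * and DMA_HW_FEAT_TXCOESEL (bit 16) set, the code yields dma_cap.rmon = 1
 * and dma_cap.tx_coe = 1 while the remaining capability flags stay 0.
 */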
1523
1524/**
1525 * stmmac_check_ether_addr: check if the MAC addr is valid
1526 * @priv: driver private structure
1527 * Description:
 1528 * it verifies if the MAC address is valid; in case of failure it
 1529 * generates a random MAC address.
1530 */
1531static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1532{
 
 
1533	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1534		priv->hw->mac->get_umac_addr((void __iomem *)
1535					     priv->dev->base_addr,
1536					     priv->dev->dev_addr, 0);
1537		if (!is_valid_ether_addr(priv->dev->dev_addr))
1538			eth_hw_addr_random(priv->dev);
1539		pr_info("%s: device MAC address %pM\n", priv->dev->name,
1540			priv->dev->dev_addr);
1541	}
1542}
1543
1544/**
1545 * stmmac_init_dma_engine: DMA init.
1546 * @priv: driver private structure
1547 * Description:
1548 * It inits the DMA invoking the specific MAC/GMAC callback.
1549 * Some DMA parameters can be passed from the platform;
 1550 * if these are not passed, a default is kept for the MAC or GMAC.
1551 */
1552static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1553{
1554	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
1555	int mixed_burst = 0;
1556	int atds = 0;
1557
1558	if (priv->plat->dma_cfg) {
1559		pbl = priv->plat->dma_cfg->pbl;
1560		fixed_burst = priv->plat->dma_cfg->fixed_burst;
1561		mixed_burst = priv->plat->dma_cfg->mixed_burst;
1562		burst_len = priv->plat->dma_cfg->burst_len;
 
 
1563	}
1564
1565	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1566		atds = 1;
1567
1568	return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
1569				   burst_len, priv->dma_tx_phy,
1570				   priv->dma_rx_phy, atds);
1571}
1572
1573/**
1574 * stmmac_tx_timer: mitigation sw timer for tx.
1575 * @data: data pointer
1576 * Description:
1577 * This is the timer handler to directly invoke the stmmac_tx_clean.
1578 */
1579static void stmmac_tx_timer(unsigned long data)
1580{
1581	struct stmmac_priv *priv = (struct stmmac_priv *)data;
1582
1583	stmmac_tx_clean(priv);
1584}
1585
1586/**
1587 * stmmac_init_tx_coalesce: init tx mitigation options.
1588 * @priv: driver private structure
1589 * Description:
1590 * This inits the transmit coalesce parameters: i.e. timer rate,
1591 * timer handler and default threshold used for enabling the
1592 * interrupt on completion bit.
1593 */
1594static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1595{
1596	priv->tx_coal_frames = STMMAC_TX_FRAMES;
1597	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1598	init_timer(&priv->txtimer);
1599	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1600	priv->txtimer.data = (unsigned long)priv;
1601	priv->txtimer.function = stmmac_tx_timer;
1602	add_timer(&priv->txtimer);
1603}
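
/* Editor's note (illustrative, not in the original source): with the
 * defaults set above, TX completion interrupts are coalesced roughly every
 * STMMAC_TX_FRAMES descriptors, and the sw timer armed here (and re-armed in
 * stmmac_xmit) fires after STMMAC_COAL_TIMER(tx_coal_timer) so that
 * stmmac_tx_clean() still runs when traffic stops before the frame
 * threshold is reached. The init_timer()/data/function setup is the
 * old-style timer API of this kernel generation.
 */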
1604
1605/**
 1606 * stmmac_hw_setup: setup the MAC in a usable state.
1607 *  @dev : pointer to the device structure.
1608 *  Description:
 1609 *  This function sets up the IP in a usable state.
1610 *  Return value:
1611 *  0 on success and an appropriate (-)ve integer as defined in errno.h
1612 *  file on failure.
1613 */
1614static int stmmac_hw_setup(struct net_device *dev)
1615{
1616	struct stmmac_priv *priv = netdev_priv(dev);
1617	int ret;
1618
1619	ret = init_dma_desc_rings(dev);
1620	if (ret < 0) {
1621		pr_err("%s: DMA descriptors initialization failed\n", __func__);
1622		return ret;
1623	}
1624	/* DMA initialization and SW reset */
1625	ret = stmmac_init_dma_engine(priv);
1626	if (ret < 0) {
1627		pr_err("%s: DMA engine initialization failed\n", __func__);
 
1628		return ret;
1629	}
1630
1631	/* Copy the MAC addr into the HW  */
1632	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
1633
1634	/* If required, perform hw setup of the bus. */
1635	if (priv->plat->bus_setup)
1636		priv->plat->bus_setup(priv->ioaddr);
1637
1638	/* Initialize the MAC Core */
1639	priv->hw->mac->core_init(priv->ioaddr, dev->mtu);
1640
1641	/* Enable the MAC Rx/Tx */
1642	stmmac_set_mac(priv->ioaddr, true);
1643
1644	/* Set the HW DMA mode and the COE */
1645	stmmac_dma_operation_mode(priv);
1646
1647	stmmac_mmc_setup(priv);
1648
1649	ret = stmmac_init_ptp(priv);
1650	if (ret && ret != -EOPNOTSUPP)
1651		pr_warn("%s: failed PTP initialisation\n", __func__);
1652
1653#ifdef CONFIG_STMMAC_DEBUG_FS
1654	ret = stmmac_init_fs(dev);
1655	if (ret < 0)
1656		pr_warn("%s: failed debugFS registration\n", __func__);
1657#endif
1658	/* Start the ball rolling... */
1659	pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
1660	priv->hw->dma->start_tx(priv->ioaddr);
1661	priv->hw->dma->start_rx(priv->ioaddr);
1662
1663	/* Dump DMA/MAC registers */
1664	if (netif_msg_hw(priv)) {
1665		priv->hw->mac->dump_regs(priv->ioaddr);
1666		priv->hw->dma->dump_regs(priv->ioaddr);
 
 
 
1667	}
1668	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1669
1670	priv->eee_enabled = stmmac_eee_init(priv);
1671
1672	stmmac_init_tx_coalesce(priv);
 
 
 
1673
1674	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1675		priv->rx_riwt = MAX_DMA_RIWT;
1676		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1677	}
1678
1679	if (priv->pcs && priv->hw->mac->ctrl_ane)
1680		priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
1681
1682	return 0;
1683}
1684
1685/**
1686 *  stmmac_open - open entry point of the driver
1687 *  @dev : pointer to the device structure.
1688 *  Description:
1689 *  This function is the open entry point of the driver.
1690 *  Return value:
1691 *  0 on success and an appropriate (-)ve integer as defined in errno.h
1692 *  file on failure.
1693 */
1694static int stmmac_open(struct net_device *dev)
1695{
1696	struct stmmac_priv *priv = netdev_priv(dev);
1697	int ret;
1698
1699	stmmac_check_ether_addr(priv);
1700
1701	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1702	    priv->pcs != STMMAC_PCS_RTBI) {
1703		ret = stmmac_init_phy(dev);
1704		if (ret) {
1705			pr_err("%s: Cannot attach to PHY (error: %d)\n",
1706			       __func__, ret);
1707			return ret;
1708		}
1709	}
1710
1711	/* Extra statistics */
1712	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1713	priv->xstats.threshold = tc;
1714
1715	/* Create and initialize the TX/RX descriptors chains. */
1716	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
1717	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
1718	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
 
 
1719
1720	ret = alloc_dma_desc_resources(priv);
1721	if (ret < 0) {
1722		pr_err("%s: DMA descriptors allocation failed\n", __func__);
1723		goto dma_desc_error;
1724	}
1725
1726	ret = stmmac_hw_setup(dev);
1727	if (ret < 0) {
1728		pr_err("%s: Hw setup failed\n", __func__);
1729		goto init_error;
1730	}
1731
1732	if (priv->phydev)
1733		phy_start(priv->phydev);
1734
1735	/* Request the IRQ lines */
1736	ret = request_irq(dev->irq, stmmac_interrupt,
1737			  IRQF_SHARED, dev->name, dev);
1738	if (unlikely(ret < 0)) {
1739		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
1740		       __func__, dev->irq, ret);
1741		goto init_error;
 
 
1742	}
1743
 1744	/* Request the Wake IRQ in case another line is used for WoL */
1745	if (priv->wol_irq != dev->irq) {
 
 
 
1746		ret = request_irq(priv->wol_irq, stmmac_interrupt,
1747				  IRQF_SHARED, dev->name, dev);
1748		if (unlikely(ret < 0)) {
1749			pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1750			       __func__, priv->wol_irq, ret);
1751			goto wolirq_error;
 
 
1752		}
1753	}
1754
 1755	/* Request the LPI IRQ in case a separate line is used for it */
1756	if (priv->lpi_irq != -ENXIO) {
1757		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1758				  dev->name, dev);
1759		if (unlikely(ret < 0)) {
1760			pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1761			       __func__, priv->lpi_irq, ret);
1762			goto lpiirq_error;
1763		}
1764	}
1765
1766	napi_enable(&priv->napi);
1767	netif_start_queue(dev);
1768
1769	return 0;
1770
1771lpiirq_error:
1772	if (priv->wol_irq != dev->irq)
1773		free_irq(priv->wol_irq, dev);
1774wolirq_error:
1775	free_irq(dev->irq, dev);
1776
 
1777init_error:
1778	free_dma_desc_resources(priv);
1779dma_desc_error:
1780	if (priv->phydev)
1781		phy_disconnect(priv->phydev);
1782
 
1783	return ret;
1784}
1785
1786/**
1787 *  stmmac_release - close entry point of the driver
1788 *  @dev : device pointer.
1789 *  Description:
1790 *  This is the stop entry point of the driver.
1791 */
1792static int stmmac_release(struct net_device *dev)
1793{
1794	struct stmmac_priv *priv = netdev_priv(dev);
 
1795
1796	if (priv->eee_enabled)
1797		del_timer_sync(&priv->eee_ctrl_timer);
1798
1799	/* Stop and disconnect the PHY */
1800	if (priv->phydev) {
1801		phy_stop(priv->phydev);
1802		phy_disconnect(priv->phydev);
1803		priv->phydev = NULL;
1804	}
1805
1806	netif_stop_queue(dev);
1807
1808	napi_disable(&priv->napi);
 
1809
1810	del_timer_sync(&priv->txtimer);
1811
1812	/* Free the IRQ lines */
1813	free_irq(dev->irq, dev);
1814	if (priv->wol_irq != dev->irq)
1815		free_irq(priv->wol_irq, dev);
1816	if (priv->lpi_irq != -ENXIO)
1817		free_irq(priv->lpi_irq, dev);
 
1818
1819	/* Stop TX/RX DMA and clear the descriptors */
1820	priv->hw->dma->stop_tx(priv->ioaddr);
1821	priv->hw->dma->stop_rx(priv->ioaddr);
1822
1823	/* Release and free the Rx/Tx resources */
1824	free_dma_desc_resources(priv);
1825
1826	/* Disable the MAC Rx/Tx */
1827	stmmac_set_mac(priv->ioaddr, false);
1828
1829	netif_carrier_off(dev);
1830
1831#ifdef CONFIG_STMMAC_DEBUG_FS
1832	stmmac_exit_fs();
1833#endif
1834
1835	stmmac_release_ptp(priv);
1836
1837	return 0;
1838}
1839
1840/**
1841 *  stmmac_xmit: Tx entry point of the driver
1842 *  @skb : the socket buffer
1843 *  @dev : device pointer
1844 *  Description : this is the tx entry point of the driver.
1845 *  It programs the chain or the ring and supports oversized frames
 1846 *  and the SG feature.
1847 */
1848static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1849{
 
1850	struct stmmac_priv *priv = netdev_priv(dev);
1851	unsigned int txsize = priv->dma_tx_size;
1852	unsigned int entry;
1853	int i, csum_insertion = 0, is_jumbo = 0;
 
1854	int nfrags = skb_shinfo(skb)->nr_frags;
 
 
 
1855	struct dma_desc *desc, *first;
1856	unsigned int nopaged_len = skb_headlen(skb);
1857	unsigned int enh_desc = priv->plat->enh_desc;
1858
1859	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
1860		if (!netif_queue_stopped(dev)) {
1861			netif_stop_queue(dev);
1862			/* This is a hard error, log it. */
1863			pr_err("%s: Tx Ring full when queue awake\n", __func__);
 
 
1864		}
1865		return NETDEV_TX_BUSY;
1866	}
1867
1868	spin_lock(&priv->tx_lock);
 
1869
1870	if (priv->tx_path_in_lpi_mode)
1871		stmmac_disable_eee_mode(priv);
1872
1873	entry = priv->cur_tx % txsize;
1874
1875	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
1876
1877	if (priv->extend_desc)
1878		desc = (struct dma_desc *)(priv->dma_etx + entry);
 
 
1879	else
1880		desc = priv->dma_tx + entry;
1881
1882	first = desc;
1883
1884	/* To program the descriptors according to the size of the frame */
1885	if (enh_desc)
1886		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
1887
1888	if (likely(!is_jumbo)) {
1889		desc->des2 = dma_map_single(priv->device, skb->data,
1890					    nopaged_len, DMA_TO_DEVICE);
1891		priv->tx_skbuff_dma[entry] = desc->des2;
1892		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1893						csum_insertion, priv->mode);
1894	} else {
1895		desc = first;
1896		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
1897	}
1898
1899	for (i = 0; i < nfrags; i++) {
1900		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1901		int len = skb_frag_size(frag);
1902
1903		priv->tx_skbuff[entry] = NULL;
1904		entry = (++priv->cur_tx) % txsize;
1905		if (priv->extend_desc)
1906			desc = (struct dma_desc *)(priv->dma_etx + entry);
1907		else
1908			desc = priv->dma_tx + entry;
1909
1910		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
1911					      DMA_TO_DEVICE);
1912		priv->tx_skbuff_dma[entry] = desc->des2;
1913		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
1914						priv->mode);
1915		wmb();
1916		priv->hw->desc->set_tx_owner(desc);
1917		wmb();
1918	}
1919
1920	priv->tx_skbuff[entry] = skb;
 
 
1921
1922	/* Finalize the latest segment. */
1923	priv->hw->desc->close_tx_desc(desc);
1924
1925	wmb();
 1926	/* According to the coalesce parameter, the IC bit for the latest
 1927	 * segment can be cleared and the timer re-started to invoke the
 1928	 * stmmac_tx function. This approach takes care of the fragments.
 1929	 */
1930	priv->tx_count_frames += nfrags + 1;
1931	if (priv->tx_coal_frames > priv->tx_count_frames) {
1932		priv->hw->desc->clear_tx_ic(desc);
1933		priv->xstats.tx_reset_ic_bit++;
1934		mod_timer(&priv->txtimer,
1935			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
1936	} else
1937		priv->tx_count_frames = 0;
1938
 1939	/* To avoid a race condition */
1940	priv->hw->desc->set_tx_owner(first);
1941	wmb();
1942
1943	priv->cur_tx++;
1944
1945	if (netif_msg_pktdata(priv)) {
1946		pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
1947			__func__, (priv->cur_tx % txsize),
1948			(priv->dirty_tx % txsize), entry, first, nfrags);
1949
1950		if (priv->extend_desc)
1951			stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
1952		else
1953			stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
1954
1955		pr_debug(">>> frame to be transmitted: ");
1956		print_pkt(skb->data, skb->len);
1957	}
1958	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
1959		if (netif_msg_hw(priv))
1960			pr_debug("%s: stop transmitted packets\n", __func__);
1961		netif_stop_queue(dev);
1962	}
1963
1964	dev->stats.tx_bytes += skb->len;
 
1965
1966	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1967		     priv->hwts_tx_en)) {
1968		/* declare that device is doing timestamping */
1969		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1970		priv->hw->desc->enable_tx_timestamp(first);
1971	}
1972
1973	if (!priv->hwts_tx_en)
1974		skb_tx_timestamp(skb);
 
 
 
1975
1976	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
1977
1978	spin_unlock(&priv->tx_lock);
1979
1980	return NETDEV_TX_OK;
1981}
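
/* Editor's note (worked example, not in the original source): for a
 * non-jumbo skb with a linear head and two page fragments (nfrags = 2) and
 * txsize = 256, starting from cur_tx = 10 the code above uses entry 10 for
 * the head, entries 11 and 12 for the fragments, stores the skb pointer in
 * tx_skbuff[12] (the last segment), closes that descriptor, and finally
 * hands the first descriptor back to the DMA before bumping cur_tx to 13.
 */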
1982
1983static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
1984{
1985	struct ethhdr *ehdr;
 
1986	u16 vlanid;
1987
1988	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
1989	    NETIF_F_HW_VLAN_CTAG_RX &&
1990	    !__vlan_get_tag(skb, &vlanid)) {
 
1991		/* pop the vlan tag */
1992		ehdr = (struct ethhdr *)skb->data;
1993		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
1994		skb_pull(skb, VLAN_HLEN);
1995		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
1996	}
1997}
1998
1999
2000/**
2001 * stmmac_rx_refill: refill used skb preallocated buffers
2002 * @priv: driver private structure
 
2003 * Description : this is to reallocate the skb for the reception process
2004 * that is based on zero-copy.
2005 */
2006static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2007{
2008	unsigned int rxsize = priv->dma_rx_size;
2009	int bfsize = priv->dma_buf_sz;
 
 
2010
2011	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
2012		unsigned int entry = priv->dirty_rx % rxsize;
 
 
 
2013		struct dma_desc *p;
 
2014
2015		if (priv->extend_desc)
2016			p = (struct dma_desc *)(priv->dma_erx + entry);
2017		else
2018			p = priv->dma_rx + entry;
2019
2020		if (likely(priv->rx_skbuff[entry] == NULL)) {
2021			struct sk_buff *skb;
2022
2023			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
 
 
 
2024
2025			if (unlikely(skb == NULL))
2026				break;
 
 
2027
2028			priv->rx_skbuff[entry] = skb;
2029			priv->rx_skbuff_dma[entry] =
2030			    dma_map_single(priv->device, skb->data, bfsize,
2031					   DMA_FROM_DEVICE);
2032
2033			p->des2 = priv->rx_skbuff_dma[entry];
2034
2035			priv->hw->mode->refill_desc3(priv, p);
 
2036
2037			if (netif_msg_rx_status(priv))
2038				pr_debug("\trefill entry #%d\n", entry);
2039		}
2040		wmb();
2041		priv->hw->desc->set_rx_owner(p);
2042		wmb();
2043	}
2044}
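
/* Editor's note (illustrative, not in the original source): cur_rx and
 * dirty_rx are free-running counters, so "cur_rx - dirty_rx" in the loop
 * above is the number of ring entries the RX path has consumed but not yet
 * refilled. With rxsize = 128, cur_rx = 130 and dirty_rx = 127, the refill
 * loop visits entries 127 % 128, 128 % 128 and 129 % 128 (i.e. 127, 0 and
 * 1), allocates a new skb for each empty slot and returns descriptor
 * ownership to the DMA.
 */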
2045
2046/**
 2047 * stmmac_rx: process the frames received on the RX ring
 2048 * @priv: driver private structure
 2049 * @limit: napi budget.
 2050 * Description : this is the function called by the napi poll method.
 2051 * It gets all the frames inside the ring.
2052 */
2053static int stmmac_rx(struct stmmac_priv *priv, int limit)
2054{
2055	unsigned int rxsize = priv->dma_rx_size;
2056	unsigned int entry = priv->cur_rx % rxsize;
2057	unsigned int next_entry;
2058	unsigned int count = 0;
2059	int coe = priv->plat->rx_coe;
2060
2061	if (netif_msg_rx_status(priv)) {
2062		pr_debug("%s: descriptor ring:\n", __func__);
2063		if (priv->extend_desc)
2064			stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
2065		else
2066			stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
2067	}
2068	while (count < limit) {
2069		int status;
2070		struct dma_desc *p;
2071
2072		if (priv->extend_desc)
2073			p = (struct dma_desc *)(priv->dma_erx + entry);
2074		else
2075			p = priv->dma_rx + entry;
2076
2077		if (priv->hw->desc->get_rx_owner(p))
2078			break;
2079
2080		count++;
 
 
2081
2082		next_entry = (++priv->cur_rx) % rxsize;
2083		if (priv->extend_desc)
2084			prefetch(priv->dma_erx + next_entry);
2085		else
2086			prefetch(priv->dma_rx + next_entry);
2087
2088		/* read the status of the incoming frame */
2089		status = priv->hw->desc->rx_status(&priv->dev->stats,
2090						   &priv->xstats, p);
2091		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2092			priv->hw->desc->rx_extended_status(&priv->dev->stats,
2093							   &priv->xstats,
2094							   priv->dma_erx +
2095							   entry);
2096		if (unlikely(status == discard_frame)) {
2097			priv->dev->stats.rx_errors++;
2098			if (priv->hwts_rx_en && !priv->extend_desc) {
 2099				/* DESC2 & DESC3 will be overwritten by device
2100				 * with timestamp value, hence reinitialize
2101				 * them in stmmac_rx_refill() function so that
2102				 * device can reuse it.
2103				 */
2104				priv->rx_skbuff[entry] = NULL;
2105				dma_unmap_single(priv->device,
2106						 priv->rx_skbuff_dma[entry],
2107						 priv->dma_buf_sz,
2108						 DMA_FROM_DEVICE);
2109			}
2110		} else {
2111			struct sk_buff *skb;
2112			int frame_len;
 
2113
2114			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
 
2115
2116			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2117			 * Type frames (LLC/LLC-SNAP)
2118			 */
2119			if (unlikely(status != llc_snap))
2120				frame_len -= ETH_FCS_LEN;
2121
2122			if (netif_msg_rx_status(priv)) {
2123				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
2124					 p, entry, p->des2);
2125				if (frame_len > ETH_FRAME_LEN)
2126					pr_debug("\tframe size %d, COE: %d\n",
2127						 frame_len, status);
 
 
 
2128			}
2129			skb = priv->rx_skbuff[entry];
2130			if (unlikely(!skb)) {
2131				pr_err("%s: Inconsistent Rx descriptor chain\n",
2132				       priv->dev->name);
2133				priv->dev->stats.rx_dropped++;
2134				break;
2135			}
2136			prefetch(skb->data - NET_IP_ALIGN);
2137			priv->rx_skbuff[entry] = NULL;
2138
2139			stmmac_get_rx_hwtstamp(priv, entry, skb);
2140
2141			skb_put(skb, frame_len);
2142			dma_unmap_single(priv->device,
2143					 priv->rx_skbuff_dma[entry],
2144					 priv->dma_buf_sz, DMA_FROM_DEVICE);
2145
2146			if (netif_msg_pktdata(priv)) {
2147				pr_debug("frame received (%dbytes)", frame_len);
2148				print_pkt(skb->data, frame_len);
2149			}
2150
2151			stmmac_rx_vlan(priv->dev, skb);
 
 
 
2152
2153			skb->protocol = eth_type_trans(skb, priv->dev);
 
2154
2155			if (unlikely(!coe))
2156				skb_checksum_none_assert(skb);
2157			else
2158				skb->ip_summed = CHECKSUM_UNNECESSARY;
2159
2160			napi_gro_receive(&priv->napi, skb);
 
 
 
2161
2162			priv->dev->stats.rx_packets++;
2163			priv->dev->stats.rx_bytes += frame_len;
2164		}
2165		entry = next_entry;
 
2166	}
2167
2168	stmmac_rx_refill(priv);
 
 
2169
2170	priv->xstats.rx_pkt_n += count;
2171
2172	return count;
2173}
2174
2175/**
2176 *  stmmac_poll - stmmac poll method (NAPI)
2177 *  @napi : pointer to the napi structure.
2178 *  @budget : maximum number of packets that the current CPU can receive from
2179 *	      all interfaces.
2180 *  Description :
2181 *  To look at the incoming frames and clear the tx resources.
2182 */
2183static int stmmac_poll(struct napi_struct *napi, int budget)
2184{
2185	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2186	int work_done = 0;
2187
2188	priv->xstats.napi_poll++;
2189	stmmac_tx_clean(priv);
2190
2191	work_done = stmmac_rx(priv, budget);
2192	if (work_done < budget) {
2193		napi_complete(napi);
2194		stmmac_enable_dma_irq(priv);
 
2195	}
 
2196	return work_done;
2197}
2198
2199/**
2200 *  stmmac_tx_timeout
2201 *  @dev : Pointer to net device structure
 
2202 *  Description: this function is called when a packet transmission fails to
2203 *   complete within a reasonable time. The driver will mark the error in the
2204 *   netdev structure and arrange for the device to be reset to a sane state
2205 *   in order to transmit a new packet.
2206 */
2207static void stmmac_tx_timeout(struct net_device *dev)
2208{
2209	struct stmmac_priv *priv = netdev_priv(dev);
2210
2211	/* Clear Tx resources and restart transmitting again */
2212	stmmac_tx_err(priv);
2213}
2214
2215/* Configuration changes (passed on by ifconfig) */
2216static int stmmac_config(struct net_device *dev, struct ifmap *map)
2217{
2218	if (dev->flags & IFF_UP)	/* can't act on a running interface */
2219		return -EBUSY;
2220
2221	/* Don't allow changing the I/O address */
2222	if (map->base_addr != dev->base_addr) {
2223		pr_warn("%s: can't change I/O address\n", dev->name);
2224		return -EOPNOTSUPP;
2225	}
2226
2227	/* Don't allow changing the IRQ */
2228	if (map->irq != dev->irq) {
 2229		pr_warn("%s: can't change IRQ number %d\n", dev->name, dev->irq);
2230		return -EOPNOTSUPP;
2231	}
2232
2233	return 0;
2234}
2235
2236/**
2237 *  stmmac_set_rx_mode - entry point for multicast addressing
2238 *  @dev : pointer to the device structure
2239 *  Description:
2240 *  This function is a driver entry point which gets called by the kernel
2241 *  whenever multicast addresses must be enabled/disabled.
2242 *  Return value:
2243 *  void.
2244 */
2245static void stmmac_set_rx_mode(struct net_device *dev)
2246{
2247	struct stmmac_priv *priv = netdev_priv(dev);
2248
2249	spin_lock(&priv->lock);
2250	priv->hw->mac->set_filter(dev, priv->synopsys_id);
2251	spin_unlock(&priv->lock);
2252}
2253
2254/**
2255 *  stmmac_change_mtu - entry point to change MTU size for the device.
2256 *  @dev : device pointer.
2257 *  @new_mtu : the new MTU size for the device.
2258 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
2259 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
2260 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
2261 *  Return value:
2262 *  0 on success and an appropriate (-)ve integer as defined in errno.h
2263 *  file on failure.
2264 */
2265static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2266{
2267	struct stmmac_priv *priv = netdev_priv(dev);
2268	int max_mtu;
2269
2270	if (netif_running(dev)) {
2271		pr_err("%s: must be stopped to change its MTU\n", dev->name);
2272		return -EBUSY;
2273	}
2274
2275	if (priv->plat->enh_desc)
2276		max_mtu = JUMBO_LEN;
2277	else
2278		max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
 
 
 
2279
2280	if (priv->plat->maxmtu < max_mtu)
2281		max_mtu = priv->plat->maxmtu;
2282
2283	if ((new_mtu < 46) || (new_mtu > max_mtu)) {
2284		pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
2285		return -EINVAL;
2286	}
2287
2288	dev->mtu = new_mtu;
2289	netdev_update_features(dev);
2290
2291	return 0;
2292}
2293
2294static netdev_features_t stmmac_fix_features(struct net_device *dev,
2295					     netdev_features_t features)
2296{
2297	struct stmmac_priv *priv = netdev_priv(dev);
2298
2299	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2300		features &= ~NETIF_F_RXCSUM;
2301	else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
2302		features &= ~NETIF_F_IPV6_CSUM;
2303	if (!priv->plat->tx_coe)
2304		features &= ~NETIF_F_ALL_CSUM;
2305
2306	/* Some GMAC devices have a bugged Jumbo frame support that
2307	 * needs to have the Tx COE disabled for oversized frames
2308	 * (due to limited buffer sizes). In this case we disable
 2309	 * the TX csum insertion in the TDES and do not use SF.
2310	 */
2311	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2312		features &= ~NETIF_F_ALL_CSUM;
2313
2314	return features;
2315}
2316
2317/**
2318 *  stmmac_interrupt - main ISR
2319 *  @irq: interrupt number.
2320 *  @dev_id: to pass the net device pointer.
2321 *  Description: this is the main driver interrupt service routine.
2322 *  It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
2323 *  interrupts.
2324 */
2325static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2326{
2327	struct net_device *dev = (struct net_device *)dev_id;
2328	struct stmmac_priv *priv = netdev_priv(dev);
2329
2330	if (priv->irq_wake)
2331		pm_wakeup_event(priv->device, 0);
2332
2333	if (unlikely(!dev)) {
2334		pr_err("%s: invalid dev pointer\n", __func__);
2335		return IRQ_NONE;
2336	}
 
 
2337
2338	/* To handle GMAC own interrupts */
2339	if (priv->plat->has_gmac) {
2340		int status = priv->hw->mac->host_irq_status((void __iomem *)
2341							    dev->base_addr,
2342							    &priv->xstats);
2343		if (unlikely(status)) {
2344			/* For LPI we need to save the tx status */
2345			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2346				priv->tx_path_in_lpi_mode = true;
2347			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2348				priv->tx_path_in_lpi_mode = false;
2349		}
2350	}
2351
2352	/* To handle DMA interrupts */
2353	stmmac_dma_interrupt(priv);
2354
2355	return IRQ_HANDLED;
2356}
2357
2358#ifdef CONFIG_NET_POLL_CONTROLLER
2359/* Polling receive - used by NETCONSOLE and other diagnostic tools
2360 * to allow network I/O with interrupts disabled.
2361 */
2362static void stmmac_poll_controller(struct net_device *dev)
2363{
2364	disable_irq(dev->irq);
2365	stmmac_interrupt(dev->irq, dev);
2366	enable_irq(dev->irq);
2367}
2368#endif
2369
2370/**
2371 *  stmmac_ioctl - Entry point for the Ioctl
2372 *  @dev: Device pointer.
 2373 *  @rq: An IOCTL specific structure that can contain a pointer to
2374 *  a proprietary structure used to pass information to the driver.
2375 *  @cmd: IOCTL command
2376 *  Description:
2377 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
2378 */
2379static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2380{
2381	struct stmmac_priv *priv = netdev_priv(dev);
2382	int ret = -EOPNOTSUPP;
2383
2384	if (!netif_running(dev))
2385		return -EINVAL;
2386
2387	switch (cmd) {
2388	case SIOCGMIIPHY:
2389	case SIOCGMIIREG:
2390	case SIOCSMIIREG:
2391		if (!priv->phydev)
2392			return -EINVAL;
2393		ret = phy_mii_ioctl(priv->phydev, rq, cmd);
2394		break;
2395	case SIOCSHWTSTAMP:
2396		ret = stmmac_hwtstamp_ioctl(dev, rq);
2397		break;
2398	default:
2399		break;
2400	}
2401
2402	return ret;
2403}
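/* Illustrative userspace sketch (example only, kept out of the build): how
 * the SIOCGMIIPHY/SIOCGMIIREG cases handled above are typically exercised to
 * read a PHY register.
 */
#if 0
	struct ifreq ifr = { };
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ioctl(fd, SIOCGMIIPHY, &ifr);	/* fills mii->phy_id */
	mii->reg_num = MII_BMSR;	/* basic mode status register */
	ioctl(fd, SIOCGMIIREG, &ifr);	/* result in mii->val_out */
	close(fd);
#endif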
2404
2405#ifdef CONFIG_STMMAC_DEBUG_FS
2406static struct dentry *stmmac_fs_dir;
2407static struct dentry *stmmac_rings_status;
2408static struct dentry *stmmac_dma_cap;
2409
2410static void sysfs_display_ring(void *head, int size, int extend_desc,
2411			       struct seq_file *seq)
2412{
2413	int i;
2414	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2415	struct dma_desc *p = (struct dma_desc *)head;
2416
2417	for (i = 0; i < size; i++) {
2418		u64 x;
2419		if (extend_desc) {
2420			x = *(u64 *) ep;
2421			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2422				   i, (unsigned int)virt_to_phys(ep),
2423				   (unsigned int)x, (unsigned int)(x >> 32),
2424				   ep->basic.des2, ep->basic.des3);
2425			ep++;
2426		} else {
2427			x = *(u64 *) p;
2428			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2429				   i, (unsigned int)virt_to_phys(p),
2430				   (unsigned int)x, (unsigned int)(x >> 32),
2431				   p->des2, p->des3);
2432			p++;
2433		}
2434		seq_printf(seq, "\n");
2435	}
2436}
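/* Each line printed above describes one descriptor: its ring index, its
 * physical address, the first two 32-bit words (des0/des1, taken from the
 * 64-bit load) and then des2/des3, which on this hardware typically carry
 * the buffer addresses (or the next-descriptor pointer in chain mode).
 */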
2437
2438static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2439{
2440	struct net_device *dev = seq->private;
2441	struct stmmac_priv *priv = netdev_priv(dev);
2442	unsigned int txsize = priv->dma_tx_size;
2443	unsigned int rxsize = priv->dma_rx_size;
2444
2445	if (priv->extend_desc) {
2446		seq_printf(seq, "Extended RX descriptor ring:\n");
2447		sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq);
2448		seq_printf(seq, "Extended TX descriptor ring:\n");
2449		sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq);
2450	} else {
2451		seq_printf(seq, "RX descriptor ring:\n");
2452		sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
2453		seq_printf(seq, "TX descriptor ring:\n");
2454		sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
2455	}
2456
2457	return 0;
2458}
2459
2460static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
2461{
2462	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
2463}
2464
2465static const struct file_operations stmmac_rings_status_fops = {
2466	.owner = THIS_MODULE,
2467	.open = stmmac_sysfs_ring_open,
2468	.read = seq_read,
2469	.llseek = seq_lseek,
2470	.release = single_release,
2471};
2472
2473static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
2474{
2475	struct net_device *dev = seq->private;
2476	struct stmmac_priv *priv = netdev_priv(dev);
2477
2478	if (!priv->hw_cap_support) {
2479		seq_printf(seq, "DMA HW features not supported\n");
2480		return 0;
2481	}
2482
2483	seq_printf(seq, "==============================\n");
2484	seq_printf(seq, "\tDMA HW features\n");
2485	seq_printf(seq, "==============================\n");
2486
2487	seq_printf(seq, "\t10/100 Mbps %s\n",
2488		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
2489	seq_printf(seq, "\t1000 Mbps %s\n",
2490		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
2491	seq_printf(seq, "\tHalf duplex %s\n",
2492		   (priv->dma_cap.half_duplex) ? "Y" : "N");
2493	seq_printf(seq, "\tHash Filter: %s\n",
2494		   (priv->dma_cap.hash_filter) ? "Y" : "N");
2495	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
2496		   (priv->dma_cap.multi_addr) ? "Y" : "N");
2497	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
2498		   (priv->dma_cap.pcs) ? "Y" : "N");
2499	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
2500		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
2501	seq_printf(seq, "\tPMT Remote wake up: %s\n",
2502		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
2503	seq_printf(seq, "\tPMT Magic Frame: %s\n",
2504		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
2505	seq_printf(seq, "\tRMON module: %s\n",
2506		   (priv->dma_cap.rmon) ? "Y" : "N");
2507	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
2508		   (priv->dma_cap.time_stamp) ? "Y" : "N");
2509	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
2510		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
2511	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
2512		   (priv->dma_cap.eee) ? "Y" : "N");
2513	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
2514	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
2515		   (priv->dma_cap.tx_coe) ? "Y" : "N");
2516	seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
2517		   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
2518	seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
2519		   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
2520	seq_printf(seq, "\tRXFIFO > 2048 bytes: %s\n",
2521		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
2522	seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
2523		   priv->dma_cap.number_rx_channel);
2524	seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
2525		   priv->dma_cap.number_tx_channel);
2526	seq_printf(seq, "\tEnhanced descriptors: %s\n",
2527		   (priv->dma_cap.enh_desc) ? "Y" : "N");
2528
2529	return 0;
2530}
2531
2532static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
2533{
2534	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
2535}
2536
2537static const struct file_operations stmmac_dma_cap_fops = {
2538	.owner = THIS_MODULE,
2539	.open = stmmac_sysfs_dma_cap_open,
2540	.read = seq_read,
2541	.llseek = seq_lseek,
2542	.release = single_release,
2543};
2544
2545static int stmmac_init_fs(struct net_device *dev)
2546{
2547	/* Create debugfs entries */
2548	stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
2549
2550	if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
2551		pr_err("ERROR %s, debugfs create directory failed\n",
2552		       STMMAC_RESOURCE_NAME);
2553
2554		return -ENOMEM;
2555	}
2556
2557	/* Entry to report DMA RX/TX rings */
2558	stmmac_rings_status = debugfs_create_file("descriptors_status",
2559						  S_IRUGO, stmmac_fs_dir, dev,
2560						  &stmmac_rings_status_fops);
2561
2562	if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
2563		pr_info("ERROR creating stmmac ring debugfs file\n");
2564		debugfs_remove(stmmac_fs_dir);
2565
2566		return -ENOMEM;
2567	}
2568
2569	/* Entry to report the DMA HW features */
2570	stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
2571					     dev, &stmmac_dma_cap_fops);
2572
2573	if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
2574		pr_info("ERROR creating stmmac DMA capability debugfs file\n");
2575		debugfs_remove(stmmac_rings_status);
2576		debugfs_remove(stmmac_fs_dir);
2577
2578		return -ENOMEM;
2579	}
2580
2581	return 0;
2582}
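/* Assuming debugfs is mounted at /sys/kernel/debug and STMMAC_RESOURCE_NAME
 * expands to "stmmaceth", the two entries created above are typically read
 * as:
 *   cat /sys/kernel/debug/stmmaceth/descriptors_status
 *   cat /sys/kernel/debug/stmmaceth/dma_cap
 */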
2583
2584static void stmmac_exit_fs(void)
2585{
2586	debugfs_remove(stmmac_rings_status);
2587	debugfs_remove(stmmac_dma_cap);
2588	debugfs_remove(stmmac_fs_dir);
2589}
2590#endif /* CONFIG_STMMAC_DEBUG_FS */
2591
2592static const struct net_device_ops stmmac_netdev_ops = {
2593	.ndo_open = stmmac_open,
2594	.ndo_start_xmit = stmmac_xmit,
2595	.ndo_stop = stmmac_release,
2596	.ndo_change_mtu = stmmac_change_mtu,
2597	.ndo_fix_features = stmmac_fix_features,
2598	.ndo_set_rx_mode = stmmac_set_rx_mode,
2599	.ndo_tx_timeout = stmmac_tx_timeout,
2600	.ndo_do_ioctl = stmmac_ioctl,
2601	.ndo_set_config = stmmac_config,
2602#ifdef CONFIG_NET_POLL_CONTROLLER
2603	.ndo_poll_controller = stmmac_poll_controller,
2604#endif
2605	.ndo_set_mac_address = eth_mac_addr,
2606};
2607
2608/**
2609 *  stmmac_hw_init - Init the MAC device
2610 *  @priv: driver private structure
2611 *  Description: this function detects which MAC device
2612 * (GMAC/MAC10-100) has to be attached, checks the HW capability
2613 * (if supported) and sets the driver's features (for example
2614 * whether to use the ring or chain mode, or the normal/enhanced
2615 * descriptor structure).
2616 */
2617static int stmmac_hw_init(struct stmmac_priv *priv)
2618{
2619	int ret;
2620	struct mac_device_info *mac;
2621
2622	/* Identify the MAC HW device */
2623	if (priv->plat->has_gmac) {
2624		priv->dev->priv_flags |= IFF_UNICAST_FLT;
2625		mac = dwmac1000_setup(priv->ioaddr);
2626	} else {
2627		mac = dwmac100_setup(priv->ioaddr);
2628	}
2629	if (!mac)
2630		return -ENOMEM;
2631
2632	priv->hw = mac;
2633
2634	/* Get and dump the chip ID */
2635	priv->synopsys_id = stmmac_get_synopsys_id(priv);
2636
2637	/* To use the chained or ring mode */
2638	if (chain_mode) {
2639		priv->hw->mode = &chain_mode_ops;
2640		pr_info(" Chain mode enabled\n");
2641		priv->mode = STMMAC_CHAIN_MODE;
2642	} else {
2643		priv->hw->mode = &ring_mode_ops;
2644		pr_info(" Ring mode enabled\n");
2645		priv->mode = STMMAC_RING_MODE;
2646	}
2647
2648	/* Get the HW capability register (GMAC cores newer than 3.50a) */
2649	priv->hw_cap_support = stmmac_get_hw_features(priv);
2650	if (priv->hw_cap_support) {
2651		pr_info(" DMA HW capability register supported");
2652
2653		/* Override some GMAC/DMA configuration fields (e.g.
2654		 * enh_desc, tx_coe) passed through the platform data
2655		 * with the values from the HW capability register
2656		 * (if supported).
2657		 */
2658		priv->plat->enh_desc = priv->dma_cap.enh_desc;
2659		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
2660
2661		priv->plat->tx_coe = priv->dma_cap.tx_coe;
2662
2663		if (priv->dma_cap.rx_coe_type2)
2664			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
2665		else if (priv->dma_cap.rx_coe_type1)
2666			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
2667
2668	} else
2669		pr_info(" No HW DMA feature register supported");
2670
2671	/* To use alternate (extended) or normal descriptor structures */
2672	stmmac_selec_desc_mode(priv);
2673
2674	ret = priv->hw->mac->rx_ipc(priv->ioaddr);
2675	if (!ret) {
2676		pr_warn(" RX IPC Checksum Offload not configured.\n");
2677		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2678	}
2679
2680	if (priv->plat->rx_coe)
2681		pr_info(" RX Checksum Offload Engine supported (type %d)\n",
2682			priv->plat->rx_coe);
2683	if (priv->plat->tx_coe)
2684		pr_info(" TX Checksum insertion supported\n");
2685
2686	if (priv->plat->pmt) {
2687		pr_info(" Wake-Up On Lan supported\n");
2688		device_set_wakeup_capable(priv->device, 1);
2689	}
2690
2691	return 0;
2692}
2693
2694/**
2695 * stmmac_dvr_probe
2696 * @device: device pointer
2697 * @plat_dat: platform data pointer
2698 * @addr: iobase memory address
2699 * Description: this is the main probe function; it allocates the
2700 * net device via alloc_etherdev() and sets up the private structure.
2701 */
2702struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2703				     struct plat_stmmacenet_data *plat_dat,
2704				     void __iomem *addr)
2705{
2706	int ret = 0;
2707	struct net_device *ndev = NULL;
2708	struct stmmac_priv *priv;
2709
2710	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
2711	if (!ndev)
2712		return NULL;
2713
2714	SET_NETDEV_DEV(ndev, device);
2715
2716	priv = netdev_priv(ndev);
2717	priv->device = device;
2718	priv->dev = ndev;
2719
2720	ether_setup(ndev);
2721
2722	stmmac_set_ethtool_ops(ndev);
2723	priv->pause = pause;
2724	priv->plat = plat_dat;
2725	priv->ioaddr = addr;
2726	priv->dev->base_addr = (unsigned long)addr;
2727
2728	/* Verify driver arguments */
2729	stmmac_verify_args();
2730
2731	/* Override with kernel parameters if supplied XXX CRS XXX
2732	 * this needs to have multiple instances
2733	 */
2734	if ((phyaddr >= 0) && (phyaddr <= 31))
2735		priv->plat->phy_addr = phyaddr;
2736
2737	priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
2738	if (IS_ERR(priv->stmmac_clk)) {
2739		dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
2740			 __func__);
2741		ret = PTR_ERR(priv->stmmac_clk);
2742		goto error_clk_get;
2743	}
2744	clk_prepare_enable(priv->stmmac_clk);
2745
2746	priv->stmmac_rst = devm_reset_control_get(priv->device,
2747						  STMMAC_RESOURCE_NAME);
2748	if (IS_ERR(priv->stmmac_rst)) {
2749		if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
2750			ret = -EPROBE_DEFER;
2751			goto error_hw_init;
2752		}
2753		dev_info(priv->device, "no reset control found\n");
2754		priv->stmmac_rst = NULL;
2755	}
2756	if (priv->stmmac_rst)
2757		reset_control_deassert(priv->stmmac_rst);
2758
2759	/* Init MAC and get the capabilities */
2760	ret = stmmac_hw_init(priv);
2761	if (ret)
2762		goto error_hw_init;
2763
2764	ndev->netdev_ops = &stmmac_netdev_ops;
2765
2766	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2767			    NETIF_F_RXCSUM;
2768	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
2769	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
2770#ifdef STMMAC_VLAN_TAG_USED
2771	/* Both mac100 and gmac support receive VLAN tag detection */
2772	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2773#endif
2774	priv->msg_enable = netif_msg_init(debug, default_msg_level);
2775
2776	if (flow_ctrl)
2777		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
2778
2779	/* The Rx Watchdog is available in cores newer than 3.40.
2780	 * In some cases, for example on buggy HW, this feature
2781	 * has to be disabled; this can be done by passing the
2782	 * riwt_off field from the platform data.
2783	 */
2784	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
2785		priv->use_riwt = 1;
2786		pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
2787	}
2788
2789	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
2790
2791	spin_lock_init(&priv->lock);
2792	spin_lock_init(&priv->tx_lock);
2793
2794	ret = register_netdev(ndev);
2795	if (ret) {
2796		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
2797		goto error_netdev_register;
2798	}
2799
2800	/* If a specific clk_csr value is passed from the platform
2801	 * this means that the CSR Clock Range selection cannot be
2802	 * changed at run-time and it is fixed. Otherwise the driver will
2803	 * try to set the MDC clock dynamically according to the actual
2804	 * CSR input clock.
2805	 */
2806	if (!priv->plat->clk_csr)
2807		stmmac_clk_csr_set(priv);
2808	else
2809		priv->clk_csr = priv->plat->clk_csr;
2810
2811	stmmac_check_pcs_mode(priv);
2812
2813	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
2814	    priv->pcs != STMMAC_PCS_RTBI) {
2815		/* MDIO bus Registration */
2816		ret = stmmac_mdio_register(ndev);
2817		if (ret < 0) {
2818			pr_debug("%s: MDIO bus (id: %d) registration failed",
2819				 __func__, priv->plat->bus_id);
2820			goto error_mdio_register;
2821		}
2822	}
2823
2824	return priv;
2825
2826error_mdio_register:
2827	unregister_netdev(ndev);
2828error_netdev_register:
2829	netif_napi_del(&priv->napi);
2830error_hw_init:
2831	clk_disable_unprepare(priv->stmmac_clk);
2832error_clk_get:
2833	free_netdev(ndev);
2834
2835	return ERR_PTR(ret);
2836}
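/* Illustrative sketch, assuming <linux/platform_device.h>: a simplified
 * version of what a platform glue layer does, i.e. map the MMIO region and
 * hand it, together with the platform data, to stmmac_dvr_probe().
 */
static inline int stmmac_example_glue_probe(struct platform_device *pdev,
					    struct plat_stmmacenet_data *plat)
{
	struct stmmac_priv *priv;
	struct resource *res;
	void __iomem *addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	priv = stmmac_dvr_probe(&pdev->dev, plat, addr);
	if (IS_ERR_OR_NULL(priv))
		return priv ? PTR_ERR(priv) : -ENOMEM;

	/* Save the net_device so remove/suspend/resume can find it */
	platform_set_drvdata(pdev, priv->dev);

	return 0;
}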
2837
2838/**
2839 * stmmac_dvr_remove
2840 * @ndev: net device pointer
2841 * Description: this function stops the TX/RX DMA processes, disables the
2842 * MAC RX/TX, changes the link status and releases the DMA descriptor rings.
2843 */
2844int stmmac_dvr_remove(struct net_device *ndev)
2845{
2846	struct stmmac_priv *priv = netdev_priv(ndev);
2847
2848	pr_info("%s:\n\tremoving driver", __func__);
2849
2850	priv->hw->dma->stop_rx(priv->ioaddr);
2851	priv->hw->dma->stop_tx(priv->ioaddr);
2852
2853	stmmac_set_mac(priv->ioaddr, false);
2854	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
2855	    priv->pcs != STMMAC_PCS_RTBI)
2856		stmmac_mdio_unregister(ndev);
2857	netif_carrier_off(ndev);
2858	unregister_netdev(ndev);
2859	if (priv->stmmac_rst)
2860		reset_control_assert(priv->stmmac_rst);
2861	clk_disable_unprepare(priv->stmmac_clk);
2862	free_netdev(ndev);
2863
2864	return 0;
2865}
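/* Illustrative sketch, assuming <linux/platform_device.h>: the matching
 * glue-level remove fetches the net_device saved at probe time and lets
 * stmmac_dvr_remove() tear everything down.
 */
static inline int stmmac_example_glue_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	return stmmac_dvr_remove(ndev);
}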
2866
2867#ifdef CONFIG_PM
2868int stmmac_suspend(struct net_device *ndev)
2869{
2870	struct stmmac_priv *priv = netdev_priv(ndev);
2871	unsigned long flags;
2872
2873	if (!ndev || !netif_running(ndev))
2874		return 0;
2875
2876	if (priv->phydev)
2877		phy_stop(priv->phydev);
2878
2879	spin_lock_irqsave(&priv->lock, flags);
2880
2881	netif_device_detach(ndev);
2882	netif_stop_queue(ndev);
2883
2884	napi_disable(&priv->napi);
2885
2886	/* Stop TX/RX DMA */
2887	priv->hw->dma->stop_tx(priv->ioaddr);
2888	priv->hw->dma->stop_rx(priv->ioaddr);
2889
2890	stmmac_clear_descriptors(priv);
2891
2892	/* Enable Power down mode by programming the PMT regs */
2893	if (device_may_wakeup(priv->device)) {
2894		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
2895		priv->irq_wake = 1;
2896	} else {
2897		stmmac_set_mac(priv->ioaddr, false);
2898		pinctrl_pm_select_sleep_state(priv->device);
2899		/* Disable the clock since PMT wake-up is not used */
2900		clk_disable_unprepare(priv->stmmac_clk);
2901	}
2902	spin_unlock_irqrestore(&priv->lock, flags);
2903	return 0;
2904}
2905
2906int stmmac_resume(struct net_device *ndev)
2907{
2908	struct stmmac_priv *priv = netdev_priv(ndev);
2909	unsigned long flags;
2910
2911	if (!netif_running(ndev))
2912		return 0;
2913
2914	spin_lock_irqsave(&priv->lock, flags);
2915
2916	/* The Power-Down bit in the PMT register is cleared
2917	 * automatically as soon as a magic packet or a Wake-up frame
2918	 * is received. Nevertheless, it is better to clear this bit
2919	 * manually because it can cause problems when resuming
2920	 * from other devices (e.g. a serial console).
2921	 */
2922	if (device_may_wakeup(priv->device)) {
2923		priv->hw->mac->pmt(priv->ioaddr, 0);
2924		priv->irq_wake = 0;
2925	} else {
2926		pinctrl_pm_select_default_state(priv->device);
2927		/* enable the clock previously disabled */
2928		clk_prepare_enable(priv->stmmac_clk);
2929		/* reset the phy so that it's ready */
2930		if (priv->mii)
2931			stmmac_mdio_reset(priv->mii);
2932	}
2933
2934	netif_device_attach(ndev);
2935
2936	stmmac_hw_setup(ndev);
2937
2938	napi_enable(&priv->napi);
2939
2940	netif_start_queue(ndev);
2941
2942	spin_unlock_irqrestore(&priv->lock, flags);
2943
2944	if (priv->phydev)
2945		phy_start(priv->phydev);
2946
2947	return 0;
2948}
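/* Illustrative sketch, assuming <linux/pm.h> and that the bus glue stored
 * the net_device as drvdata: how these helpers are typically wired into a
 * dev_pm_ops structure.
 */
static int stmmac_example_pm_suspend(struct device *dev)
{
	return stmmac_suspend(dev_get_drvdata(dev));
}

static int stmmac_example_pm_resume(struct device *dev)
{
	return stmmac_resume(dev_get_drvdata(dev));
}

static SIMPLE_DEV_PM_OPS(stmmac_example_pm_ops,
			 stmmac_example_pm_suspend, stmmac_example_pm_resume);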
2949#endif /* CONFIG_PM */
2950
2951/* The driver can be built with or without both the PCI and platform glue
2952 * drivers, depending on the configuration selected.
2953 */
2954static int __init stmmac_init(void)
2955{
2956	int ret;
2957
2958	ret = stmmac_register_platform();
2959	if (ret)
2960		goto err;
2961	ret = stmmac_register_pci();
2962	if (ret)
2963		goto err_pci;
2964	return 0;
2965err_pci:
2966	stmmac_unregister_platform();
2967err:
2968	pr_err("stmmac: driver registration failed\n");
2969	return ret;
2970}
2971
2972static void __exit stmmac_exit(void)
2973{
2974	stmmac_unregister_platform();
2975	stmmac_unregister_pci();
2976}
2977
2978module_init(stmmac_init);
2979module_exit(stmmac_exit);
2980
2981#ifndef MODULE
2982static int __init stmmac_cmdline_opt(char *str)
2983{
2984	char *opt;
2985
2986	if (!str || !*str)
2987		return -EINVAL;
2988	while ((opt = strsep(&str, ",")) != NULL) {
2989		if (!strncmp(opt, "debug:", 6)) {
2990			if (kstrtoint(opt + 6, 0, &debug))
2991				goto err;
2992		} else if (!strncmp(opt, "phyaddr:", 8)) {
2993			if (kstrtoint(opt + 8, 0, &phyaddr))
2994				goto err;
2995		} else if (!strncmp(opt, "dma_txsize:", 11)) {
2996			if (kstrtoint(opt + 11, 0, &dma_txsize))
2997				goto err;
2998		} else if (!strncmp(opt, "dma_rxsize:", 11)) {
2999			if (kstrtoint(opt + 11, 0, &dma_rxsize))
3000				goto err;
3001		} else if (!strncmp(opt, "buf_sz:", 7)) {
3002			if (kstrtoint(opt + 7, 0, &buf_sz))
3003				goto err;
3004		} else if (!strncmp(opt, "tc:", 3)) {
3005			if (kstrtoint(opt + 3, 0, &tc))
3006				goto err;
3007		} else if (!strncmp(opt, "watchdog:", 9)) {
3008			if (kstrtoint(opt + 9, 0, &watchdog))
3009				goto err;
3010		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
3011			if (kstrtoint(opt + 10, 0, &flow_ctrl))
3012				goto err;
3013		} else if (!strncmp(opt, "pause:", 6)) {
3014			if (kstrtoint(opt + 6, 0, &pause))
3015				goto err;
3016		} else if (!strncmp(opt, "eee_timer:", 10)) {
3017			if (kstrtoint(opt + 10, 0, &eee_timer))
3018				goto err;
3019		} else if (!strncmp(opt, "chain_mode:", 11)) {
3020			if (kstrtoint(opt + 11, 0, &chain_mode))
3021				goto err;
3022		}
3023	}
3024	return 0;
3025
3026err:
3027	pr_err("%s: ERROR broken module parameter conversion", __func__);
3028	return -EINVAL;
3029}
3030
3031__setup("stmmaceth=", stmmac_cmdline_opt);
3032#endif /* MODULE */
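/* Example (built-in driver only): options are taken from the kernel command
 * line through the __setup() hook above, e.g.
 *   stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 */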
3033
3034MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3035MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3036MODULE_LICENSE("GPL");