Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15*******************************************************************************/
16
17#include <linux/clk.h>
18#include <linux/kernel.h>
19#include <linux/interrupt.h>
20#include <linux/ip.h>
21#include <linux/tcp.h>
22#include <linux/skbuff.h>
23#include <linux/ethtool.h>
24#include <linux/if_ether.h>
25#include <linux/crc32.h>
26#include <linux/mii.h>
27#include <linux/if.h>
28#include <linux/if_vlan.h>
29#include <linux/dma-mapping.h>
30#include <linux/slab.h>
31#include <linux/pm_runtime.h>
32#include <linux/prefetch.h>
33#include <linux/pinctrl/consumer.h>
34#ifdef CONFIG_DEBUG_FS
35#include <linux/debugfs.h>
36#include <linux/seq_file.h>
37#endif /* CONFIG_DEBUG_FS */
38#include <linux/net_tstamp.h>
39#include <linux/phylink.h>
40#include <linux/udp.h>
41#include <linux/bpf_trace.h>
42#include <net/page_pool/helpers.h>
43#include <net/pkt_cls.h>
44#include <net/xdp_sock_drv.h>
45#include "stmmac_ptp.h"
46#include "stmmac_fpe.h"
47#include "stmmac.h"
48#include "stmmac_xdp.h"
49#include <linux/reset.h>
50#include <linux/of_mdio.h>
51#include "dwmac1000.h"
52#include "dwxgmac2.h"
53#include "hwif.h"
54
55/* As long as the interface is active, we keep the timestamping counter enabled
56 * with fine resolution and binary rollover. This avoid non-monotonic behavior
57 * (clock jumps) when changing timestamping settings at runtime.
58 */
59#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 PTP_TCR_TSCTRLSSR)
61
62#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
64
65/* Module parameters */
66#define TX_TIMEO 5000
67static int watchdog = TX_TIMEO;
68module_param(watchdog, int, 0644);
69MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70
71static int debug = -1;
72module_param(debug, int, 0644);
73MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74
75static int phyaddr = -1;
76module_param(phyaddr, int, 0444);
77MODULE_PARM_DESC(phyaddr, "Physical device address");
78
79#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
80#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
81
82/* Limit to make sure XDP TX and slow path can coexist */
83#define STMMAC_XSK_TX_BUDGET_MAX 256
84#define STMMAC_TX_XSK_AVAIL 16
85#define STMMAC_RX_FILL_BATCH 16
86
87#define STMMAC_XDP_PASS 0
88#define STMMAC_XDP_CONSUMED BIT(0)
89#define STMMAC_XDP_TX BIT(1)
90#define STMMAC_XDP_REDIRECT BIT(2)
91
92static int flow_ctrl = FLOW_AUTO;
93module_param(flow_ctrl, int, 0644);
94MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
95
96static int pause = PAUSE_TIME;
97module_param(pause, int, 0644);
98MODULE_PARM_DESC(pause, "Flow Control Pause Time");
99
100#define TC_DEFAULT 64
101static int tc = TC_DEFAULT;
102module_param(tc, int, 0644);
103MODULE_PARM_DESC(tc, "DMA threshold control value");
104
105#define DEFAULT_BUFSIZE 1536
106static int buf_sz = DEFAULT_BUFSIZE;
107module_param(buf_sz, int, 0644);
108MODULE_PARM_DESC(buf_sz, "DMA buffer size");
109
110#define STMMAC_RX_COPYBREAK 256
111
112static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
113 NETIF_MSG_LINK | NETIF_MSG_IFUP |
114 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
115
116#define STMMAC_DEFAULT_LPI_TIMER 1000
117static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
118module_param(eee_timer, int, 0644);
119MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
120#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
121
122/* By default the driver will use the ring mode to manage tx and rx descriptors,
123 * but allow user to force to use the chain instead of the ring
124 */
125static unsigned int chain_mode;
126module_param(chain_mode, int, 0444);
127MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
128
129static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
130/* For MSI interrupts handling */
131static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
132static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
133static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
134static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
135static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
136static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
137static void stmmac_reset_queues_param(struct stmmac_priv *priv);
138static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
139static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
140static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
141 u32 rxmode, u32 chan);
142
143#ifdef CONFIG_DEBUG_FS
144static const struct net_device_ops stmmac_netdev_ops;
145static void stmmac_init_fs(struct net_device *dev);
146static void stmmac_exit_fs(struct net_device *dev);
147#endif
148
149#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
150
151int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
152{
153 int ret = 0;
154
155 if (enabled) {
156 ret = clk_prepare_enable(priv->plat->stmmac_clk);
157 if (ret)
158 return ret;
159 ret = clk_prepare_enable(priv->plat->pclk);
160 if (ret) {
161 clk_disable_unprepare(priv->plat->stmmac_clk);
162 return ret;
163 }
164 if (priv->plat->clks_config) {
165 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
166 if (ret) {
167 clk_disable_unprepare(priv->plat->stmmac_clk);
168 clk_disable_unprepare(priv->plat->pclk);
169 return ret;
170 }
171 }
172 } else {
173 clk_disable_unprepare(priv->plat->stmmac_clk);
174 clk_disable_unprepare(priv->plat->pclk);
175 if (priv->plat->clks_config)
176 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
177 }
178
179 return ret;
180}
181EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
182
183/**
184 * stmmac_verify_args - verify the driver parameters.
185 * Description: it checks the driver parameters and set a default in case of
186 * errors.
187 */
188static void stmmac_verify_args(void)
189{
190 if (unlikely(watchdog < 0))
191 watchdog = TX_TIMEO;
192 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
193 buf_sz = DEFAULT_BUFSIZE;
194 if (unlikely(flow_ctrl > 1))
195 flow_ctrl = FLOW_AUTO;
196 else if (likely(flow_ctrl < 0))
197 flow_ctrl = FLOW_OFF;
198 if (unlikely((pause < 0) || (pause > 0xffff)))
199 pause = PAUSE_TIME;
200 if (eee_timer < 0)
201 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
202}
203
204static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
205{
206 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
207 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
208 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
209 u32 queue;
210
211 for (queue = 0; queue < maxq; queue++) {
212 struct stmmac_channel *ch = &priv->channel[queue];
213
214 if (stmmac_xdp_is_enabled(priv) &&
215 test_bit(queue, priv->af_xdp_zc_qps)) {
216 napi_disable(&ch->rxtx_napi);
217 continue;
218 }
219
220 if (queue < rx_queues_cnt)
221 napi_disable(&ch->rx_napi);
222 if (queue < tx_queues_cnt)
223 napi_disable(&ch->tx_napi);
224 }
225}
226
227/**
228 * stmmac_disable_all_queues - Disable all queues
229 * @priv: driver private structure
230 */
231static void stmmac_disable_all_queues(struct stmmac_priv *priv)
232{
233 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
234 struct stmmac_rx_queue *rx_q;
235 u32 queue;
236
237 /* synchronize_rcu() needed for pending XDP buffers to drain */
238 for (queue = 0; queue < rx_queues_cnt; queue++) {
239 rx_q = &priv->dma_conf.rx_queue[queue];
240 if (rx_q->xsk_pool) {
241 synchronize_rcu();
242 break;
243 }
244 }
245
246 __stmmac_disable_all_queues(priv);
247}
248
249/**
250 * stmmac_enable_all_queues - Enable all queues
251 * @priv: driver private structure
252 */
253static void stmmac_enable_all_queues(struct stmmac_priv *priv)
254{
255 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
256 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
257 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
258 u32 queue;
259
260 for (queue = 0; queue < maxq; queue++) {
261 struct stmmac_channel *ch = &priv->channel[queue];
262
263 if (stmmac_xdp_is_enabled(priv) &&
264 test_bit(queue, priv->af_xdp_zc_qps)) {
265 napi_enable(&ch->rxtx_napi);
266 continue;
267 }
268
269 if (queue < rx_queues_cnt)
270 napi_enable(&ch->rx_napi);
271 if (queue < tx_queues_cnt)
272 napi_enable(&ch->tx_napi);
273 }
274}
275
276static void stmmac_service_event_schedule(struct stmmac_priv *priv)
277{
278 if (!test_bit(STMMAC_DOWN, &priv->state) &&
279 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
280 queue_work(priv->wq, &priv->service_task);
281}
282
283static void stmmac_global_err(struct stmmac_priv *priv)
284{
285 netif_carrier_off(priv->dev);
286 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
287 stmmac_service_event_schedule(priv);
288}
289
290/**
291 * stmmac_clk_csr_set - dynamically set the MDC clock
292 * @priv: driver private structure
293 * Description: this is to dynamically set the MDC clock according to the csr
294 * clock input.
295 * Note:
296 * If a specific clk_csr value is passed from the platform
297 * this means that the CSR Clock Range selection cannot be
298 * changed at run-time and it is fixed (as reported in the driver
299 * documentation). Viceversa the driver will try to set the MDC
300 * clock dynamically according to the actual clock input.
301 */
302static void stmmac_clk_csr_set(struct stmmac_priv *priv)
303{
304 u32 clk_rate;
305
306 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
307
308 /* Platform provided default clk_csr would be assumed valid
309 * for all other cases except for the below mentioned ones.
310 * For values higher than the IEEE 802.3 specified frequency
311 * we can not estimate the proper divider as it is not known
312 * the frequency of clk_csr_i. So we do not change the default
313 * divider.
314 */
315 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
316 if (clk_rate < CSR_F_35M)
317 priv->clk_csr = STMMAC_CSR_20_35M;
318 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
319 priv->clk_csr = STMMAC_CSR_35_60M;
320 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
321 priv->clk_csr = STMMAC_CSR_60_100M;
322 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
323 priv->clk_csr = STMMAC_CSR_100_150M;
324 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
325 priv->clk_csr = STMMAC_CSR_150_250M;
326 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
327 priv->clk_csr = STMMAC_CSR_250_300M;
328 }
329
330 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
331 if (clk_rate > 160000000)
332 priv->clk_csr = 0x03;
333 else if (clk_rate > 80000000)
334 priv->clk_csr = 0x02;
335 else if (clk_rate > 40000000)
336 priv->clk_csr = 0x01;
337 else
338 priv->clk_csr = 0;
339 }
340
341 if (priv->plat->has_xgmac) {
342 if (clk_rate > 400000000)
343 priv->clk_csr = 0x5;
344 else if (clk_rate > 350000000)
345 priv->clk_csr = 0x4;
346 else if (clk_rate > 300000000)
347 priv->clk_csr = 0x3;
348 else if (clk_rate > 250000000)
349 priv->clk_csr = 0x2;
350 else if (clk_rate > 150000000)
351 priv->clk_csr = 0x1;
352 else
353 priv->clk_csr = 0x0;
354 }
355}
356
357static void print_pkt(unsigned char *buf, int len)
358{
359 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
360 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
361}
362
363static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
364{
365 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
366 u32 avail;
367
368 if (tx_q->dirty_tx > tx_q->cur_tx)
369 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
370 else
371 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
372
373 return avail;
374}
375
376/**
377 * stmmac_rx_dirty - Get RX queue dirty
378 * @priv: driver private structure
379 * @queue: RX queue index
380 */
381static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
382{
383 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
384 u32 dirty;
385
386 if (rx_q->dirty_rx <= rx_q->cur_rx)
387 dirty = rx_q->cur_rx - rx_q->dirty_rx;
388 else
389 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
390
391 return dirty;
392}
393
394static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
395{
396 int tx_lpi_timer;
397
398 /* Clear/set the SW EEE timer flag based on LPI ET enablement */
399 priv->eee_sw_timer_en = en ? 0 : 1;
400 tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
401 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
402}
403
404/**
405 * stmmac_enable_eee_mode - check and enter in LPI mode
406 * @priv: driver private structure
407 * Description: this function is to verify and enter in LPI mode in case of
408 * EEE.
409 */
410static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
411{
412 u32 tx_cnt = priv->plat->tx_queues_to_use;
413 u32 queue;
414
415 /* check if all TX queues have the work finished */
416 for (queue = 0; queue < tx_cnt; queue++) {
417 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
418
419 if (tx_q->dirty_tx != tx_q->cur_tx)
420 return -EBUSY; /* still unfinished work */
421 }
422
423 /* Check and enter in LPI mode */
424 if (!priv->tx_path_in_lpi_mode)
425 stmmac_set_eee_mode(priv, priv->hw,
426 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
427 return 0;
428}
429
430/**
431 * stmmac_disable_eee_mode - disable and exit from LPI mode
432 * @priv: driver private structure
433 * Description: this function is to exit and disable EEE in case of
434 * LPI state is true. This is called by the xmit.
435 */
436void stmmac_disable_eee_mode(struct stmmac_priv *priv)
437{
438 if (!priv->eee_sw_timer_en) {
439 stmmac_lpi_entry_timer_config(priv, 0);
440 return;
441 }
442
443 stmmac_reset_eee_mode(priv, priv->hw);
444 del_timer_sync(&priv->eee_ctrl_timer);
445 priv->tx_path_in_lpi_mode = false;
446}
447
448/**
449 * stmmac_eee_ctrl_timer - EEE TX SW timer.
450 * @t: timer_list struct containing private info
451 * Description:
452 * if there is no data transfer and if we are not in LPI state,
453 * then MAC Transmitter can be moved to LPI state.
454 */
455static void stmmac_eee_ctrl_timer(struct timer_list *t)
456{
457 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
458
459 if (stmmac_enable_eee_mode(priv))
460 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
461}
462
463/**
464 * stmmac_eee_init - init EEE
465 * @priv: driver private structure
466 * Description:
467 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
468 * can also manage EEE, this function enable the LPI state and start related
469 * timer.
470 */
471bool stmmac_eee_init(struct stmmac_priv *priv)
472{
473 int eee_tw_timer = priv->eee_tw_timer;
474
475 /* Check if MAC core supports the EEE feature. */
476 if (!priv->dma_cap.eee)
477 return false;
478
479 mutex_lock(&priv->lock);
480
481 /* Check if it needs to be deactivated */
482 if (!priv->eee_active) {
483 if (priv->eee_enabled) {
484 netdev_dbg(priv->dev, "disable EEE\n");
485 stmmac_lpi_entry_timer_config(priv, 0);
486 del_timer_sync(&priv->eee_ctrl_timer);
487 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
488 if (priv->hw->xpcs)
489 xpcs_config_eee(priv->hw->xpcs,
490 priv->plat->mult_fact_100ns,
491 false);
492 }
493 mutex_unlock(&priv->lock);
494 return false;
495 }
496
497 if (priv->eee_active && !priv->eee_enabled) {
498 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
499 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
500 eee_tw_timer);
501 if (priv->hw->xpcs)
502 xpcs_config_eee(priv->hw->xpcs,
503 priv->plat->mult_fact_100ns,
504 true);
505 }
506
507 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
508 del_timer_sync(&priv->eee_ctrl_timer);
509 priv->tx_path_in_lpi_mode = false;
510 stmmac_lpi_entry_timer_config(priv, 1);
511 } else {
512 stmmac_lpi_entry_timer_config(priv, 0);
513 mod_timer(&priv->eee_ctrl_timer,
514 STMMAC_LPI_T(priv->tx_lpi_timer));
515 }
516
517 mutex_unlock(&priv->lock);
518 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
519 return true;
520}
521
522/* stmmac_get_tx_hwtstamp - get HW TX timestamps
523 * @priv: driver private structure
524 * @p : descriptor pointer
525 * @skb : the socket buffer
526 * Description :
527 * This function will read timestamp from the descriptor & pass it to stack.
528 * and also perform some sanity checks.
529 */
530static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
531 struct dma_desc *p, struct sk_buff *skb)
532{
533 struct skb_shared_hwtstamps shhwtstamp;
534 bool found = false;
535 u64 ns = 0;
536
537 if (!priv->hwts_tx_en)
538 return;
539
540 /* exit if skb doesn't support hw tstamp */
541 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
542 return;
543
544 /* check tx tstamp status */
545 if (stmmac_get_tx_timestamp_status(priv, p)) {
546 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
547 found = true;
548 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
549 found = true;
550 }
551
552 if (found) {
553 ns -= priv->plat->cdc_error_adj;
554
555 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
556 shhwtstamp.hwtstamp = ns_to_ktime(ns);
557
558 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
559 /* pass tstamp to stack */
560 skb_tstamp_tx(skb, &shhwtstamp);
561 }
562}
563
564/* stmmac_get_rx_hwtstamp - get HW RX timestamps
565 * @priv: driver private structure
566 * @p : descriptor pointer
567 * @np : next descriptor pointer
568 * @skb : the socket buffer
569 * Description :
570 * This function will read received packet's timestamp from the descriptor
571 * and pass it to stack. It also perform some sanity checks.
572 */
573static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
574 struct dma_desc *np, struct sk_buff *skb)
575{
576 struct skb_shared_hwtstamps *shhwtstamp = NULL;
577 struct dma_desc *desc = p;
578 u64 ns = 0;
579
580 if (!priv->hwts_rx_en)
581 return;
582 /* For GMAC4, the valid timestamp is from CTX next desc. */
583 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
584 desc = np;
585
586 /* Check if timestamp is available */
587 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
588 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
589
590 ns -= priv->plat->cdc_error_adj;
591
592 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
593 shhwtstamp = skb_hwtstamps(skb);
594 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
595 shhwtstamp->hwtstamp = ns_to_ktime(ns);
596 } else {
597 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
598 }
599}
600
601/**
602 * stmmac_hwtstamp_set - control hardware timestamping.
603 * @dev: device pointer.
604 * @ifr: An IOCTL specific structure, that can contain a pointer to
605 * a proprietary structure used to pass information to the driver.
606 * Description:
607 * This function configures the MAC to enable/disable both outgoing(TX)
608 * and incoming(RX) packets time stamping based on user input.
609 * Return Value:
610 * 0 on success and an appropriate -ve integer on failure.
611 */
612static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
613{
614 struct stmmac_priv *priv = netdev_priv(dev);
615 struct hwtstamp_config config;
616 u32 ptp_v2 = 0;
617 u32 tstamp_all = 0;
618 u32 ptp_over_ipv4_udp = 0;
619 u32 ptp_over_ipv6_udp = 0;
620 u32 ptp_over_ethernet = 0;
621 u32 snap_type_sel = 0;
622 u32 ts_master_en = 0;
623 u32 ts_event_en = 0;
624
625 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
626 netdev_alert(priv->dev, "No support for HW time stamping\n");
627 priv->hwts_tx_en = 0;
628 priv->hwts_rx_en = 0;
629
630 return -EOPNOTSUPP;
631 }
632
633 if (copy_from_user(&config, ifr->ifr_data,
634 sizeof(config)))
635 return -EFAULT;
636
637 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
638 __func__, config.flags, config.tx_type, config.rx_filter);
639
640 if (config.tx_type != HWTSTAMP_TX_OFF &&
641 config.tx_type != HWTSTAMP_TX_ON)
642 return -ERANGE;
643
644 if (priv->adv_ts) {
645 switch (config.rx_filter) {
646 case HWTSTAMP_FILTER_NONE:
647 /* time stamp no incoming packet at all */
648 config.rx_filter = HWTSTAMP_FILTER_NONE;
649 break;
650
651 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
652 /* PTP v1, UDP, any kind of event packet */
653 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
654 /* 'xmac' hardware can support Sync, Pdelay_Req and
655 * Pdelay_resp by setting bit14 and bits17/16 to 01
656 * This leaves Delay_Req timestamps out.
657 * Enable all events *and* general purpose message
658 * timestamping
659 */
660 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
661 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
662 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
663 break;
664
665 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
666 /* PTP v1, UDP, Sync packet */
667 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
668 /* take time stamp for SYNC messages only */
669 ts_event_en = PTP_TCR_TSEVNTENA;
670
671 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
672 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
673 break;
674
675 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
676 /* PTP v1, UDP, Delay_req packet */
677 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
678 /* take time stamp for Delay_Req messages only */
679 ts_master_en = PTP_TCR_TSMSTRENA;
680 ts_event_en = PTP_TCR_TSEVNTENA;
681
682 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
683 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
684 break;
685
686 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
687 /* PTP v2, UDP, any kind of event packet */
688 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
689 ptp_v2 = PTP_TCR_TSVER2ENA;
690 /* take time stamp for all event messages */
691 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
692
693 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
694 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
695 break;
696
697 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
698 /* PTP v2, UDP, Sync packet */
699 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
700 ptp_v2 = PTP_TCR_TSVER2ENA;
701 /* take time stamp for SYNC messages only */
702 ts_event_en = PTP_TCR_TSEVNTENA;
703
704 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
705 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
706 break;
707
708 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
709 /* PTP v2, UDP, Delay_req packet */
710 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
711 ptp_v2 = PTP_TCR_TSVER2ENA;
712 /* take time stamp for Delay_Req messages only */
713 ts_master_en = PTP_TCR_TSMSTRENA;
714 ts_event_en = PTP_TCR_TSEVNTENA;
715
716 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
717 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
718 break;
719
720 case HWTSTAMP_FILTER_PTP_V2_EVENT:
721 /* PTP v2/802.AS1 any layer, any kind of event packet */
722 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
723 ptp_v2 = PTP_TCR_TSVER2ENA;
724 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
725 if (priv->synopsys_id < DWMAC_CORE_4_10)
726 ts_event_en = PTP_TCR_TSEVNTENA;
727 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
728 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
729 ptp_over_ethernet = PTP_TCR_TSIPENA;
730 break;
731
732 case HWTSTAMP_FILTER_PTP_V2_SYNC:
733 /* PTP v2/802.AS1, any layer, Sync packet */
734 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
735 ptp_v2 = PTP_TCR_TSVER2ENA;
736 /* take time stamp for SYNC messages only */
737 ts_event_en = PTP_TCR_TSEVNTENA;
738
739 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
740 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
741 ptp_over_ethernet = PTP_TCR_TSIPENA;
742 break;
743
744 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
745 /* PTP v2/802.AS1, any layer, Delay_req packet */
746 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
747 ptp_v2 = PTP_TCR_TSVER2ENA;
748 /* take time stamp for Delay_Req messages only */
749 ts_master_en = PTP_TCR_TSMSTRENA;
750 ts_event_en = PTP_TCR_TSEVNTENA;
751
752 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
753 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
754 ptp_over_ethernet = PTP_TCR_TSIPENA;
755 break;
756
757 case HWTSTAMP_FILTER_NTP_ALL:
758 case HWTSTAMP_FILTER_ALL:
759 /* time stamp any incoming packet */
760 config.rx_filter = HWTSTAMP_FILTER_ALL;
761 tstamp_all = PTP_TCR_TSENALL;
762 break;
763
764 default:
765 return -ERANGE;
766 }
767 } else {
768 switch (config.rx_filter) {
769 case HWTSTAMP_FILTER_NONE:
770 config.rx_filter = HWTSTAMP_FILTER_NONE;
771 break;
772 default:
773 /* PTP v1, UDP, any kind of event packet */
774 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
775 break;
776 }
777 }
778 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
779 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
780
781 priv->systime_flags = STMMAC_HWTS_ACTIVE;
782
783 if (priv->hwts_tx_en || priv->hwts_rx_en) {
784 priv->systime_flags |= tstamp_all | ptp_v2 |
785 ptp_over_ethernet | ptp_over_ipv6_udp |
786 ptp_over_ipv4_udp | ts_event_en |
787 ts_master_en | snap_type_sel;
788 }
789
790 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
791
792 memcpy(&priv->tstamp_config, &config, sizeof(config));
793
794 return copy_to_user(ifr->ifr_data, &config,
795 sizeof(config)) ? -EFAULT : 0;
796}
797
798/**
799 * stmmac_hwtstamp_get - read hardware timestamping.
800 * @dev: device pointer.
801 * @ifr: An IOCTL specific structure, that can contain a pointer to
802 * a proprietary structure used to pass information to the driver.
803 * Description:
804 * This function obtain the current hardware timestamping settings
805 * as requested.
806 */
807static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
808{
809 struct stmmac_priv *priv = netdev_priv(dev);
810 struct hwtstamp_config *config = &priv->tstamp_config;
811
812 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
813 return -EOPNOTSUPP;
814
815 return copy_to_user(ifr->ifr_data, config,
816 sizeof(*config)) ? -EFAULT : 0;
817}
818
819/**
820 * stmmac_init_tstamp_counter - init hardware timestamping counter
821 * @priv: driver private structure
822 * @systime_flags: timestamping flags
823 * Description:
824 * Initialize hardware counter for packet timestamping.
825 * This is valid as long as the interface is open and not suspended.
826 * Will be rerun after resuming from suspend, case in which the timestamping
827 * flags updated by stmmac_hwtstamp_set() also need to be restored.
828 */
829int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
830{
831 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
832 struct timespec64 now;
833 u32 sec_inc = 0;
834 u64 temp = 0;
835
836 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
837 return -EOPNOTSUPP;
838
839 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
840 priv->systime_flags = systime_flags;
841
842 /* program Sub Second Increment reg */
843 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
844 priv->plat->clk_ptp_rate,
845 xmac, &sec_inc);
846 temp = div_u64(1000000000ULL, sec_inc);
847
848 /* Store sub second increment for later use */
849 priv->sub_second_inc = sec_inc;
850
851 /* calculate default added value:
852 * formula is :
853 * addend = (2^32)/freq_div_ratio;
854 * where, freq_div_ratio = 1e9ns/sec_inc
855 */
856 temp = (u64)(temp << 32);
857 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
858 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
859
860 /* initialize system time */
861 ktime_get_real_ts64(&now);
862
863 /* lower 32 bits of tv_sec are safe until y2106 */
864 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
865
866 return 0;
867}
868EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
869
870/**
871 * stmmac_init_ptp - init PTP
872 * @priv: driver private structure
873 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
874 * This is done by looking at the HW cap. register.
875 * This function also registers the ptp driver.
876 */
877static int stmmac_init_ptp(struct stmmac_priv *priv)
878{
879 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
880 int ret;
881
882 if (priv->plat->ptp_clk_freq_config)
883 priv->plat->ptp_clk_freq_config(priv);
884
885 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
886 if (ret)
887 return ret;
888
889 priv->adv_ts = 0;
890 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
891 if (xmac && priv->dma_cap.atime_stamp)
892 priv->adv_ts = 1;
893 /* Dwmac 3.x core with extend_desc can support adv_ts */
894 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
895 priv->adv_ts = 1;
896
897 if (priv->dma_cap.time_stamp)
898 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
899
900 if (priv->adv_ts)
901 netdev_info(priv->dev,
902 "IEEE 1588-2008 Advanced Timestamp supported\n");
903
904 priv->hwts_tx_en = 0;
905 priv->hwts_rx_en = 0;
906
907 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
908 stmmac_hwtstamp_correct_latency(priv, priv);
909
910 return 0;
911}
912
913static void stmmac_release_ptp(struct stmmac_priv *priv)
914{
915 clk_disable_unprepare(priv->plat->clk_ptp_ref);
916 stmmac_ptp_unregister(priv);
917}
918
919/**
920 * stmmac_mac_flow_ctrl - Configure flow control in all queues
921 * @priv: driver private structure
922 * @duplex: duplex passed to the next function
923 * Description: It is used for configuring the flow control in all queues
924 */
925static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
926{
927 u32 tx_cnt = priv->plat->tx_queues_to_use;
928
929 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
930 priv->pause, tx_cnt);
931}
932
933static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
934 phy_interface_t interface)
935{
936 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
937
938 /* Refresh the MAC-specific capabilities */
939 stmmac_mac_update_caps(priv);
940
941 config->mac_capabilities = priv->hw->link.caps;
942
943 if (priv->plat->max_speed)
944 phylink_limit_mac_speed(config, priv->plat->max_speed);
945
946 return config->mac_capabilities;
947}
948
949static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
950 phy_interface_t interface)
951{
952 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
953 struct phylink_pcs *pcs;
954
955 if (priv->plat->select_pcs) {
956 pcs = priv->plat->select_pcs(priv, interface);
957 if (!IS_ERR(pcs))
958 return pcs;
959 }
960
961 return NULL;
962}
963
964static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
965 const struct phylink_link_state *state)
966{
967 /* Nothing to do, xpcs_config() handles everything */
968}
969
970static void stmmac_mac_link_down(struct phylink_config *config,
971 unsigned int mode, phy_interface_t interface)
972{
973 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
974
975 stmmac_mac_set(priv, priv->ioaddr, false);
976 priv->eee_active = false;
977 priv->tx_lpi_enabled = false;
978 priv->eee_enabled = stmmac_eee_init(priv);
979 stmmac_set_eee_pls(priv, priv->hw, false);
980
981 if (stmmac_fpe_supported(priv))
982 stmmac_fpe_link_state_handle(priv, false);
983}
984
985static void stmmac_mac_link_up(struct phylink_config *config,
986 struct phy_device *phy,
987 unsigned int mode, phy_interface_t interface,
988 int speed, int duplex,
989 bool tx_pause, bool rx_pause)
990{
991 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
992 u32 old_ctrl, ctrl;
993
994 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
995 priv->plat->serdes_powerup)
996 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
997
998 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
999 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1000
1001 if (interface == PHY_INTERFACE_MODE_USXGMII) {
1002 switch (speed) {
1003 case SPEED_10000:
1004 ctrl |= priv->hw->link.xgmii.speed10000;
1005 break;
1006 case SPEED_5000:
1007 ctrl |= priv->hw->link.xgmii.speed5000;
1008 break;
1009 case SPEED_2500:
1010 ctrl |= priv->hw->link.xgmii.speed2500;
1011 break;
1012 default:
1013 return;
1014 }
1015 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1016 switch (speed) {
1017 case SPEED_100000:
1018 ctrl |= priv->hw->link.xlgmii.speed100000;
1019 break;
1020 case SPEED_50000:
1021 ctrl |= priv->hw->link.xlgmii.speed50000;
1022 break;
1023 case SPEED_40000:
1024 ctrl |= priv->hw->link.xlgmii.speed40000;
1025 break;
1026 case SPEED_25000:
1027 ctrl |= priv->hw->link.xlgmii.speed25000;
1028 break;
1029 case SPEED_10000:
1030 ctrl |= priv->hw->link.xgmii.speed10000;
1031 break;
1032 case SPEED_2500:
1033 ctrl |= priv->hw->link.speed2500;
1034 break;
1035 case SPEED_1000:
1036 ctrl |= priv->hw->link.speed1000;
1037 break;
1038 default:
1039 return;
1040 }
1041 } else {
1042 switch (speed) {
1043 case SPEED_2500:
1044 ctrl |= priv->hw->link.speed2500;
1045 break;
1046 case SPEED_1000:
1047 ctrl |= priv->hw->link.speed1000;
1048 break;
1049 case SPEED_100:
1050 ctrl |= priv->hw->link.speed100;
1051 break;
1052 case SPEED_10:
1053 ctrl |= priv->hw->link.speed10;
1054 break;
1055 default:
1056 return;
1057 }
1058 }
1059
1060 priv->speed = speed;
1061
1062 if (priv->plat->fix_mac_speed)
1063 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1064
1065 if (!duplex)
1066 ctrl &= ~priv->hw->link.duplex;
1067 else
1068 ctrl |= priv->hw->link.duplex;
1069
1070 /* Flow Control operation */
1071 if (rx_pause && tx_pause)
1072 priv->flow_ctrl = FLOW_AUTO;
1073 else if (rx_pause && !tx_pause)
1074 priv->flow_ctrl = FLOW_RX;
1075 else if (!rx_pause && tx_pause)
1076 priv->flow_ctrl = FLOW_TX;
1077 else
1078 priv->flow_ctrl = FLOW_OFF;
1079
1080 stmmac_mac_flow_ctrl(priv, duplex);
1081
1082 if (ctrl != old_ctrl)
1083 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1084
1085 stmmac_mac_set(priv, priv->ioaddr, true);
1086 if (phy && priv->dma_cap.eee) {
1087 priv->eee_active =
1088 phy_init_eee(phy, !(priv->plat->flags &
1089 STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1090 priv->eee_enabled = stmmac_eee_init(priv);
1091 priv->tx_lpi_enabled = priv->eee_enabled;
1092 stmmac_set_eee_pls(priv, priv->hw, true);
1093 }
1094
1095 if (stmmac_fpe_supported(priv))
1096 stmmac_fpe_link_state_handle(priv, true);
1097
1098 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1099 stmmac_hwtstamp_correct_latency(priv, priv);
1100}
1101
1102static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1103 .mac_get_caps = stmmac_mac_get_caps,
1104 .mac_select_pcs = stmmac_mac_select_pcs,
1105 .mac_config = stmmac_mac_config,
1106 .mac_link_down = stmmac_mac_link_down,
1107 .mac_link_up = stmmac_mac_link_up,
1108};
1109
1110/**
1111 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1112 * @priv: driver private structure
1113 * Description: this is to verify if the HW supports the PCS.
1114 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1115 * configured for the TBI, RTBI, or SGMII PHY interface.
1116 */
1117static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1118{
1119 int interface = priv->plat->mac_interface;
1120
1121 if (priv->dma_cap.pcs) {
1122 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1123 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1124 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1125 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1126 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1127 priv->hw->pcs = STMMAC_PCS_RGMII;
1128 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1129 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1130 priv->hw->pcs = STMMAC_PCS_SGMII;
1131 }
1132 }
1133}
1134
1135/**
1136 * stmmac_init_phy - PHY initialization
1137 * @dev: net device structure
1138 * Description: it initializes the driver's PHY state, and attaches the PHY
1139 * to the mac driver.
1140 * Return value:
1141 * 0 on success
1142 */
1143static int stmmac_init_phy(struct net_device *dev)
1144{
1145 struct stmmac_priv *priv = netdev_priv(dev);
1146 struct fwnode_handle *phy_fwnode;
1147 struct fwnode_handle *fwnode;
1148 int ret;
1149
1150 if (!phylink_expects_phy(priv->phylink))
1151 return 0;
1152
1153 fwnode = priv->plat->port_node;
1154 if (!fwnode)
1155 fwnode = dev_fwnode(priv->device);
1156
1157 if (fwnode)
1158 phy_fwnode = fwnode_get_phy_node(fwnode);
1159 else
1160 phy_fwnode = NULL;
1161
1162 /* Some DT bindings do not set-up the PHY handle. Let's try to
1163 * manually parse it
1164 */
1165 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1166 int addr = priv->plat->phy_addr;
1167 struct phy_device *phydev;
1168
1169 if (addr < 0) {
1170 netdev_err(priv->dev, "no phy found\n");
1171 return -ENODEV;
1172 }
1173
1174 phydev = mdiobus_get_phy(priv->mii, addr);
1175 if (!phydev) {
1176 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1177 return -ENODEV;
1178 }
1179
1180 if (priv->dma_cap.eee)
1181 phy_support_eee(phydev);
1182
1183 ret = phylink_connect_phy(priv->phylink, phydev);
1184 } else {
1185 fwnode_handle_put(phy_fwnode);
1186 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187 }
1188
1189 if (!priv->plat->pmt) {
1190 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191
1192 phylink_ethtool_get_wol(priv->phylink, &wol);
1193 device_set_wakeup_capable(priv->device, !!wol.supported);
1194 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195 }
1196
1197 return ret;
1198}
1199
1200static int stmmac_phy_setup(struct stmmac_priv *priv)
1201{
1202 struct stmmac_mdio_bus_data *mdio_bus_data;
1203 int mode = priv->plat->phy_interface;
1204 struct fwnode_handle *fwnode;
1205 struct phylink *phylink;
1206
1207 priv->phylink_config.dev = &priv->dev->dev;
1208 priv->phylink_config.type = PHYLINK_NETDEV;
1209 priv->phylink_config.mac_managed_pm = true;
1210
1211 /* Stmmac always requires an RX clock for hardware initialization */
1212 priv->phylink_config.mac_requires_rxc = true;
1213
1214 mdio_bus_data = priv->plat->mdio_bus_data;
1215 if (mdio_bus_data)
1216 priv->phylink_config.default_an_inband =
1217 mdio_bus_data->default_an_inband;
1218
1219 /* Set the platform/firmware specified interface mode. Note, phylink
1220 * deals with the PHY interface mode, not the MAC interface mode.
1221 */
1222 __set_bit(mode, priv->phylink_config.supported_interfaces);
1223
1224 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1225 if (priv->hw->xpcs)
1226 xpcs_get_interfaces(priv->hw->xpcs,
1227 priv->phylink_config.supported_interfaces);
1228
1229 fwnode = priv->plat->port_node;
1230 if (!fwnode)
1231 fwnode = dev_fwnode(priv->device);
1232
1233 phylink = phylink_create(&priv->phylink_config, fwnode,
1234 mode, &stmmac_phylink_mac_ops);
1235 if (IS_ERR(phylink))
1236 return PTR_ERR(phylink);
1237
1238 priv->phylink = phylink;
1239 return 0;
1240}
1241
1242static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1243 struct stmmac_dma_conf *dma_conf)
1244{
1245 u32 rx_cnt = priv->plat->rx_queues_to_use;
1246 unsigned int desc_size;
1247 void *head_rx;
1248 u32 queue;
1249
1250 /* Display RX rings */
1251 for (queue = 0; queue < rx_cnt; queue++) {
1252 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1253
1254 pr_info("\tRX Queue %u rings\n", queue);
1255
1256 if (priv->extend_desc) {
1257 head_rx = (void *)rx_q->dma_erx;
1258 desc_size = sizeof(struct dma_extended_desc);
1259 } else {
1260 head_rx = (void *)rx_q->dma_rx;
1261 desc_size = sizeof(struct dma_desc);
1262 }
1263
1264 /* Display RX ring */
1265 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1266 rx_q->dma_rx_phy, desc_size);
1267 }
1268}
1269
1270static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1271 struct stmmac_dma_conf *dma_conf)
1272{
1273 u32 tx_cnt = priv->plat->tx_queues_to_use;
1274 unsigned int desc_size;
1275 void *head_tx;
1276 u32 queue;
1277
1278 /* Display TX rings */
1279 for (queue = 0; queue < tx_cnt; queue++) {
1280 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1281
1282 pr_info("\tTX Queue %d rings\n", queue);
1283
1284 if (priv->extend_desc) {
1285 head_tx = (void *)tx_q->dma_etx;
1286 desc_size = sizeof(struct dma_extended_desc);
1287 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1288 head_tx = (void *)tx_q->dma_entx;
1289 desc_size = sizeof(struct dma_edesc);
1290 } else {
1291 head_tx = (void *)tx_q->dma_tx;
1292 desc_size = sizeof(struct dma_desc);
1293 }
1294
1295 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1296 tx_q->dma_tx_phy, desc_size);
1297 }
1298}
1299
1300static void stmmac_display_rings(struct stmmac_priv *priv,
1301 struct stmmac_dma_conf *dma_conf)
1302{
1303 /* Display RX ring */
1304 stmmac_display_rx_rings(priv, dma_conf);
1305
1306 /* Display TX ring */
1307 stmmac_display_tx_rings(priv, dma_conf);
1308}
1309
1310static int stmmac_set_bfsize(int mtu, int bufsize)
1311{
1312 int ret = bufsize;
1313
1314 if (mtu >= BUF_SIZE_8KiB)
1315 ret = BUF_SIZE_16KiB;
1316 else if (mtu >= BUF_SIZE_4KiB)
1317 ret = BUF_SIZE_8KiB;
1318 else if (mtu >= BUF_SIZE_2KiB)
1319 ret = BUF_SIZE_4KiB;
1320 else if (mtu > DEFAULT_BUFSIZE)
1321 ret = BUF_SIZE_2KiB;
1322 else
1323 ret = DEFAULT_BUFSIZE;
1324
1325 return ret;
1326}
1327
1328/**
1329 * stmmac_clear_rx_descriptors - clear RX descriptors
1330 * @priv: driver private structure
1331 * @dma_conf: structure to take the dma data
1332 * @queue: RX queue index
1333 * Description: this function is called to clear the RX descriptors
1334 * in case of both basic and extended descriptors are used.
1335 */
1336static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1337 struct stmmac_dma_conf *dma_conf,
1338 u32 queue)
1339{
1340 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1341 int i;
1342
1343 /* Clear the RX descriptors */
1344 for (i = 0; i < dma_conf->dma_rx_size; i++)
1345 if (priv->extend_desc)
1346 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1347 priv->use_riwt, priv->mode,
1348 (i == dma_conf->dma_rx_size - 1),
1349 dma_conf->dma_buf_sz);
1350 else
1351 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1352 priv->use_riwt, priv->mode,
1353 (i == dma_conf->dma_rx_size - 1),
1354 dma_conf->dma_buf_sz);
1355}
1356
1357/**
1358 * stmmac_clear_tx_descriptors - clear tx descriptors
1359 * @priv: driver private structure
1360 * @dma_conf: structure to take the dma data
1361 * @queue: TX queue index.
1362 * Description: this function is called to clear the TX descriptors
1363 * in case of both basic and extended descriptors are used.
1364 */
1365static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1366 struct stmmac_dma_conf *dma_conf,
1367 u32 queue)
1368{
1369 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1370 int i;
1371
1372 /* Clear the TX descriptors */
1373 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1374 int last = (i == (dma_conf->dma_tx_size - 1));
1375 struct dma_desc *p;
1376
1377 if (priv->extend_desc)
1378 p = &tx_q->dma_etx[i].basic;
1379 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1380 p = &tx_q->dma_entx[i].basic;
1381 else
1382 p = &tx_q->dma_tx[i];
1383
1384 stmmac_init_tx_desc(priv, p, priv->mode, last);
1385 }
1386}
1387
1388/**
1389 * stmmac_clear_descriptors - clear descriptors
1390 * @priv: driver private structure
1391 * @dma_conf: structure to take the dma data
1392 * Description: this function is called to clear the TX and RX descriptors
1393 * in case of both basic and extended descriptors are used.
1394 */
1395static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1396 struct stmmac_dma_conf *dma_conf)
1397{
1398 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1399 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1400 u32 queue;
1401
1402 /* Clear the RX descriptors */
1403 for (queue = 0; queue < rx_queue_cnt; queue++)
1404 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1405
1406 /* Clear the TX descriptors */
1407 for (queue = 0; queue < tx_queue_cnt; queue++)
1408 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1409}
1410
1411/**
1412 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1413 * @priv: driver private structure
1414 * @dma_conf: structure to take the dma data
1415 * @p: descriptor pointer
1416 * @i: descriptor index
1417 * @flags: gfp flag
1418 * @queue: RX queue index
1419 * Description: this function is called to allocate a receive buffer, perform
1420 * the DMA mapping and init the descriptor.
1421 */
1422static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1423 struct stmmac_dma_conf *dma_conf,
1424 struct dma_desc *p,
1425 int i, gfp_t flags, u32 queue)
1426{
1427 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1428 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1429 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1430
1431 if (priv->dma_cap.host_dma_width <= 32)
1432 gfp |= GFP_DMA32;
1433
1434 if (!buf->page) {
1435 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1436 if (!buf->page)
1437 return -ENOMEM;
1438 buf->page_offset = stmmac_rx_offset(priv);
1439 }
1440
1441 if (priv->sph && !buf->sec_page) {
1442 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1443 if (!buf->sec_page)
1444 return -ENOMEM;
1445
1446 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1447 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1448 } else {
1449 buf->sec_page = NULL;
1450 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1451 }
1452
1453 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1454
1455 stmmac_set_desc_addr(priv, p, buf->addr);
1456 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1457 stmmac_init_desc3(priv, p);
1458
1459 return 0;
1460}
1461
1462/**
1463 * stmmac_free_rx_buffer - free RX dma buffers
1464 * @priv: private structure
1465 * @rx_q: RX queue
1466 * @i: buffer index.
1467 */
1468static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1469 struct stmmac_rx_queue *rx_q,
1470 int i)
1471{
1472 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1473
1474 if (buf->page)
1475 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1476 buf->page = NULL;
1477
1478 if (buf->sec_page)
1479 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1480 buf->sec_page = NULL;
1481}
1482
1483/**
1484 * stmmac_free_tx_buffer - free RX dma buffers
1485 * @priv: private structure
1486 * @dma_conf: structure to take the dma data
1487 * @queue: RX queue index
1488 * @i: buffer index.
1489 */
1490static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1491 struct stmmac_dma_conf *dma_conf,
1492 u32 queue, int i)
1493{
1494 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1495
1496 if (tx_q->tx_skbuff_dma[i].buf &&
1497 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1498 if (tx_q->tx_skbuff_dma[i].map_as_page)
1499 dma_unmap_page(priv->device,
1500 tx_q->tx_skbuff_dma[i].buf,
1501 tx_q->tx_skbuff_dma[i].len,
1502 DMA_TO_DEVICE);
1503 else
1504 dma_unmap_single(priv->device,
1505 tx_q->tx_skbuff_dma[i].buf,
1506 tx_q->tx_skbuff_dma[i].len,
1507 DMA_TO_DEVICE);
1508 }
1509
1510 if (tx_q->xdpf[i] &&
1511 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1512 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1513 xdp_return_frame(tx_q->xdpf[i]);
1514 tx_q->xdpf[i] = NULL;
1515 }
1516
1517 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1518 tx_q->xsk_frames_done++;
1519
1520 if (tx_q->tx_skbuff[i] &&
1521 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1522 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1523 tx_q->tx_skbuff[i] = NULL;
1524 }
1525
1526 tx_q->tx_skbuff_dma[i].buf = 0;
1527 tx_q->tx_skbuff_dma[i].map_as_page = false;
1528}
1529
1530/**
1531 * dma_free_rx_skbufs - free RX dma buffers
1532 * @priv: private structure
1533 * @dma_conf: structure to take the dma data
1534 * @queue: RX queue index
1535 */
1536static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1537 struct stmmac_dma_conf *dma_conf,
1538 u32 queue)
1539{
1540 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1541 int i;
1542
1543 for (i = 0; i < dma_conf->dma_rx_size; i++)
1544 stmmac_free_rx_buffer(priv, rx_q, i);
1545}
1546
1547static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1548 struct stmmac_dma_conf *dma_conf,
1549 u32 queue, gfp_t flags)
1550{
1551 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1552 int i;
1553
1554 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1555 struct dma_desc *p;
1556 int ret;
1557
1558 if (priv->extend_desc)
1559 p = &((rx_q->dma_erx + i)->basic);
1560 else
1561 p = rx_q->dma_rx + i;
1562
1563 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1564 queue);
1565 if (ret)
1566 return ret;
1567
1568 rx_q->buf_alloc_num++;
1569 }
1570
1571 return 0;
1572}
1573
1574/**
1575 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1576 * @priv: private structure
1577 * @dma_conf: structure to take the dma data
1578 * @queue: RX queue index
1579 */
1580static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1581 struct stmmac_dma_conf *dma_conf,
1582 u32 queue)
1583{
1584 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1585 int i;
1586
1587 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1588 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1589
1590 if (!buf->xdp)
1591 continue;
1592
1593 xsk_buff_free(buf->xdp);
1594 buf->xdp = NULL;
1595 }
1596}
1597
1598static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1599 struct stmmac_dma_conf *dma_conf,
1600 u32 queue)
1601{
1602 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1603 int i;
1604
1605 /* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1606 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1607 * use this macro to make sure no size violations.
1608 */
1609 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1610
1611 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1612 struct stmmac_rx_buffer *buf;
1613 dma_addr_t dma_addr;
1614 struct dma_desc *p;
1615
1616 if (priv->extend_desc)
1617 p = (struct dma_desc *)(rx_q->dma_erx + i);
1618 else
1619 p = rx_q->dma_rx + i;
1620
1621 buf = &rx_q->buf_pool[i];
1622
1623 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1624 if (!buf->xdp)
1625 return -ENOMEM;
1626
1627 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1628 stmmac_set_desc_addr(priv, p, dma_addr);
1629 rx_q->buf_alloc_num++;
1630 }
1631
1632 return 0;
1633}
1634
1635static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1636{
1637 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1638 return NULL;
1639
1640 return xsk_get_pool_from_qid(priv->dev, queue);
1641}
1642
1643/**
1644 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1645 * @priv: driver private structure
1646 * @dma_conf: structure to take the dma data
1647 * @queue: RX queue index
1648 * @flags: gfp flag.
1649 * Description: this function initializes the DMA RX descriptors
1650 * and allocates the socket buffers. It supports the chained and ring
1651 * modes.
1652 */
1653static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1654 struct stmmac_dma_conf *dma_conf,
1655 u32 queue, gfp_t flags)
1656{
1657 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1658 int ret;
1659
1660 netif_dbg(priv, probe, priv->dev,
1661 "(%s) dma_rx_phy=0x%08x\n", __func__,
1662 (u32)rx_q->dma_rx_phy);
1663
1664 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1665
1666 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1667
1668 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1669
1670 if (rx_q->xsk_pool) {
1671 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1672 MEM_TYPE_XSK_BUFF_POOL,
1673 NULL));
1674 netdev_info(priv->dev,
1675 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1676 rx_q->queue_index);
1677 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1678 } else {
1679 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1680 MEM_TYPE_PAGE_POOL,
1681 rx_q->page_pool));
1682 netdev_info(priv->dev,
1683 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1684 rx_q->queue_index);
1685 }
1686
1687 if (rx_q->xsk_pool) {
1688 /* RX XDP ZC buffer pool may not be populated, e.g.
1689 * xdpsock TX-only.
1690 */
1691 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1692 } else {
1693 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1694 if (ret < 0)
1695 return -ENOMEM;
1696 }
1697
1698 /* Setup the chained descriptor addresses */
1699 if (priv->mode == STMMAC_CHAIN_MODE) {
1700 if (priv->extend_desc)
1701 stmmac_mode_init(priv, rx_q->dma_erx,
1702 rx_q->dma_rx_phy,
1703 dma_conf->dma_rx_size, 1);
1704 else
1705 stmmac_mode_init(priv, rx_q->dma_rx,
1706 rx_q->dma_rx_phy,
1707 dma_conf->dma_rx_size, 0);
1708 }
1709
1710 return 0;
1711}
1712
1713static int init_dma_rx_desc_rings(struct net_device *dev,
1714 struct stmmac_dma_conf *dma_conf,
1715 gfp_t flags)
1716{
1717 struct stmmac_priv *priv = netdev_priv(dev);
1718 u32 rx_count = priv->plat->rx_queues_to_use;
1719 int queue;
1720 int ret;
1721
1722 /* RX INITIALIZATION */
1723 netif_dbg(priv, probe, priv->dev,
1724 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1725
1726 for (queue = 0; queue < rx_count; queue++) {
1727 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1728 if (ret)
1729 goto err_init_rx_buffers;
1730 }
1731
1732 return 0;
1733
1734err_init_rx_buffers:
1735 while (queue >= 0) {
1736 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1737
1738 if (rx_q->xsk_pool)
1739 dma_free_rx_xskbufs(priv, dma_conf, queue);
1740 else
1741 dma_free_rx_skbufs(priv, dma_conf, queue);
1742
1743 rx_q->buf_alloc_num = 0;
1744 rx_q->xsk_pool = NULL;
1745
1746 queue--;
1747 }
1748
1749 return ret;
1750}
1751
1752/**
1753 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1754 * @priv: driver private structure
1755 * @dma_conf: structure to take the dma data
1756 * @queue: TX queue index
1757 * Description: this function initializes the DMA TX descriptors
1758 * and allocates the socket buffers. It supports the chained and ring
1759 * modes.
1760 */
1761static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1762 struct stmmac_dma_conf *dma_conf,
1763 u32 queue)
1764{
1765 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1766 int i;
1767
1768 netif_dbg(priv, probe, priv->dev,
1769 "(%s) dma_tx_phy=0x%08x\n", __func__,
1770 (u32)tx_q->dma_tx_phy);
1771
1772 /* Setup the chained descriptor addresses */
1773 if (priv->mode == STMMAC_CHAIN_MODE) {
1774 if (priv->extend_desc)
1775 stmmac_mode_init(priv, tx_q->dma_etx,
1776 tx_q->dma_tx_phy,
1777 dma_conf->dma_tx_size, 1);
1778 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1779 stmmac_mode_init(priv, tx_q->dma_tx,
1780 tx_q->dma_tx_phy,
1781 dma_conf->dma_tx_size, 0);
1782 }
1783
1784 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1785
1786 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1787 struct dma_desc *p;
1788
1789 if (priv->extend_desc)
1790 p = &((tx_q->dma_etx + i)->basic);
1791 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1792 p = &((tx_q->dma_entx + i)->basic);
1793 else
1794 p = tx_q->dma_tx + i;
1795
1796 stmmac_clear_desc(priv, p);
1797
1798 tx_q->tx_skbuff_dma[i].buf = 0;
1799 tx_q->tx_skbuff_dma[i].map_as_page = false;
1800 tx_q->tx_skbuff_dma[i].len = 0;
1801 tx_q->tx_skbuff_dma[i].last_segment = false;
1802 tx_q->tx_skbuff[i] = NULL;
1803 }
1804
1805 return 0;
1806}
1807
1808static int init_dma_tx_desc_rings(struct net_device *dev,
1809 struct stmmac_dma_conf *dma_conf)
1810{
1811 struct stmmac_priv *priv = netdev_priv(dev);
1812 u32 tx_queue_cnt;
1813 u32 queue;
1814
1815 tx_queue_cnt = priv->plat->tx_queues_to_use;
1816
1817 for (queue = 0; queue < tx_queue_cnt; queue++)
1818 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1819
1820 return 0;
1821}
1822
1823/**
1824 * init_dma_desc_rings - init the RX/TX descriptor rings
1825 * @dev: net device structure
1826 * @dma_conf: structure to take the dma data
1827 * @flags: gfp flag.
1828 * Description: this function initializes the DMA RX/TX descriptors
1829 * and allocates the socket buffers. It supports the chained and ring
1830 * modes.
1831 */
1832static int init_dma_desc_rings(struct net_device *dev,
1833 struct stmmac_dma_conf *dma_conf,
1834 gfp_t flags)
1835{
1836 struct stmmac_priv *priv = netdev_priv(dev);
1837 int ret;
1838
1839 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1840 if (ret)
1841 return ret;
1842
1843 ret = init_dma_tx_desc_rings(dev, dma_conf);
1844
1845 stmmac_clear_descriptors(priv, dma_conf);
1846
1847 if (netif_msg_hw(priv))
1848 stmmac_display_rings(priv, dma_conf);
1849
1850 return ret;
1851}
1852
1853/**
1854 * dma_free_tx_skbufs - free TX dma buffers
1855 * @priv: private structure
1856 * @dma_conf: structure to take the dma data
1857 * @queue: TX queue index
1858 */
1859static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1860 struct stmmac_dma_conf *dma_conf,
1861 u32 queue)
1862{
1863 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1864 int i;
1865
1866 tx_q->xsk_frames_done = 0;
1867
1868 for (i = 0; i < dma_conf->dma_tx_size; i++)
1869 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1870
1871 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1872 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1873 tx_q->xsk_frames_done = 0;
1874 tx_q->xsk_pool = NULL;
1875 }
1876}
1877
1878/**
1879 * stmmac_free_tx_skbufs - free TX skb buffers
1880 * @priv: private structure
1881 */
1882static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1883{
1884 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1885 u32 queue;
1886
1887 for (queue = 0; queue < tx_queue_cnt; queue++)
1888 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1889}
1890
1891/**
1892 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1893 * @priv: private structure
1894 * @dma_conf: structure to take the dma data
1895 * @queue: RX queue index
1896 */
1897static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1898 struct stmmac_dma_conf *dma_conf,
1899 u32 queue)
1900{
1901 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1902
1903 /* Release the DMA RX socket buffers */
1904 if (rx_q->xsk_pool)
1905 dma_free_rx_xskbufs(priv, dma_conf, queue);
1906 else
1907 dma_free_rx_skbufs(priv, dma_conf, queue);
1908
1909 rx_q->buf_alloc_num = 0;
1910 rx_q->xsk_pool = NULL;
1911
1912 /* Free DMA regions of consistent memory previously allocated */
1913 if (!priv->extend_desc)
1914 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1915 sizeof(struct dma_desc),
1916 rx_q->dma_rx, rx_q->dma_rx_phy);
1917 else
1918 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1919 sizeof(struct dma_extended_desc),
1920 rx_q->dma_erx, rx_q->dma_rx_phy);
1921
1922 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1923 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1924
1925 kfree(rx_q->buf_pool);
1926 if (rx_q->page_pool)
1927 page_pool_destroy(rx_q->page_pool);
1928}
1929
1930static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1931 struct stmmac_dma_conf *dma_conf)
1932{
1933 u32 rx_count = priv->plat->rx_queues_to_use;
1934 u32 queue;
1935
1936 /* Free RX queue resources */
1937 for (queue = 0; queue < rx_count; queue++)
1938 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1939}
1940
1941/**
1942 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1943 * @priv: private structure
1944 * @dma_conf: structure to take the dma data
1945 * @queue: TX queue index
1946 */
1947static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1948 struct stmmac_dma_conf *dma_conf,
1949 u32 queue)
1950{
1951 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1952 size_t size;
1953 void *addr;
1954
1955 /* Release the DMA TX socket buffers */
1956 dma_free_tx_skbufs(priv, dma_conf, queue);
1957
1958 if (priv->extend_desc) {
1959 size = sizeof(struct dma_extended_desc);
1960 addr = tx_q->dma_etx;
1961 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1962 size = sizeof(struct dma_edesc);
1963 addr = tx_q->dma_entx;
1964 } else {
1965 size = sizeof(struct dma_desc);
1966 addr = tx_q->dma_tx;
1967 }
1968
1969 size *= dma_conf->dma_tx_size;
1970
1971 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1972
1973 kfree(tx_q->tx_skbuff_dma);
1974 kfree(tx_q->tx_skbuff);
1975}
1976
1977static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1978 struct stmmac_dma_conf *dma_conf)
1979{
1980 u32 tx_count = priv->plat->tx_queues_to_use;
1981 u32 queue;
1982
1983 /* Free TX queue resources */
1984 for (queue = 0; queue < tx_count; queue++)
1985 __free_dma_tx_desc_resources(priv, dma_conf, queue);
1986}
1987
1988/**
1989 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1990 * @priv: private structure
1991 * @dma_conf: structure to take the dma data
1992 * @queue: RX queue index
1993 * Description: according to which descriptor can be used (extend or basic)
1994 * this function allocates the resources for TX and RX paths. In case of
1995 * reception, for example, it pre-allocated the RX socket buffer in order to
1996 * allow zero-copy mechanism.
1997 */
1998static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
1999 struct stmmac_dma_conf *dma_conf,
2000 u32 queue)
2001{
2002 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2003 struct stmmac_channel *ch = &priv->channel[queue];
2004 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2005 struct page_pool_params pp_params = { 0 };
2006 unsigned int num_pages;
2007 unsigned int napi_id;
2008 int ret;
2009
2010 rx_q->queue_index = queue;
2011 rx_q->priv_data = priv;
2012
2013 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2014 pp_params.pool_size = dma_conf->dma_rx_size;
2015 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2016 pp_params.order = ilog2(num_pages);
2017 pp_params.nid = dev_to_node(priv->device);
2018 pp_params.dev = priv->device;
2019 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2020 pp_params.offset = stmmac_rx_offset(priv);
2021 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2022
2023 rx_q->page_pool = page_pool_create(&pp_params);
2024 if (IS_ERR(rx_q->page_pool)) {
2025 ret = PTR_ERR(rx_q->page_pool);
2026 rx_q->page_pool = NULL;
2027 return ret;
2028 }
2029
2030 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2031 sizeof(*rx_q->buf_pool),
2032 GFP_KERNEL);
2033 if (!rx_q->buf_pool)
2034 return -ENOMEM;
2035
2036 if (priv->extend_desc) {
2037 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2038 dma_conf->dma_rx_size *
2039 sizeof(struct dma_extended_desc),
2040 &rx_q->dma_rx_phy,
2041 GFP_KERNEL);
2042 if (!rx_q->dma_erx)
2043 return -ENOMEM;
2044
2045 } else {
2046 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2047 dma_conf->dma_rx_size *
2048 sizeof(struct dma_desc),
2049 &rx_q->dma_rx_phy,
2050 GFP_KERNEL);
2051 if (!rx_q->dma_rx)
2052 return -ENOMEM;
2053 }
2054
2055 if (stmmac_xdp_is_enabled(priv) &&
2056 test_bit(queue, priv->af_xdp_zc_qps))
2057 napi_id = ch->rxtx_napi.napi_id;
2058 else
2059 napi_id = ch->rx_napi.napi_id;
2060
2061 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2062 rx_q->queue_index,
2063 napi_id);
2064 if (ret) {
2065 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2066 return -EINVAL;
2067 }
2068
2069 return 0;
2070}
2071
2072static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2073 struct stmmac_dma_conf *dma_conf)
2074{
2075 u32 rx_count = priv->plat->rx_queues_to_use;
2076 u32 queue;
2077 int ret;
2078
2079 /* RX queues buffers and DMA */
2080 for (queue = 0; queue < rx_count; queue++) {
2081 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2082 if (ret)
2083 goto err_dma;
2084 }
2085
2086 return 0;
2087
2088err_dma:
2089 free_dma_rx_desc_resources(priv, dma_conf);
2090
2091 return ret;
2092}
2093
2094/**
2095 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2096 * @priv: private structure
2097 * @dma_conf: structure to take the dma data
2098 * @queue: TX queue index
2099 * Description: according to which descriptor can be used (extend or basic)
2100 * this function allocates the resources for TX and RX paths. In case of
2101 * reception, for example, it pre-allocated the RX socket buffer in order to
2102 * allow zero-copy mechanism.
2103 */
2104static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2105 struct stmmac_dma_conf *dma_conf,
2106 u32 queue)
2107{
2108 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2109 size_t size;
2110 void *addr;
2111
2112 tx_q->queue_index = queue;
2113 tx_q->priv_data = priv;
2114
2115 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2116 sizeof(*tx_q->tx_skbuff_dma),
2117 GFP_KERNEL);
2118 if (!tx_q->tx_skbuff_dma)
2119 return -ENOMEM;
2120
2121 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2122 sizeof(struct sk_buff *),
2123 GFP_KERNEL);
2124 if (!tx_q->tx_skbuff)
2125 return -ENOMEM;
2126
2127 if (priv->extend_desc)
2128 size = sizeof(struct dma_extended_desc);
2129 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2130 size = sizeof(struct dma_edesc);
2131 else
2132 size = sizeof(struct dma_desc);
2133
2134 size *= dma_conf->dma_tx_size;
2135
2136 addr = dma_alloc_coherent(priv->device, size,
2137 &tx_q->dma_tx_phy, GFP_KERNEL);
2138 if (!addr)
2139 return -ENOMEM;
2140
2141 if (priv->extend_desc)
2142 tx_q->dma_etx = addr;
2143 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2144 tx_q->dma_entx = addr;
2145 else
2146 tx_q->dma_tx = addr;
2147
2148 return 0;
2149}
2150
2151static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2152 struct stmmac_dma_conf *dma_conf)
2153{
2154 u32 tx_count = priv->plat->tx_queues_to_use;
2155 u32 queue;
2156 int ret;
2157
2158 /* TX queues buffers and DMA */
2159 for (queue = 0; queue < tx_count; queue++) {
2160 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2161 if (ret)
2162 goto err_dma;
2163 }
2164
2165 return 0;
2166
2167err_dma:
2168 free_dma_tx_desc_resources(priv, dma_conf);
2169 return ret;
2170}
2171
2172/**
2173 * alloc_dma_desc_resources - alloc TX/RX resources.
2174 * @priv: private structure
2175 * @dma_conf: structure to take the dma data
2176 * Description: according to which descriptor can be used (extend or basic)
2177 * this function allocates the resources for TX and RX paths. In case of
2178 * reception, for example, it pre-allocated the RX socket buffer in order to
2179 * allow zero-copy mechanism.
2180 */
2181static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2182 struct stmmac_dma_conf *dma_conf)
2183{
2184 /* RX Allocation */
2185 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2186
2187 if (ret)
2188 return ret;
2189
2190 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2191
2192 return ret;
2193}
2194
2195/**
2196 * free_dma_desc_resources - free dma desc resources
2197 * @priv: private structure
2198 * @dma_conf: structure to take the dma data
2199 */
2200static void free_dma_desc_resources(struct stmmac_priv *priv,
2201 struct stmmac_dma_conf *dma_conf)
2202{
2203 /* Release the DMA TX socket buffers */
2204 free_dma_tx_desc_resources(priv, dma_conf);
2205
2206 /* Release the DMA RX socket buffers later
2207 * to ensure all pending XDP_TX buffers are returned.
2208 */
2209 free_dma_rx_desc_resources(priv, dma_conf);
2210}
2211
2212/**
2213 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2214 * @priv: driver private structure
2215 * Description: It is used for enabling the rx queues in the MAC
2216 */
2217static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2218{
2219 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2220 int queue;
2221 u8 mode;
2222
2223 for (queue = 0; queue < rx_queues_count; queue++) {
2224 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2225 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2226 }
2227}
2228
2229/**
2230 * stmmac_start_rx_dma - start RX DMA channel
2231 * @priv: driver private structure
2232 * @chan: RX channel index
2233 * Description:
2234 * This starts a RX DMA channel
2235 */
2236static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2237{
2238 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2239 stmmac_start_rx(priv, priv->ioaddr, chan);
2240}
2241
2242/**
2243 * stmmac_start_tx_dma - start TX DMA channel
2244 * @priv: driver private structure
2245 * @chan: TX channel index
2246 * Description:
2247 * This starts a TX DMA channel
2248 */
2249static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2250{
2251 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2252 stmmac_start_tx(priv, priv->ioaddr, chan);
2253}
2254
2255/**
2256 * stmmac_stop_rx_dma - stop RX DMA channel
2257 * @priv: driver private structure
2258 * @chan: RX channel index
2259 * Description:
2260 * This stops a RX DMA channel
2261 */
2262static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2263{
2264 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2265 stmmac_stop_rx(priv, priv->ioaddr, chan);
2266}
2267
2268/**
2269 * stmmac_stop_tx_dma - stop TX DMA channel
2270 * @priv: driver private structure
2271 * @chan: TX channel index
2272 * Description:
2273 * This stops a TX DMA channel
2274 */
2275static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2276{
2277 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2278 stmmac_stop_tx(priv, priv->ioaddr, chan);
2279}
2280
2281static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2282{
2283 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2284 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2285 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2286 u32 chan;
2287
2288 for (chan = 0; chan < dma_csr_ch; chan++) {
2289 struct stmmac_channel *ch = &priv->channel[chan];
2290 unsigned long flags;
2291
2292 spin_lock_irqsave(&ch->lock, flags);
2293 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2294 spin_unlock_irqrestore(&ch->lock, flags);
2295 }
2296}
2297
2298/**
2299 * stmmac_start_all_dma - start all RX and TX DMA channels
2300 * @priv: driver private structure
2301 * Description:
2302 * This starts all the RX and TX DMA channels
2303 */
2304static void stmmac_start_all_dma(struct stmmac_priv *priv)
2305{
2306 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 u32 chan = 0;
2309
2310 for (chan = 0; chan < rx_channels_count; chan++)
2311 stmmac_start_rx_dma(priv, chan);
2312
2313 for (chan = 0; chan < tx_channels_count; chan++)
2314 stmmac_start_tx_dma(priv, chan);
2315}
2316
2317/**
2318 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2319 * @priv: driver private structure
2320 * Description:
2321 * This stops the RX and TX DMA channels
2322 */
2323static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2324{
2325 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2326 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2327 u32 chan = 0;
2328
2329 for (chan = 0; chan < rx_channels_count; chan++)
2330 stmmac_stop_rx_dma(priv, chan);
2331
2332 for (chan = 0; chan < tx_channels_count; chan++)
2333 stmmac_stop_tx_dma(priv, chan);
2334}
2335
2336/**
2337 * stmmac_dma_operation_mode - HW DMA operation mode
2338 * @priv: driver private structure
2339 * Description: it is used for configuring the DMA operation mode register in
2340 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2341 */
2342static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2343{
2344 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2345 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2346 int rxfifosz = priv->plat->rx_fifo_size;
2347 int txfifosz = priv->plat->tx_fifo_size;
2348 u32 txmode = 0;
2349 u32 rxmode = 0;
2350 u32 chan = 0;
2351 u8 qmode = 0;
2352
2353 if (rxfifosz == 0)
2354 rxfifosz = priv->dma_cap.rx_fifo_size;
2355 if (txfifosz == 0)
2356 txfifosz = priv->dma_cap.tx_fifo_size;
2357
2358 /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2359 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2360 rxfifosz /= rx_channels_count;
2361 txfifosz /= tx_channels_count;
2362 }
2363
2364 if (priv->plat->force_thresh_dma_mode) {
2365 txmode = tc;
2366 rxmode = tc;
2367 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2368 /*
2369 * In case of GMAC, SF mode can be enabled
2370 * to perform the TX COE in HW. This depends on:
2371 * 1) TX COE if actually supported
2372 * 2) There is no bugged Jumbo frame support
2373 * that needs to not insert csum in the TDES.
2374 */
2375 txmode = SF_DMA_MODE;
2376 rxmode = SF_DMA_MODE;
2377 priv->xstats.threshold = SF_DMA_MODE;
2378 } else {
2379 txmode = tc;
2380 rxmode = SF_DMA_MODE;
2381 }
2382
2383 /* configure all channels */
2384 for (chan = 0; chan < rx_channels_count; chan++) {
2385 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2386 u32 buf_size;
2387
2388 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2389
2390 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2391 rxfifosz, qmode);
2392
2393 if (rx_q->xsk_pool) {
2394 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2395 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2396 buf_size,
2397 chan);
2398 } else {
2399 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2400 priv->dma_conf.dma_buf_sz,
2401 chan);
2402 }
2403 }
2404
2405 for (chan = 0; chan < tx_channels_count; chan++) {
2406 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2407
2408 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2409 txfifosz, qmode);
2410 }
2411}
2412
2413static void stmmac_xsk_request_timestamp(void *_priv)
2414{
2415 struct stmmac_metadata_request *meta_req = _priv;
2416
2417 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2418 *meta_req->set_ic = true;
2419}
2420
2421static u64 stmmac_xsk_fill_timestamp(void *_priv)
2422{
2423 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2424 struct stmmac_priv *priv = tx_compl->priv;
2425 struct dma_desc *desc = tx_compl->desc;
2426 bool found = false;
2427 u64 ns = 0;
2428
2429 if (!priv->hwts_tx_en)
2430 return 0;
2431
2432 /* check tx tstamp status */
2433 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2434 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2435 found = true;
2436 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2437 found = true;
2438 }
2439
2440 if (found) {
2441 ns -= priv->plat->cdc_error_adj;
2442 return ns_to_ktime(ns);
2443 }
2444
2445 return 0;
2446}
2447
2448static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2449 .tmo_request_timestamp = stmmac_xsk_request_timestamp,
2450 .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
2451};
2452
2453static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2454{
2455 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2456 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2457 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2458 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2459 unsigned int entry = tx_q->cur_tx;
2460 struct dma_desc *tx_desc = NULL;
2461 struct xdp_desc xdp_desc;
2462 bool work_done = true;
2463 u32 tx_set_ic_bit = 0;
2464
2465 /* Avoids TX time-out as we are sharing with slow path */
2466 txq_trans_cond_update(nq);
2467
2468 budget = min(budget, stmmac_tx_avail(priv, queue));
2469
2470 while (budget-- > 0) {
2471 struct stmmac_metadata_request meta_req;
2472 struct xsk_tx_metadata *meta = NULL;
2473 dma_addr_t dma_addr;
2474 bool set_ic;
2475
2476 /* We are sharing with slow path and stop XSK TX desc submission when
2477 * available TX ring is less than threshold.
2478 */
2479 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2480 !netif_carrier_ok(priv->dev)) {
2481 work_done = false;
2482 break;
2483 }
2484
2485 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2486 break;
2487
2488 if (priv->est && priv->est->enable &&
2489 priv->est->max_sdu[queue] &&
2490 xdp_desc.len > priv->est->max_sdu[queue]) {
2491 priv->xstats.max_sdu_txq_drop[queue]++;
2492 continue;
2493 }
2494
2495 if (likely(priv->extend_desc))
2496 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2497 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2498 tx_desc = &tx_q->dma_entx[entry].basic;
2499 else
2500 tx_desc = tx_q->dma_tx + entry;
2501
2502 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2503 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2504 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2505
2506 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2507
2508 /* To return XDP buffer to XSK pool, we simple call
2509 * xsk_tx_completed(), so we don't need to fill up
2510 * 'buf' and 'xdpf'.
2511 */
2512 tx_q->tx_skbuff_dma[entry].buf = 0;
2513 tx_q->xdpf[entry] = NULL;
2514
2515 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2516 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2517 tx_q->tx_skbuff_dma[entry].last_segment = true;
2518 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2519
2520 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2521
2522 tx_q->tx_count_frames++;
2523
2524 if (!priv->tx_coal_frames[queue])
2525 set_ic = false;
2526 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2527 set_ic = true;
2528 else
2529 set_ic = false;
2530
2531 meta_req.priv = priv;
2532 meta_req.tx_desc = tx_desc;
2533 meta_req.set_ic = &set_ic;
2534 xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2535 &meta_req);
2536 if (set_ic) {
2537 tx_q->tx_count_frames = 0;
2538 stmmac_set_tx_ic(priv, tx_desc);
2539 tx_set_ic_bit++;
2540 }
2541
2542 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2543 true, priv->mode, true, true,
2544 xdp_desc.len);
2545
2546 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2547
2548 xsk_tx_metadata_to_compl(meta,
2549 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2550
2551 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2552 entry = tx_q->cur_tx;
2553 }
2554 u64_stats_update_begin(&txq_stats->napi_syncp);
2555 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2556 u64_stats_update_end(&txq_stats->napi_syncp);
2557
2558 if (tx_desc) {
2559 stmmac_flush_tx_descriptors(priv, queue);
2560 xsk_tx_release(pool);
2561 }
2562
2563 /* Return true if all of the 3 conditions are met
2564 * a) TX Budget is still available
2565 * b) work_done = true when XSK TX desc peek is empty (no more
2566 * pending XSK TX for transmission)
2567 */
2568 return !!budget && work_done;
2569}
2570
2571static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2572{
2573 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2574 tc += 64;
2575
2576 if (priv->plat->force_thresh_dma_mode)
2577 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2578 else
2579 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2580 chan);
2581
2582 priv->xstats.threshold = tc;
2583 }
2584}
2585
2586/**
2587 * stmmac_tx_clean - to manage the transmission completion
2588 * @priv: driver private structure
2589 * @budget: napi budget limiting this functions packet handling
2590 * @queue: TX queue index
2591 * @pending_packets: signal to arm the TX coal timer
2592 * Description: it reclaims the transmit resources after transmission completes.
2593 * If some packets still needs to be handled, due to TX coalesce, set
2594 * pending_packets to true to make NAPI arm the TX coal timer.
2595 */
2596static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2597 bool *pending_packets)
2598{
2599 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2600 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2601 unsigned int bytes_compl = 0, pkts_compl = 0;
2602 unsigned int entry, xmits = 0, count = 0;
2603 u32 tx_packets = 0, tx_errors = 0;
2604
2605 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2606
2607 tx_q->xsk_frames_done = 0;
2608
2609 entry = tx_q->dirty_tx;
2610
2611 /* Try to clean all TX complete frame in 1 shot */
2612 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2613 struct xdp_frame *xdpf;
2614 struct sk_buff *skb;
2615 struct dma_desc *p;
2616 int status;
2617
2618 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2619 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2620 xdpf = tx_q->xdpf[entry];
2621 skb = NULL;
2622 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2623 xdpf = NULL;
2624 skb = tx_q->tx_skbuff[entry];
2625 } else {
2626 xdpf = NULL;
2627 skb = NULL;
2628 }
2629
2630 if (priv->extend_desc)
2631 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2632 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2633 p = &tx_q->dma_entx[entry].basic;
2634 else
2635 p = tx_q->dma_tx + entry;
2636
2637 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2638 /* Check if the descriptor is owned by the DMA */
2639 if (unlikely(status & tx_dma_own))
2640 break;
2641
2642 count++;
2643
2644 /* Make sure descriptor fields are read after reading
2645 * the own bit.
2646 */
2647 dma_rmb();
2648
2649 /* Just consider the last segment and ...*/
2650 if (likely(!(status & tx_not_ls))) {
2651 /* ... verify the status error condition */
2652 if (unlikely(status & tx_err)) {
2653 tx_errors++;
2654 if (unlikely(status & tx_err_bump_tc))
2655 stmmac_bump_dma_threshold(priv, queue);
2656 } else {
2657 tx_packets++;
2658 }
2659 if (skb) {
2660 stmmac_get_tx_hwtstamp(priv, p, skb);
2661 } else if (tx_q->xsk_pool &&
2662 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2663 struct stmmac_xsk_tx_complete tx_compl = {
2664 .priv = priv,
2665 .desc = p,
2666 };
2667
2668 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2669 &stmmac_xsk_tx_metadata_ops,
2670 &tx_compl);
2671 }
2672 }
2673
2674 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2675 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2676 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2677 dma_unmap_page(priv->device,
2678 tx_q->tx_skbuff_dma[entry].buf,
2679 tx_q->tx_skbuff_dma[entry].len,
2680 DMA_TO_DEVICE);
2681 else
2682 dma_unmap_single(priv->device,
2683 tx_q->tx_skbuff_dma[entry].buf,
2684 tx_q->tx_skbuff_dma[entry].len,
2685 DMA_TO_DEVICE);
2686 tx_q->tx_skbuff_dma[entry].buf = 0;
2687 tx_q->tx_skbuff_dma[entry].len = 0;
2688 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2689 }
2690
2691 stmmac_clean_desc3(priv, tx_q, p);
2692
2693 tx_q->tx_skbuff_dma[entry].last_segment = false;
2694 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2695
2696 if (xdpf &&
2697 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2698 xdp_return_frame_rx_napi(xdpf);
2699 tx_q->xdpf[entry] = NULL;
2700 }
2701
2702 if (xdpf &&
2703 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2704 xdp_return_frame(xdpf);
2705 tx_q->xdpf[entry] = NULL;
2706 }
2707
2708 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2709 tx_q->xsk_frames_done++;
2710
2711 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2712 if (likely(skb)) {
2713 pkts_compl++;
2714 bytes_compl += skb->len;
2715 dev_consume_skb_any(skb);
2716 tx_q->tx_skbuff[entry] = NULL;
2717 }
2718 }
2719
2720 stmmac_release_tx_desc(priv, p, priv->mode);
2721
2722 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2723 }
2724 tx_q->dirty_tx = entry;
2725
2726 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2727 pkts_compl, bytes_compl);
2728
2729 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2730 queue))) &&
2731 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2732
2733 netif_dbg(priv, tx_done, priv->dev,
2734 "%s: restart transmit\n", __func__);
2735 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2736 }
2737
2738 if (tx_q->xsk_pool) {
2739 bool work_done;
2740
2741 if (tx_q->xsk_frames_done)
2742 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2743
2744 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2745 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2746
2747 /* For XSK TX, we try to send as many as possible.
2748 * If XSK work done (XSK TX desc empty and budget still
2749 * available), return "budget - 1" to reenable TX IRQ.
2750 * Else, return "budget" to make NAPI continue polling.
2751 */
2752 work_done = stmmac_xdp_xmit_zc(priv, queue,
2753 STMMAC_XSK_TX_BUDGET_MAX);
2754 if (work_done)
2755 xmits = budget - 1;
2756 else
2757 xmits = budget;
2758 }
2759
2760 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2761 priv->eee_sw_timer_en) {
2762 if (stmmac_enable_eee_mode(priv))
2763 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2764 }
2765
2766 /* We still have pending packets, let's call for a new scheduling */
2767 if (tx_q->dirty_tx != tx_q->cur_tx)
2768 *pending_packets = true;
2769
2770 u64_stats_update_begin(&txq_stats->napi_syncp);
2771 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2772 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2773 u64_stats_inc(&txq_stats->napi.tx_clean);
2774 u64_stats_update_end(&txq_stats->napi_syncp);
2775
2776 priv->xstats.tx_errors += tx_errors;
2777
2778 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2779
2780 /* Combine decisions from TX clean and XSK TX */
2781 return max(count, xmits);
2782}
2783
2784/**
2785 * stmmac_tx_err - to manage the tx error
2786 * @priv: driver private structure
2787 * @chan: channel index
2788 * Description: it cleans the descriptors and restarts the transmission
2789 * in case of transmission errors.
2790 */
2791static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2792{
2793 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2794
2795 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2796
2797 stmmac_stop_tx_dma(priv, chan);
2798 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2799 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2800 stmmac_reset_tx_queue(priv, chan);
2801 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2802 tx_q->dma_tx_phy, chan);
2803 stmmac_start_tx_dma(priv, chan);
2804
2805 priv->xstats.tx_errors++;
2806 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2807}
2808
2809/**
2810 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2811 * @priv: driver private structure
2812 * @txmode: TX operating mode
2813 * @rxmode: RX operating mode
2814 * @chan: channel index
2815 * Description: it is used for configuring of the DMA operation mode in
2816 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2817 * mode.
2818 */
2819static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2820 u32 rxmode, u32 chan)
2821{
2822 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2823 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2824 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2825 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2826 int rxfifosz = priv->plat->rx_fifo_size;
2827 int txfifosz = priv->plat->tx_fifo_size;
2828
2829 if (rxfifosz == 0)
2830 rxfifosz = priv->dma_cap.rx_fifo_size;
2831 if (txfifosz == 0)
2832 txfifosz = priv->dma_cap.tx_fifo_size;
2833
2834 /* Adjust for real per queue fifo size */
2835 rxfifosz /= rx_channels_count;
2836 txfifosz /= tx_channels_count;
2837
2838 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2839 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2840}
2841
2842static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2843{
2844 int ret;
2845
2846 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2847 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2848 if (ret && (ret != -EINVAL)) {
2849 stmmac_global_err(priv);
2850 return true;
2851 }
2852
2853 return false;
2854}
2855
2856static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2857{
2858 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2859 &priv->xstats, chan, dir);
2860 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2861 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2862 struct stmmac_channel *ch = &priv->channel[chan];
2863 struct napi_struct *rx_napi;
2864 struct napi_struct *tx_napi;
2865 unsigned long flags;
2866
2867 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2868 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2869
2870 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2871 if (napi_schedule_prep(rx_napi)) {
2872 spin_lock_irqsave(&ch->lock, flags);
2873 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2874 spin_unlock_irqrestore(&ch->lock, flags);
2875 __napi_schedule(rx_napi);
2876 }
2877 }
2878
2879 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2880 if (napi_schedule_prep(tx_napi)) {
2881 spin_lock_irqsave(&ch->lock, flags);
2882 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2883 spin_unlock_irqrestore(&ch->lock, flags);
2884 __napi_schedule(tx_napi);
2885 }
2886 }
2887
2888 return status;
2889}
2890
2891/**
2892 * stmmac_dma_interrupt - DMA ISR
2893 * @priv: driver private structure
2894 * Description: this is the DMA ISR. It is called by the main ISR.
2895 * It calls the dwmac dma routine and schedule poll method in case of some
2896 * work can be done.
2897 */
2898static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2899{
2900 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2901 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2902 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2903 tx_channel_count : rx_channel_count;
2904 u32 chan;
2905 int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2906
2907 /* Make sure we never check beyond our status buffer. */
2908 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2909 channels_to_check = ARRAY_SIZE(status);
2910
2911 for (chan = 0; chan < channels_to_check; chan++)
2912 status[chan] = stmmac_napi_check(priv, chan,
2913 DMA_DIR_RXTX);
2914
2915 for (chan = 0; chan < tx_channel_count; chan++) {
2916 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2917 /* Try to bump up the dma threshold on this failure */
2918 stmmac_bump_dma_threshold(priv, chan);
2919 } else if (unlikely(status[chan] == tx_hard_error)) {
2920 stmmac_tx_err(priv, chan);
2921 }
2922 }
2923}
2924
2925/**
2926 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2927 * @priv: driver private structure
2928 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2929 */
2930static void stmmac_mmc_setup(struct stmmac_priv *priv)
2931{
2932 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2933 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2934
2935 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2936
2937 if (priv->dma_cap.rmon) {
2938 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2939 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2940 } else
2941 netdev_info(priv->dev, "No MAC Management Counters available\n");
2942}
2943
2944/**
2945 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2946 * @priv: driver private structure
2947 * Description:
2948 * new GMAC chip generations have a new register to indicate the
2949 * presence of the optional feature/functions.
2950 * This can be also used to override the value passed through the
2951 * platform and necessary for old MAC10/100 and GMAC chips.
2952 */
2953static int stmmac_get_hw_features(struct stmmac_priv *priv)
2954{
2955 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2956}
2957
2958/**
2959 * stmmac_check_ether_addr - check if the MAC addr is valid
2960 * @priv: driver private structure
2961 * Description:
2962 * it is to verify if the MAC address is valid, in case of failures it
2963 * generates a random MAC address
2964 */
2965static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2966{
2967 u8 addr[ETH_ALEN];
2968
2969 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2970 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2971 if (is_valid_ether_addr(addr))
2972 eth_hw_addr_set(priv->dev, addr);
2973 else
2974 eth_hw_addr_random(priv->dev);
2975 dev_info(priv->device, "device MAC address %pM\n",
2976 priv->dev->dev_addr);
2977 }
2978}
2979
2980/**
2981 * stmmac_init_dma_engine - DMA init.
2982 * @priv: driver private structure
2983 * Description:
2984 * It inits the DMA invoking the specific MAC/GMAC callback.
2985 * Some DMA parameters can be passed from the platform;
2986 * in case of these are not passed a default is kept for the MAC or GMAC.
2987 */
2988static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2989{
2990 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2991 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2992 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2993 struct stmmac_rx_queue *rx_q;
2994 struct stmmac_tx_queue *tx_q;
2995 u32 chan = 0;
2996 int ret = 0;
2997
2998 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2999 dev_err(priv->device, "Invalid DMA configuration\n");
3000 return -EINVAL;
3001 }
3002
3003 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3004 priv->plat->dma_cfg->atds = 1;
3005
3006 ret = stmmac_reset(priv, priv->ioaddr);
3007 if (ret) {
3008 dev_err(priv->device, "Failed to reset the dma\n");
3009 return ret;
3010 }
3011
3012 /* DMA Configuration */
3013 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3014
3015 if (priv->plat->axi)
3016 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3017
3018 /* DMA CSR Channel configuration */
3019 for (chan = 0; chan < dma_csr_ch; chan++) {
3020 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3021 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3022 }
3023
3024 /* DMA RX Channel Configuration */
3025 for (chan = 0; chan < rx_channels_count; chan++) {
3026 rx_q = &priv->dma_conf.rx_queue[chan];
3027
3028 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3029 rx_q->dma_rx_phy, chan);
3030
3031 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3032 (rx_q->buf_alloc_num *
3033 sizeof(struct dma_desc));
3034 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3035 rx_q->rx_tail_addr, chan);
3036 }
3037
3038 /* DMA TX Channel Configuration */
3039 for (chan = 0; chan < tx_channels_count; chan++) {
3040 tx_q = &priv->dma_conf.tx_queue[chan];
3041
3042 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3043 tx_q->dma_tx_phy, chan);
3044
3045 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3046 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3047 tx_q->tx_tail_addr, chan);
3048 }
3049
3050 return ret;
3051}
3052
3053static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3054{
3055 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3056 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3057 struct stmmac_channel *ch;
3058 struct napi_struct *napi;
3059
3060 if (!tx_coal_timer)
3061 return;
3062
3063 ch = &priv->channel[tx_q->queue_index];
3064 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3065
3066 /* Arm timer only if napi is not already scheduled.
3067 * Try to cancel any timer if napi is scheduled, timer will be armed
3068 * again in the next scheduled napi.
3069 */
3070 if (unlikely(!napi_is_scheduled(napi)))
3071 hrtimer_start(&tx_q->txtimer,
3072 STMMAC_COAL_TIMER(tx_coal_timer),
3073 HRTIMER_MODE_REL);
3074 else
3075 hrtimer_try_to_cancel(&tx_q->txtimer);
3076}
3077
3078/**
3079 * stmmac_tx_timer - mitigation sw timer for tx.
3080 * @t: data pointer
3081 * Description:
3082 * This is the timer handler to directly invoke the stmmac_tx_clean.
3083 */
3084static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3085{
3086 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3087 struct stmmac_priv *priv = tx_q->priv_data;
3088 struct stmmac_channel *ch;
3089 struct napi_struct *napi;
3090
3091 ch = &priv->channel[tx_q->queue_index];
3092 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3093
3094 if (likely(napi_schedule_prep(napi))) {
3095 unsigned long flags;
3096
3097 spin_lock_irqsave(&ch->lock, flags);
3098 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3099 spin_unlock_irqrestore(&ch->lock, flags);
3100 __napi_schedule(napi);
3101 }
3102
3103 return HRTIMER_NORESTART;
3104}
3105
3106/**
3107 * stmmac_init_coalesce - init mitigation options.
3108 * @priv: driver private structure
3109 * Description:
3110 * This inits the coalesce parameters: i.e. timer rate,
3111 * timer handler and default threshold used for enabling the
3112 * interrupt on completion bit.
3113 */
3114static void stmmac_init_coalesce(struct stmmac_priv *priv)
3115{
3116 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3117 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3118 u32 chan;
3119
3120 for (chan = 0; chan < tx_channel_count; chan++) {
3121 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3122
3123 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3124 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3125
3126 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3127 tx_q->txtimer.function = stmmac_tx_timer;
3128 }
3129
3130 for (chan = 0; chan < rx_channel_count; chan++)
3131 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3132}
3133
3134static void stmmac_set_rings_length(struct stmmac_priv *priv)
3135{
3136 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3137 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3138 u32 chan;
3139
3140 /* set TX ring length */
3141 for (chan = 0; chan < tx_channels_count; chan++)
3142 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3143 (priv->dma_conf.dma_tx_size - 1), chan);
3144
3145 /* set RX ring length */
3146 for (chan = 0; chan < rx_channels_count; chan++)
3147 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3148 (priv->dma_conf.dma_rx_size - 1), chan);
3149}
3150
3151/**
3152 * stmmac_set_tx_queue_weight - Set TX queue weight
3153 * @priv: driver private structure
3154 * Description: It is used for setting TX queues weight
3155 */
3156static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3157{
3158 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3159 u32 weight;
3160 u32 queue;
3161
3162 for (queue = 0; queue < tx_queues_count; queue++) {
3163 weight = priv->plat->tx_queues_cfg[queue].weight;
3164 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3165 }
3166}
3167
3168/**
3169 * stmmac_configure_cbs - Configure CBS in TX queue
3170 * @priv: driver private structure
3171 * Description: It is used for configuring CBS in AVB TX queues
3172 */
3173static void stmmac_configure_cbs(struct stmmac_priv *priv)
3174{
3175 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3176 u32 mode_to_use;
3177 u32 queue;
3178
3179 /* queue 0 is reserved for legacy traffic */
3180 for (queue = 1; queue < tx_queues_count; queue++) {
3181 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3182 if (mode_to_use == MTL_QUEUE_DCB)
3183 continue;
3184
3185 stmmac_config_cbs(priv, priv->hw,
3186 priv->plat->tx_queues_cfg[queue].send_slope,
3187 priv->plat->tx_queues_cfg[queue].idle_slope,
3188 priv->plat->tx_queues_cfg[queue].high_credit,
3189 priv->plat->tx_queues_cfg[queue].low_credit,
3190 queue);
3191 }
3192}
3193
3194/**
3195 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3196 * @priv: driver private structure
3197 * Description: It is used for mapping RX queues to RX dma channels
3198 */
3199static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3200{
3201 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3202 u32 queue;
3203 u32 chan;
3204
3205 for (queue = 0; queue < rx_queues_count; queue++) {
3206 chan = priv->plat->rx_queues_cfg[queue].chan;
3207 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3208 }
3209}
3210
3211/**
3212 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3213 * @priv: driver private structure
3214 * Description: It is used for configuring the RX Queue Priority
3215 */
3216static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3217{
3218 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3219 u32 queue;
3220 u32 prio;
3221
3222 for (queue = 0; queue < rx_queues_count; queue++) {
3223 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3224 continue;
3225
3226 prio = priv->plat->rx_queues_cfg[queue].prio;
3227 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3228 }
3229}
3230
3231/**
3232 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3233 * @priv: driver private structure
3234 * Description: It is used for configuring the TX Queue Priority
3235 */
3236static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3237{
3238 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3239 u32 queue;
3240 u32 prio;
3241
3242 for (queue = 0; queue < tx_queues_count; queue++) {
3243 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3244 continue;
3245
3246 prio = priv->plat->tx_queues_cfg[queue].prio;
3247 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3248 }
3249}
3250
3251/**
3252 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3253 * @priv: driver private structure
3254 * Description: It is used for configuring the RX queue routing
3255 */
3256static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3257{
3258 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3259 u32 queue;
3260 u8 packet;
3261
3262 for (queue = 0; queue < rx_queues_count; queue++) {
3263 /* no specific packet type routing specified for the queue */
3264 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3265 continue;
3266
3267 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3268 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3269 }
3270}
3271
3272static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3273{
3274 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3275 priv->rss.enable = false;
3276 return;
3277 }
3278
3279 if (priv->dev->features & NETIF_F_RXHASH)
3280 priv->rss.enable = true;
3281 else
3282 priv->rss.enable = false;
3283
3284 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3285 priv->plat->rx_queues_to_use);
3286}
3287
3288/**
3289 * stmmac_mtl_configuration - Configure MTL
3290 * @priv: driver private structure
3291 * Description: It is used for configurring MTL
3292 */
3293static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3294{
3295 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3296 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3297
3298 if (tx_queues_count > 1)
3299 stmmac_set_tx_queue_weight(priv);
3300
3301 /* Configure MTL RX algorithms */
3302 if (rx_queues_count > 1)
3303 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3304 priv->plat->rx_sched_algorithm);
3305
3306 /* Configure MTL TX algorithms */
3307 if (tx_queues_count > 1)
3308 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3309 priv->plat->tx_sched_algorithm);
3310
3311 /* Configure CBS in AVB TX queues */
3312 if (tx_queues_count > 1)
3313 stmmac_configure_cbs(priv);
3314
3315 /* Map RX MTL to DMA channels */
3316 stmmac_rx_queue_dma_chan_map(priv);
3317
3318 /* Enable MAC RX Queues */
3319 stmmac_mac_enable_rx_queues(priv);
3320
3321 /* Set RX priorities */
3322 if (rx_queues_count > 1)
3323 stmmac_mac_config_rx_queues_prio(priv);
3324
3325 /* Set TX priorities */
3326 if (tx_queues_count > 1)
3327 stmmac_mac_config_tx_queues_prio(priv);
3328
3329 /* Set RX routing */
3330 if (rx_queues_count > 1)
3331 stmmac_mac_config_rx_queues_routing(priv);
3332
3333 /* Receive Side Scaling */
3334 if (rx_queues_count > 1)
3335 stmmac_mac_config_rss(priv);
3336}
3337
3338static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3339{
3340 if (priv->dma_cap.asp) {
3341 netdev_info(priv->dev, "Enabling Safety Features\n");
3342 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3343 priv->plat->safety_feat_cfg);
3344 } else {
3345 netdev_info(priv->dev, "No Safety Features support found\n");
3346 }
3347}
3348
3349/**
3350 * stmmac_hw_setup - setup mac in a usable state.
3351 * @dev : pointer to the device structure.
3352 * @ptp_register: register PTP if set
3353 * Description:
3354 * this is the main function to setup the HW in a usable state because the
3355 * dma engine is reset, the core registers are configured (e.g. AXI,
3356 * Checksum features, timers). The DMA is ready to start receiving and
3357 * transmitting.
3358 * Return value:
3359 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3360 * file on failure.
3361 */
3362static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3363{
3364 struct stmmac_priv *priv = netdev_priv(dev);
3365 u32 rx_cnt = priv->plat->rx_queues_to_use;
3366 u32 tx_cnt = priv->plat->tx_queues_to_use;
3367 bool sph_en;
3368 u32 chan;
3369 int ret;
3370
3371 /* Make sure RX clock is enabled */
3372 if (priv->hw->phylink_pcs)
3373 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3374
3375 /* DMA initialization and SW reset */
3376 ret = stmmac_init_dma_engine(priv);
3377 if (ret < 0) {
3378 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3379 __func__);
3380 return ret;
3381 }
3382
3383 /* Copy the MAC addr into the HW */
3384 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3385
3386 /* PS and related bits will be programmed according to the speed */
3387 if (priv->hw->pcs) {
3388 int speed = priv->plat->mac_port_sel_speed;
3389
3390 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3391 (speed == SPEED_1000)) {
3392 priv->hw->ps = speed;
3393 } else {
3394 dev_warn(priv->device, "invalid port speed\n");
3395 priv->hw->ps = 0;
3396 }
3397 }
3398
3399 /* Initialize the MAC Core */
3400 stmmac_core_init(priv, priv->hw, dev);
3401
3402 /* Initialize MTL*/
3403 stmmac_mtl_configuration(priv);
3404
3405 /* Initialize Safety Features */
3406 stmmac_safety_feat_configuration(priv);
3407
3408 ret = stmmac_rx_ipc(priv, priv->hw);
3409 if (!ret) {
3410 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3411 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3412 priv->hw->rx_csum = 0;
3413 }
3414
3415 /* Enable the MAC Rx/Tx */
3416 stmmac_mac_set(priv, priv->ioaddr, true);
3417
3418 /* Set the HW DMA mode and the COE */
3419 stmmac_dma_operation_mode(priv);
3420
3421 stmmac_mmc_setup(priv);
3422
3423 if (ptp_register) {
3424 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3425 if (ret < 0)
3426 netdev_warn(priv->dev,
3427 "failed to enable PTP reference clock: %pe\n",
3428 ERR_PTR(ret));
3429 }
3430
3431 ret = stmmac_init_ptp(priv);
3432 if (ret == -EOPNOTSUPP)
3433 netdev_info(priv->dev, "PTP not supported by HW\n");
3434 else if (ret)
3435 netdev_warn(priv->dev, "PTP init failed\n");
3436 else if (ptp_register)
3437 stmmac_ptp_register(priv);
3438
3439 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3440
3441 /* Convert the timer from msec to usec */
3442 if (!priv->tx_lpi_timer)
3443 priv->tx_lpi_timer = eee_timer * 1000;
3444
3445 if (priv->use_riwt) {
3446 u32 queue;
3447
3448 for (queue = 0; queue < rx_cnt; queue++) {
3449 if (!priv->rx_riwt[queue])
3450 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3451
3452 stmmac_rx_watchdog(priv, priv->ioaddr,
3453 priv->rx_riwt[queue], queue);
3454 }
3455 }
3456
3457 if (priv->hw->pcs)
3458 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3459
3460 /* set TX and RX rings length */
3461 stmmac_set_rings_length(priv);
3462
3463 /* Enable TSO */
3464 if (priv->tso) {
3465 for (chan = 0; chan < tx_cnt; chan++) {
3466 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3467
3468 /* TSO and TBS cannot co-exist */
3469 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3470 continue;
3471
3472 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3473 }
3474 }
3475
3476 /* Enable Split Header */
3477 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3478 for (chan = 0; chan < rx_cnt; chan++)
3479 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3480
3481
3482 /* VLAN Tag Insertion */
3483 if (priv->dma_cap.vlins)
3484 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3485
3486 /* TBS */
3487 for (chan = 0; chan < tx_cnt; chan++) {
3488 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3489 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3490
3491 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3492 }
3493
3494 /* Configure real RX and TX queues */
3495 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3496 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3497
3498 /* Start the ball rolling... */
3499 stmmac_start_all_dma(priv);
3500
3501 stmmac_set_hw_vlan_mode(priv, priv->hw);
3502
3503 return 0;
3504}
3505
3506static void stmmac_hw_teardown(struct net_device *dev)
3507{
3508 struct stmmac_priv *priv = netdev_priv(dev);
3509
3510 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3511}
3512
3513static void stmmac_free_irq(struct net_device *dev,
3514 enum request_irq_err irq_err, int irq_idx)
3515{
3516 struct stmmac_priv *priv = netdev_priv(dev);
3517 int j;
3518
3519 switch (irq_err) {
3520 case REQ_IRQ_ERR_ALL:
3521 irq_idx = priv->plat->tx_queues_to_use;
3522 fallthrough;
3523 case REQ_IRQ_ERR_TX:
3524 for (j = irq_idx - 1; j >= 0; j--) {
3525 if (priv->tx_irq[j] > 0) {
3526 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3527 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3528 }
3529 }
3530 irq_idx = priv->plat->rx_queues_to_use;
3531 fallthrough;
3532 case REQ_IRQ_ERR_RX:
3533 for (j = irq_idx - 1; j >= 0; j--) {
3534 if (priv->rx_irq[j] > 0) {
3535 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3536 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3537 }
3538 }
3539
3540 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3541 free_irq(priv->sfty_ue_irq, dev);
3542 fallthrough;
3543 case REQ_IRQ_ERR_SFTY_UE:
3544 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3545 free_irq(priv->sfty_ce_irq, dev);
3546 fallthrough;
3547 case REQ_IRQ_ERR_SFTY_CE:
3548 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3549 free_irq(priv->lpi_irq, dev);
3550 fallthrough;
3551 case REQ_IRQ_ERR_LPI:
3552 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3553 free_irq(priv->wol_irq, dev);
3554 fallthrough;
3555 case REQ_IRQ_ERR_SFTY:
3556 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3557 free_irq(priv->sfty_irq, dev);
3558 fallthrough;
3559 case REQ_IRQ_ERR_WOL:
3560 free_irq(dev->irq, dev);
3561 fallthrough;
3562 case REQ_IRQ_ERR_MAC:
3563 case REQ_IRQ_ERR_NO:
3564 /* If MAC IRQ request error, no more IRQ to free */
3565 break;
3566 }
3567}
3568
3569static int stmmac_request_irq_multi_msi(struct net_device *dev)
3570{
3571 struct stmmac_priv *priv = netdev_priv(dev);
3572 enum request_irq_err irq_err;
3573 cpumask_t cpu_mask;
3574 int irq_idx = 0;
3575 char *int_name;
3576 int ret;
3577 int i;
3578
3579 /* For common interrupt */
3580 int_name = priv->int_name_mac;
3581 sprintf(int_name, "%s:%s", dev->name, "mac");
3582 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3583 0, int_name, dev);
3584 if (unlikely(ret < 0)) {
3585 netdev_err(priv->dev,
3586 "%s: alloc mac MSI %d (error: %d)\n",
3587 __func__, dev->irq, ret);
3588 irq_err = REQ_IRQ_ERR_MAC;
3589 goto irq_error;
3590 }
3591
3592 /* Request the Wake IRQ in case of another line
3593 * is used for WoL
3594 */
3595 priv->wol_irq_disabled = true;
3596 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3597 int_name = priv->int_name_wol;
3598 sprintf(int_name, "%s:%s", dev->name, "wol");
3599 ret = request_irq(priv->wol_irq,
3600 stmmac_mac_interrupt,
3601 0, int_name, dev);
3602 if (unlikely(ret < 0)) {
3603 netdev_err(priv->dev,
3604 "%s: alloc wol MSI %d (error: %d)\n",
3605 __func__, priv->wol_irq, ret);
3606 irq_err = REQ_IRQ_ERR_WOL;
3607 goto irq_error;
3608 }
3609 }
3610
3611 /* Request the LPI IRQ in case of another line
3612 * is used for LPI
3613 */
3614 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3615 int_name = priv->int_name_lpi;
3616 sprintf(int_name, "%s:%s", dev->name, "lpi");
3617 ret = request_irq(priv->lpi_irq,
3618 stmmac_mac_interrupt,
3619 0, int_name, dev);
3620 if (unlikely(ret < 0)) {
3621 netdev_err(priv->dev,
3622 "%s: alloc lpi MSI %d (error: %d)\n",
3623 __func__, priv->lpi_irq, ret);
3624 irq_err = REQ_IRQ_ERR_LPI;
3625 goto irq_error;
3626 }
3627 }
3628
3629 /* Request the common Safety Feature Correctible/Uncorrectible
3630 * Error line in case of another line is used
3631 */
3632 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3633 int_name = priv->int_name_sfty;
3634 sprintf(int_name, "%s:%s", dev->name, "safety");
3635 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3636 0, int_name, dev);
3637 if (unlikely(ret < 0)) {
3638 netdev_err(priv->dev,
3639 "%s: alloc sfty MSI %d (error: %d)\n",
3640 __func__, priv->sfty_irq, ret);
3641 irq_err = REQ_IRQ_ERR_SFTY;
3642 goto irq_error;
3643 }
3644 }
3645
3646 /* Request the Safety Feature Correctible Error line in
3647 * case of another line is used
3648 */
3649 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3650 int_name = priv->int_name_sfty_ce;
3651 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3652 ret = request_irq(priv->sfty_ce_irq,
3653 stmmac_safety_interrupt,
3654 0, int_name, dev);
3655 if (unlikely(ret < 0)) {
3656 netdev_err(priv->dev,
3657 "%s: alloc sfty ce MSI %d (error: %d)\n",
3658 __func__, priv->sfty_ce_irq, ret);
3659 irq_err = REQ_IRQ_ERR_SFTY_CE;
3660 goto irq_error;
3661 }
3662 }
3663
3664 /* Request the Safety Feature Uncorrectible Error line in
3665 * case of another line is used
3666 */
3667 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3668 int_name = priv->int_name_sfty_ue;
3669 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3670 ret = request_irq(priv->sfty_ue_irq,
3671 stmmac_safety_interrupt,
3672 0, int_name, dev);
3673 if (unlikely(ret < 0)) {
3674 netdev_err(priv->dev,
3675 "%s: alloc sfty ue MSI %d (error: %d)\n",
3676 __func__, priv->sfty_ue_irq, ret);
3677 irq_err = REQ_IRQ_ERR_SFTY_UE;
3678 goto irq_error;
3679 }
3680 }
3681
3682 /* Request Rx MSI irq */
3683 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3684 if (i >= MTL_MAX_RX_QUEUES)
3685 break;
3686 if (priv->rx_irq[i] == 0)
3687 continue;
3688
3689 int_name = priv->int_name_rx_irq[i];
3690 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3691 ret = request_irq(priv->rx_irq[i],
3692 stmmac_msi_intr_rx,
3693 0, int_name, &priv->dma_conf.rx_queue[i]);
3694 if (unlikely(ret < 0)) {
3695 netdev_err(priv->dev,
3696 "%s: alloc rx-%d MSI %d (error: %d)\n",
3697 __func__, i, priv->rx_irq[i], ret);
3698 irq_err = REQ_IRQ_ERR_RX;
3699 irq_idx = i;
3700 goto irq_error;
3701 }
3702 cpumask_clear(&cpu_mask);
3703 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3704 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3705 }
3706
3707 /* Request Tx MSI irq */
3708 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3709 if (i >= MTL_MAX_TX_QUEUES)
3710 break;
3711 if (priv->tx_irq[i] == 0)
3712 continue;
3713
3714 int_name = priv->int_name_tx_irq[i];
3715 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3716 ret = request_irq(priv->tx_irq[i],
3717 stmmac_msi_intr_tx,
3718 0, int_name, &priv->dma_conf.tx_queue[i]);
3719 if (unlikely(ret < 0)) {
3720 netdev_err(priv->dev,
3721 "%s: alloc tx-%d MSI %d (error: %d)\n",
3722 __func__, i, priv->tx_irq[i], ret);
3723 irq_err = REQ_IRQ_ERR_TX;
3724 irq_idx = i;
3725 goto irq_error;
3726 }
3727 cpumask_clear(&cpu_mask);
3728 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3729 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3730 }
3731
3732 return 0;
3733
3734irq_error:
3735 stmmac_free_irq(dev, irq_err, irq_idx);
3736 return ret;
3737}
3738
3739static int stmmac_request_irq_single(struct net_device *dev)
3740{
3741 struct stmmac_priv *priv = netdev_priv(dev);
3742 enum request_irq_err irq_err;
3743 int ret;
3744
3745 ret = request_irq(dev->irq, stmmac_interrupt,
3746 IRQF_SHARED, dev->name, dev);
3747 if (unlikely(ret < 0)) {
3748 netdev_err(priv->dev,
3749 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3750 __func__, dev->irq, ret);
3751 irq_err = REQ_IRQ_ERR_MAC;
3752 goto irq_error;
3753 }
3754
3755 /* Request the Wake IRQ in case of another line
3756 * is used for WoL
3757 */
3758 priv->wol_irq_disabled = true;
3759 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3760 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3761 IRQF_SHARED, dev->name, dev);
3762 if (unlikely(ret < 0)) {
3763 netdev_err(priv->dev,
3764 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3765 __func__, priv->wol_irq, ret);
3766 irq_err = REQ_IRQ_ERR_WOL;
3767 goto irq_error;
3768 }
3769 }
3770
3771 /* Request the IRQ lines */
3772 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3773 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3774 IRQF_SHARED, dev->name, dev);
3775 if (unlikely(ret < 0)) {
3776 netdev_err(priv->dev,
3777 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3778 __func__, priv->lpi_irq, ret);
3779 irq_err = REQ_IRQ_ERR_LPI;
3780 goto irq_error;
3781 }
3782 }
3783
3784 /* Request the common Safety Feature Correctible/Uncorrectible
3785 * Error line in case of another line is used
3786 */
3787 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3788 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3789 IRQF_SHARED, dev->name, dev);
3790 if (unlikely(ret < 0)) {
3791 netdev_err(priv->dev,
3792 "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3793 __func__, priv->sfty_irq, ret);
3794 irq_err = REQ_IRQ_ERR_SFTY;
3795 goto irq_error;
3796 }
3797 }
3798
3799 return 0;
3800
3801irq_error:
3802 stmmac_free_irq(dev, irq_err, 0);
3803 return ret;
3804}
3805
3806static int stmmac_request_irq(struct net_device *dev)
3807{
3808 struct stmmac_priv *priv = netdev_priv(dev);
3809 int ret;
3810
3811 /* Request the IRQ lines */
3812 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3813 ret = stmmac_request_irq_multi_msi(dev);
3814 else
3815 ret = stmmac_request_irq_single(dev);
3816
3817 return ret;
3818}
3819
3820/**
3821 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3822 * @priv: driver private structure
3823 * @mtu: MTU to setup the dma queue and buf with
3824 * Description: Allocate and generate a dma_conf based on the provided MTU.
3825 * Allocate the Tx/Rx DMA queue and init them.
3826 * Return value:
3827 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3828 */
3829static struct stmmac_dma_conf *
3830stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3831{
3832 struct stmmac_dma_conf *dma_conf;
3833 int chan, bfsize, ret;
3834
3835 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3836 if (!dma_conf) {
3837 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3838 __func__);
3839 return ERR_PTR(-ENOMEM);
3840 }
3841
3842 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3843 if (bfsize < 0)
3844 bfsize = 0;
3845
3846 if (bfsize < BUF_SIZE_16KiB)
3847 bfsize = stmmac_set_bfsize(mtu, 0);
3848
3849 dma_conf->dma_buf_sz = bfsize;
3850 /* Chose the tx/rx size from the already defined one in the
3851 * priv struct. (if defined)
3852 */
3853 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3854 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3855
3856 if (!dma_conf->dma_tx_size)
3857 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3858 if (!dma_conf->dma_rx_size)
3859 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3860
3861 /* Earlier check for TBS */
3862 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3863 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3864 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3865
3866 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3867 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3868 }
3869
3870 ret = alloc_dma_desc_resources(priv, dma_conf);
3871 if (ret < 0) {
3872 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3873 __func__);
3874 goto alloc_error;
3875 }
3876
3877 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3878 if (ret < 0) {
3879 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3880 __func__);
3881 goto init_error;
3882 }
3883
3884 return dma_conf;
3885
3886init_error:
3887 free_dma_desc_resources(priv, dma_conf);
3888alloc_error:
3889 kfree(dma_conf);
3890 return ERR_PTR(ret);
3891}
3892
3893/**
3894 * __stmmac_open - open entry point of the driver
3895 * @dev : pointer to the device structure.
3896 * @dma_conf : structure to take the dma data
3897 * Description:
3898 * This function is the open entry point of the driver.
3899 * Return value:
3900 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3901 * file on failure.
3902 */
3903static int __stmmac_open(struct net_device *dev,
3904 struct stmmac_dma_conf *dma_conf)
3905{
3906 struct stmmac_priv *priv = netdev_priv(dev);
3907 int mode = priv->plat->phy_interface;
3908 u32 chan;
3909 int ret;
3910
3911 ret = pm_runtime_resume_and_get(priv->device);
3912 if (ret < 0)
3913 return ret;
3914
3915 if ((!priv->hw->xpcs ||
3916 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3917 ret = stmmac_init_phy(dev);
3918 if (ret) {
3919 netdev_err(priv->dev,
3920 "%s: Cannot attach to PHY (error: %d)\n",
3921 __func__, ret);
3922 goto init_phy_error;
3923 }
3924 }
3925
3926 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3927
3928 buf_sz = dma_conf->dma_buf_sz;
3929 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3930 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3931 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3932 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3933
3934 stmmac_reset_queues_param(priv);
3935
3936 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3937 priv->plat->serdes_powerup) {
3938 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3939 if (ret < 0) {
3940 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3941 __func__);
3942 goto init_error;
3943 }
3944 }
3945
3946 ret = stmmac_hw_setup(dev, true);
3947 if (ret < 0) {
3948 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3949 goto init_error;
3950 }
3951
3952 stmmac_init_coalesce(priv);
3953
3954 phylink_start(priv->phylink);
3955 /* We may have called phylink_speed_down before */
3956 phylink_speed_up(priv->phylink);
3957
3958 ret = stmmac_request_irq(dev);
3959 if (ret)
3960 goto irq_error;
3961
3962 stmmac_enable_all_queues(priv);
3963 netif_tx_start_all_queues(priv->dev);
3964 stmmac_enable_all_dma_irq(priv);
3965
3966 return 0;
3967
3968irq_error:
3969 phylink_stop(priv->phylink);
3970
3971 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3972 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3973
3974 stmmac_hw_teardown(dev);
3975init_error:
3976 phylink_disconnect_phy(priv->phylink);
3977init_phy_error:
3978 pm_runtime_put(priv->device);
3979 return ret;
3980}
3981
3982static int stmmac_open(struct net_device *dev)
3983{
3984 struct stmmac_priv *priv = netdev_priv(dev);
3985 struct stmmac_dma_conf *dma_conf;
3986 int ret;
3987
3988 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3989 if (IS_ERR(dma_conf))
3990 return PTR_ERR(dma_conf);
3991
3992 ret = __stmmac_open(dev, dma_conf);
3993 if (ret)
3994 free_dma_desc_resources(priv, dma_conf);
3995
3996 kfree(dma_conf);
3997 return ret;
3998}
3999
4000/**
4001 * stmmac_release - close entry point of the driver
4002 * @dev : device pointer.
4003 * Description:
4004 * This is the stop entry point of the driver.
4005 */
4006static int stmmac_release(struct net_device *dev)
4007{
4008 struct stmmac_priv *priv = netdev_priv(dev);
4009 u32 chan;
4010
4011 if (device_may_wakeup(priv->device))
4012 phylink_speed_down(priv->phylink, false);
4013 /* Stop and disconnect the PHY */
4014 phylink_stop(priv->phylink);
4015 phylink_disconnect_phy(priv->phylink);
4016
4017 stmmac_disable_all_queues(priv);
4018
4019 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4020 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4021
4022 netif_tx_disable(dev);
4023
4024 /* Free the IRQ lines */
4025 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4026
4027 if (priv->eee_enabled) {
4028 priv->tx_path_in_lpi_mode = false;
4029 del_timer_sync(&priv->eee_ctrl_timer);
4030 }
4031
4032 /* Stop TX/RX DMA and clear the descriptors */
4033 stmmac_stop_all_dma(priv);
4034
4035 /* Release and free the Rx/Tx resources */
4036 free_dma_desc_resources(priv, &priv->dma_conf);
4037
4038 /* Disable the MAC Rx/Tx */
4039 stmmac_mac_set(priv, priv->ioaddr, false);
4040
4041 /* Powerdown Serdes if there is */
4042 if (priv->plat->serdes_powerdown)
4043 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4044
4045 stmmac_release_ptp(priv);
4046
4047 if (stmmac_fpe_supported(priv))
4048 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4049
4050 pm_runtime_put(priv->device);
4051
4052 return 0;
4053}
4054
4055static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4056 struct stmmac_tx_queue *tx_q)
4057{
4058 u16 tag = 0x0, inner_tag = 0x0;
4059 u32 inner_type = 0x0;
4060 struct dma_desc *p;
4061
4062 if (!priv->dma_cap.vlins)
4063 return false;
4064 if (!skb_vlan_tag_present(skb))
4065 return false;
4066 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4067 inner_tag = skb_vlan_tag_get(skb);
4068 inner_type = STMMAC_VLAN_INSERT;
4069 }
4070
4071 tag = skb_vlan_tag_get(skb);
4072
4073 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4074 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4075 else
4076 p = &tx_q->dma_tx[tx_q->cur_tx];
4077
4078 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4079 return false;
4080
4081 stmmac_set_tx_owner(priv, p);
4082 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4083 return true;
4084}
4085
4086/**
4087 * stmmac_tso_allocator - close entry point of the driver
4088 * @priv: driver private structure
4089 * @des: buffer start address
4090 * @total_len: total length to fill in descriptors
4091 * @last_segment: condition for the last descriptor
4092 * @queue: TX queue index
4093 * Description:
4094 * This function fills descriptor and request new descriptors according to
4095 * buffer length to fill
4096 */
4097static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4098 int total_len, bool last_segment, u32 queue)
4099{
4100 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4101 struct dma_desc *desc;
4102 u32 buff_size;
4103 int tmp_len;
4104
4105 tmp_len = total_len;
4106
4107 while (tmp_len > 0) {
4108 dma_addr_t curr_addr;
4109
4110 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4111 priv->dma_conf.dma_tx_size);
4112 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4113
4114 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4115 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4116 else
4117 desc = &tx_q->dma_tx[tx_q->cur_tx];
4118
4119 curr_addr = des + (total_len - tmp_len);
4120 if (priv->dma_cap.addr64 <= 32)
4121 desc->des0 = cpu_to_le32(curr_addr);
4122 else
4123 stmmac_set_desc_addr(priv, desc, curr_addr);
4124
4125 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4126 TSO_MAX_BUFF_SIZE : tmp_len;
4127
4128 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4129 0, 1,
4130 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4131 0, 0);
4132
4133 tmp_len -= TSO_MAX_BUFF_SIZE;
4134 }
4135}
4136
4137static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4138{
4139 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4140 int desc_size;
4141
4142 if (likely(priv->extend_desc))
4143 desc_size = sizeof(struct dma_extended_desc);
4144 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4145 desc_size = sizeof(struct dma_edesc);
4146 else
4147 desc_size = sizeof(struct dma_desc);
4148
4149 /* The own bit must be the latest setting done when prepare the
4150 * descriptor and then barrier is needed to make sure that
4151 * all is coherent before granting the DMA engine.
4152 */
4153 wmb();
4154
4155 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4156 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4157}
4158
4159/**
4160 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4161 * @skb : the socket buffer
4162 * @dev : device pointer
4163 * Description: this is the transmit function that is called on TSO frames
4164 * (support available on GMAC4 and newer chips).
4165 * Diagram below show the ring programming in case of TSO frames:
4166 *
4167 * First Descriptor
4168 * --------
4169 * | DES0 |---> buffer1 = L2/L3/L4 header
4170 * | DES1 |---> TCP Payload (can continue on next descr...)
4171 * | DES2 |---> buffer 1 and 2 len
4172 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4173 * --------
4174 * |
4175 * ...
4176 * |
4177 * --------
4178 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4179 * | DES1 | --|
4180 * | DES2 | --> buffer 1 and 2 len
4181 * | DES3 |
4182 * --------
4183 *
4184 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4185 */
4186static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4187{
4188 struct dma_desc *desc, *first, *mss_desc = NULL;
4189 struct stmmac_priv *priv = netdev_priv(dev);
4190 int tmp_pay_len = 0, first_tx, nfrags;
4191 unsigned int first_entry, tx_packets;
4192 struct stmmac_txq_stats *txq_stats;
4193 struct stmmac_tx_queue *tx_q;
4194 u32 pay_len, mss, queue;
4195 dma_addr_t tso_des, des;
4196 u8 proto_hdr_len, hdr;
4197 bool set_ic;
4198 int i;
4199
4200 /* Always insert VLAN tag to SKB payload for TSO frames.
4201 *
4202 * Never insert VLAN tag by HW, since segments splited by
4203 * TSO engine will be un-tagged by mistake.
4204 */
4205 if (skb_vlan_tag_present(skb)) {
4206 skb = __vlan_hwaccel_push_inside(skb);
4207 if (unlikely(!skb)) {
4208 priv->xstats.tx_dropped++;
4209 return NETDEV_TX_OK;
4210 }
4211 }
4212
4213 nfrags = skb_shinfo(skb)->nr_frags;
4214 queue = skb_get_queue_mapping(skb);
4215
4216 tx_q = &priv->dma_conf.tx_queue[queue];
4217 txq_stats = &priv->xstats.txq_stats[queue];
4218 first_tx = tx_q->cur_tx;
4219
4220 /* Compute header lengths */
4221 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4222 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4223 hdr = sizeof(struct udphdr);
4224 } else {
4225 proto_hdr_len = skb_tcp_all_headers(skb);
4226 hdr = tcp_hdrlen(skb);
4227 }
4228
4229 /* Desc availability based on threshold should be enough safe */
4230 if (unlikely(stmmac_tx_avail(priv, queue) <
4231 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4232 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4233 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4234 queue));
4235 /* This is a hard error, log it. */
4236 netdev_err(priv->dev,
4237 "%s: Tx Ring full when queue awake\n",
4238 __func__);
4239 }
4240 return NETDEV_TX_BUSY;
4241 }
4242
4243 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4244
4245 mss = skb_shinfo(skb)->gso_size;
4246
4247 /* set new MSS value if needed */
4248 if (mss != tx_q->mss) {
4249 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4250 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4251 else
4252 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4253
4254 stmmac_set_mss(priv, mss_desc, mss);
4255 tx_q->mss = mss;
4256 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4257 priv->dma_conf.dma_tx_size);
4258 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4259 }
4260
4261 if (netif_msg_tx_queued(priv)) {
4262 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4263 __func__, hdr, proto_hdr_len, pay_len, mss);
4264 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4265 skb->data_len);
4266 }
4267
4268 first_entry = tx_q->cur_tx;
4269 WARN_ON(tx_q->tx_skbuff[first_entry]);
4270
4271 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4272 desc = &tx_q->dma_entx[first_entry].basic;
4273 else
4274 desc = &tx_q->dma_tx[first_entry];
4275 first = desc;
4276
4277 /* first descriptor: fill Headers on Buf1 */
4278 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4279 DMA_TO_DEVICE);
4280 if (dma_mapping_error(priv->device, des))
4281 goto dma_map_err;
4282
4283 if (priv->dma_cap.addr64 <= 32) {
4284 first->des0 = cpu_to_le32(des);
4285
4286 /* Fill start of payload in buff2 of first descriptor */
4287 if (pay_len)
4288 first->des1 = cpu_to_le32(des + proto_hdr_len);
4289
4290 /* If needed take extra descriptors to fill the remaining payload */
4291 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4292 tso_des = des;
4293 } else {
4294 stmmac_set_desc_addr(priv, first, des);
4295 tmp_pay_len = pay_len;
4296 tso_des = des + proto_hdr_len;
4297 pay_len = 0;
4298 }
4299
4300 stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
4301
4302 /* In case two or more DMA transmit descriptors are allocated for this
4303 * non-paged SKB data, the DMA buffer address should be saved to
4304 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4305 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4306 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4307 * since the tail areas of the DMA buffer can be accessed by DMA engine
4308 * sooner or later.
4309 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4310 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4311 * this DMA buffer right after the DMA engine completely finishes the
4312 * full buffer transmission.
4313 */
4314 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4315 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4316 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4317 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4318
4319 /* Prepare fragments */
4320 for (i = 0; i < nfrags; i++) {
4321 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4322
4323 des = skb_frag_dma_map(priv->device, frag, 0,
4324 skb_frag_size(frag),
4325 DMA_TO_DEVICE);
4326 if (dma_mapping_error(priv->device, des))
4327 goto dma_map_err;
4328
4329 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4330 (i == nfrags - 1), queue);
4331
4332 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4333 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4334 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4335 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4336 }
4337
4338 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4339
4340 /* Only the last descriptor gets to point to the skb. */
4341 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4342 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4343
4344 /* Manage tx mitigation */
4345 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4346 tx_q->tx_count_frames += tx_packets;
4347
4348 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4349 set_ic = true;
4350 else if (!priv->tx_coal_frames[queue])
4351 set_ic = false;
4352 else if (tx_packets > priv->tx_coal_frames[queue])
4353 set_ic = true;
4354 else if ((tx_q->tx_count_frames %
4355 priv->tx_coal_frames[queue]) < tx_packets)
4356 set_ic = true;
4357 else
4358 set_ic = false;
4359
4360 if (set_ic) {
4361 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4362 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4363 else
4364 desc = &tx_q->dma_tx[tx_q->cur_tx];
4365
4366 tx_q->tx_count_frames = 0;
4367 stmmac_set_tx_ic(priv, desc);
4368 }
4369
4370 /* We've used all descriptors we need for this skb, however,
4371 * advance cur_tx so that it references a fresh descriptor.
4372 * ndo_start_xmit will fill this descriptor the next time it's
4373 * called and stmmac_tx_clean may clean up to this descriptor.
4374 */
4375 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4376
4377 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4378 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4379 __func__);
4380 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4381 }
4382
4383 u64_stats_update_begin(&txq_stats->q_syncp);
4384 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4385 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4386 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4387 if (set_ic)
4388 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4389 u64_stats_update_end(&txq_stats->q_syncp);
4390
4391 if (priv->sarc_type)
4392 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4393
4394 skb_tx_timestamp(skb);
4395
4396 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4397 priv->hwts_tx_en)) {
4398 /* declare that device is doing timestamping */
4399 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4400 stmmac_enable_tx_timestamp(priv, first);
4401 }
4402
4403 /* Complete the first descriptor before granting the DMA */
4404 stmmac_prepare_tso_tx_desc(priv, first, 1,
4405 proto_hdr_len,
4406 pay_len,
4407 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4408 hdr / 4, (skb->len - proto_hdr_len));
4409
4410 /* If context desc is used to change MSS */
4411 if (mss_desc) {
4412 /* Make sure that first descriptor has been completely
4413 * written, including its own bit. This is because MSS is
4414 * actually before first descriptor, so we need to make
4415 * sure that MSS's own bit is the last thing written.
4416 */
4417 dma_wmb();
4418 stmmac_set_tx_owner(priv, mss_desc);
4419 }
4420
4421 if (netif_msg_pktdata(priv)) {
4422 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4423 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4424 tx_q->cur_tx, first, nfrags);
4425 pr_info(">>> frame to be transmitted: ");
4426 print_pkt(skb->data, skb_headlen(skb));
4427 }
4428
4429 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4430
4431 stmmac_flush_tx_descriptors(priv, queue);
4432 stmmac_tx_timer_arm(priv, queue);
4433
4434 return NETDEV_TX_OK;
4435
4436dma_map_err:
4437 dev_err(priv->device, "Tx dma map failed\n");
4438 dev_kfree_skb(skb);
4439 priv->xstats.tx_dropped++;
4440 return NETDEV_TX_OK;
4441}
4442
4443/**
4444 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4445 * @skb: socket buffer to check
4446 *
4447 * Check if a packet has an ethertype that will trigger the IP header checks
4448 * and IP/TCP checksum engine of the stmmac core.
4449 *
4450 * Return: true if the ethertype can trigger the checksum engine, false
4451 * otherwise
4452 */
4453static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4454{
4455 int depth = 0;
4456 __be16 proto;
4457
4458 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4459 &depth);
4460
4461 return (depth <= ETH_HLEN) &&
4462 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4463}
4464
4465/**
4466 * stmmac_xmit - Tx entry point of the driver
4467 * @skb : the socket buffer
4468 * @dev : device pointer
4469 * Description : this is the tx entry point of the driver.
4470 * It programs the chain or the ring and supports oversized frames
4471 * and SG feature.
4472 */
4473static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4474{
4475 unsigned int first_entry, tx_packets, enh_desc;
4476 struct stmmac_priv *priv = netdev_priv(dev);
4477 unsigned int nopaged_len = skb_headlen(skb);
4478 int i, csum_insertion = 0, is_jumbo = 0;
4479 u32 queue = skb_get_queue_mapping(skb);
4480 int nfrags = skb_shinfo(skb)->nr_frags;
4481 int gso = skb_shinfo(skb)->gso_type;
4482 struct stmmac_txq_stats *txq_stats;
4483 struct dma_edesc *tbs_desc = NULL;
4484 struct dma_desc *desc, *first;
4485 struct stmmac_tx_queue *tx_q;
4486 bool has_vlan, set_ic;
4487 int entry, first_tx;
4488 dma_addr_t des;
4489
4490 tx_q = &priv->dma_conf.tx_queue[queue];
4491 txq_stats = &priv->xstats.txq_stats[queue];
4492 first_tx = tx_q->cur_tx;
4493
4494 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4495 stmmac_disable_eee_mode(priv);
4496
4497 /* Manage oversized TCP frames for GMAC4 device */
4498 if (skb_is_gso(skb) && priv->tso) {
4499 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4500 return stmmac_tso_xmit(skb, dev);
4501 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4502 return stmmac_tso_xmit(skb, dev);
4503 }
4504
4505 if (priv->est && priv->est->enable &&
4506 priv->est->max_sdu[queue] &&
4507 skb->len > priv->est->max_sdu[queue]){
4508 priv->xstats.max_sdu_txq_drop[queue]++;
4509 goto max_sdu_err;
4510 }
4511
4512 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4513 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4514 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4515 queue));
4516 /* This is a hard error, log it. */
4517 netdev_err(priv->dev,
4518 "%s: Tx Ring full when queue awake\n",
4519 __func__);
4520 }
4521 return NETDEV_TX_BUSY;
4522 }
4523
4524 /* Check if VLAN can be inserted by HW */
4525 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4526
4527 entry = tx_q->cur_tx;
4528 first_entry = entry;
4529 WARN_ON(tx_q->tx_skbuff[first_entry]);
4530
4531 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4532 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4533 * queues. In that case, checksum offloading for those queues that don't
4534 * support tx coe needs to fallback to software checksum calculation.
4535 *
4536 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4537 * also have to be checksummed in software.
4538 */
4539 if (csum_insertion &&
4540 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4541 !stmmac_has_ip_ethertype(skb))) {
4542 if (unlikely(skb_checksum_help(skb)))
4543 goto dma_map_err;
4544 csum_insertion = !csum_insertion;
4545 }
4546
4547 if (likely(priv->extend_desc))
4548 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4549 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4550 desc = &tx_q->dma_entx[entry].basic;
4551 else
4552 desc = tx_q->dma_tx + entry;
4553
4554 first = desc;
4555
4556 if (has_vlan)
4557 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4558
4559 enh_desc = priv->plat->enh_desc;
4560 /* To program the descriptors according to the size of the frame */
4561 if (enh_desc)
4562 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4563
4564 if (unlikely(is_jumbo)) {
4565 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4566 if (unlikely(entry < 0) && (entry != -EINVAL))
4567 goto dma_map_err;
4568 }
4569
4570 for (i = 0; i < nfrags; i++) {
4571 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4572 int len = skb_frag_size(frag);
4573 bool last_segment = (i == (nfrags - 1));
4574
4575 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4576 WARN_ON(tx_q->tx_skbuff[entry]);
4577
4578 if (likely(priv->extend_desc))
4579 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4580 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4581 desc = &tx_q->dma_entx[entry].basic;
4582 else
4583 desc = tx_q->dma_tx + entry;
4584
4585 des = skb_frag_dma_map(priv->device, frag, 0, len,
4586 DMA_TO_DEVICE);
4587 if (dma_mapping_error(priv->device, des))
4588 goto dma_map_err; /* should reuse desc w/o issues */
4589
4590 tx_q->tx_skbuff_dma[entry].buf = des;
4591
4592 stmmac_set_desc_addr(priv, desc, des);
4593
4594 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4595 tx_q->tx_skbuff_dma[entry].len = len;
4596 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4597 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4598
4599 /* Prepare the descriptor and set the own bit too */
4600 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4601 priv->mode, 1, last_segment, skb->len);
4602 }
4603
4604 /* Only the last descriptor gets to point to the skb. */
4605 tx_q->tx_skbuff[entry] = skb;
4606 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4607
4608 /* According to the coalesce parameter the IC bit for the latest
4609 * segment is reset and the timer re-started to clean the tx status.
4610 * This approach takes care about the fragments: desc is the first
4611 * element in case of no SG.
4612 */
4613 tx_packets = (entry + 1) - first_tx;
4614 tx_q->tx_count_frames += tx_packets;
4615
4616 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4617 set_ic = true;
4618 else if (!priv->tx_coal_frames[queue])
4619 set_ic = false;
4620 else if (tx_packets > priv->tx_coal_frames[queue])
4621 set_ic = true;
4622 else if ((tx_q->tx_count_frames %
4623 priv->tx_coal_frames[queue]) < tx_packets)
4624 set_ic = true;
4625 else
4626 set_ic = false;
4627
4628 if (set_ic) {
4629 if (likely(priv->extend_desc))
4630 desc = &tx_q->dma_etx[entry].basic;
4631 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4632 desc = &tx_q->dma_entx[entry].basic;
4633 else
4634 desc = &tx_q->dma_tx[entry];
4635
4636 tx_q->tx_count_frames = 0;
4637 stmmac_set_tx_ic(priv, desc);
4638 }
4639
4640 /* We've used all descriptors we need for this skb, however,
4641 * advance cur_tx so that it references a fresh descriptor.
4642 * ndo_start_xmit will fill this descriptor the next time it's
4643 * called and stmmac_tx_clean may clean up to this descriptor.
4644 */
4645 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4646 tx_q->cur_tx = entry;
4647
4648 if (netif_msg_pktdata(priv)) {
4649 netdev_dbg(priv->dev,
4650 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4651 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4652 entry, first, nfrags);
4653
4654 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4655 print_pkt(skb->data, skb->len);
4656 }
4657
4658 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4659 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4660 __func__);
4661 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4662 }
4663
4664 u64_stats_update_begin(&txq_stats->q_syncp);
4665 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4666 if (set_ic)
4667 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4668 u64_stats_update_end(&txq_stats->q_syncp);
4669
4670 if (priv->sarc_type)
4671 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4672
4673 skb_tx_timestamp(skb);
4674
4675 /* Ready to fill the first descriptor and set the OWN bit w/o any
4676 * problems because all the descriptors are actually ready to be
4677 * passed to the DMA engine.
4678 */
4679 if (likely(!is_jumbo)) {
4680 bool last_segment = (nfrags == 0);
4681
4682 des = dma_map_single(priv->device, skb->data,
4683 nopaged_len, DMA_TO_DEVICE);
4684 if (dma_mapping_error(priv->device, des))
4685 goto dma_map_err;
4686
4687 tx_q->tx_skbuff_dma[first_entry].buf = des;
4688 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4689 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4690
4691 stmmac_set_desc_addr(priv, first, des);
4692
4693 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4694 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4695
4696 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4697 priv->hwts_tx_en)) {
4698 /* declare that device is doing timestamping */
4699 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4700 stmmac_enable_tx_timestamp(priv, first);
4701 }
4702
4703 /* Prepare the first descriptor setting the OWN bit too */
4704 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4705 csum_insertion, priv->mode, 0, last_segment,
4706 skb->len);
4707 }
4708
4709 if (tx_q->tbs & STMMAC_TBS_EN) {
4710 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4711
4712 tbs_desc = &tx_q->dma_entx[first_entry];
4713 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4714 }
4715
4716 stmmac_set_tx_owner(priv, first);
4717
4718 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4719
4720 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4721
4722 stmmac_flush_tx_descriptors(priv, queue);
4723 stmmac_tx_timer_arm(priv, queue);
4724
4725 return NETDEV_TX_OK;
4726
4727dma_map_err:
4728 netdev_err(priv->dev, "Tx DMA map failed\n");
4729max_sdu_err:
4730 dev_kfree_skb(skb);
4731 priv->xstats.tx_dropped++;
4732 return NETDEV_TX_OK;
4733}
4734
4735static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4736{
4737 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4738 __be16 vlan_proto = veth->h_vlan_proto;
4739 u16 vlanid;
4740
4741 if ((vlan_proto == htons(ETH_P_8021Q) &&
4742 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4743 (vlan_proto == htons(ETH_P_8021AD) &&
4744 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4745 /* pop the vlan tag */
4746 vlanid = ntohs(veth->h_vlan_TCI);
4747 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4748 skb_pull(skb, VLAN_HLEN);
4749 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4750 }
4751}
4752
4753/**
4754 * stmmac_rx_refill - refill used skb preallocated buffers
4755 * @priv: driver private structure
4756 * @queue: RX queue index
4757 * Description : this is to reallocate the skb for the reception process
4758 * that is based on zero-copy.
4759 */
4760static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4761{
4762 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4763 int dirty = stmmac_rx_dirty(priv, queue);
4764 unsigned int entry = rx_q->dirty_rx;
4765 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4766
4767 if (priv->dma_cap.host_dma_width <= 32)
4768 gfp |= GFP_DMA32;
4769
4770 while (dirty-- > 0) {
4771 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4772 struct dma_desc *p;
4773 bool use_rx_wd;
4774
4775 if (priv->extend_desc)
4776 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4777 else
4778 p = rx_q->dma_rx + entry;
4779
4780 if (!buf->page) {
4781 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4782 if (!buf->page)
4783 break;
4784 }
4785
4786 if (priv->sph && !buf->sec_page) {
4787 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4788 if (!buf->sec_page)
4789 break;
4790
4791 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4792 }
4793
4794 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4795
4796 stmmac_set_desc_addr(priv, p, buf->addr);
4797 if (priv->sph)
4798 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4799 else
4800 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4801 stmmac_refill_desc3(priv, rx_q, p);
4802
4803 rx_q->rx_count_frames++;
4804 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4805 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4806 rx_q->rx_count_frames = 0;
4807
4808 use_rx_wd = !priv->rx_coal_frames[queue];
4809 use_rx_wd |= rx_q->rx_count_frames > 0;
4810 if (!priv->use_riwt)
4811 use_rx_wd = false;
4812
4813 dma_wmb();
4814 stmmac_set_rx_owner(priv, p, use_rx_wd);
4815
4816 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4817 }
4818 rx_q->dirty_rx = entry;
4819 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4820 (rx_q->dirty_rx * sizeof(struct dma_desc));
4821 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4822}
4823
4824static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4825 struct dma_desc *p,
4826 int status, unsigned int len)
4827{
4828 unsigned int plen = 0, hlen = 0;
4829 int coe = priv->hw->rx_csum;
4830
4831 /* Not first descriptor, buffer is always zero */
4832 if (priv->sph && len)
4833 return 0;
4834
4835 /* First descriptor, get split header length */
4836 stmmac_get_rx_header_len(priv, p, &hlen);
4837 if (priv->sph && hlen) {
4838 priv->xstats.rx_split_hdr_pkt_n++;
4839 return hlen;
4840 }
4841
4842 /* First descriptor, not last descriptor and not split header */
4843 if (status & rx_not_ls)
4844 return priv->dma_conf.dma_buf_sz;
4845
4846 plen = stmmac_get_rx_frame_len(priv, p, coe);
4847
4848 /* First descriptor and last descriptor and not split header */
4849 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4850}
4851
4852static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4853 struct dma_desc *p,
4854 int status, unsigned int len)
4855{
4856 int coe = priv->hw->rx_csum;
4857 unsigned int plen = 0;
4858
4859 /* Not split header, buffer is not available */
4860 if (!priv->sph)
4861 return 0;
4862
4863 /* Not last descriptor */
4864 if (status & rx_not_ls)
4865 return priv->dma_conf.dma_buf_sz;
4866
4867 plen = stmmac_get_rx_frame_len(priv, p, coe);
4868
4869 /* Last descriptor */
4870 return plen - len;
4871}
4872
4873static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4874 struct xdp_frame *xdpf, bool dma_map)
4875{
4876 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4877 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4878 unsigned int entry = tx_q->cur_tx;
4879 struct dma_desc *tx_desc;
4880 dma_addr_t dma_addr;
4881 bool set_ic;
4882
4883 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4884 return STMMAC_XDP_CONSUMED;
4885
4886 if (priv->est && priv->est->enable &&
4887 priv->est->max_sdu[queue] &&
4888 xdpf->len > priv->est->max_sdu[queue]) {
4889 priv->xstats.max_sdu_txq_drop[queue]++;
4890 return STMMAC_XDP_CONSUMED;
4891 }
4892
4893 if (likely(priv->extend_desc))
4894 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4895 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4896 tx_desc = &tx_q->dma_entx[entry].basic;
4897 else
4898 tx_desc = tx_q->dma_tx + entry;
4899
4900 if (dma_map) {
4901 dma_addr = dma_map_single(priv->device, xdpf->data,
4902 xdpf->len, DMA_TO_DEVICE);
4903 if (dma_mapping_error(priv->device, dma_addr))
4904 return STMMAC_XDP_CONSUMED;
4905
4906 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4907 } else {
4908 struct page *page = virt_to_page(xdpf->data);
4909
4910 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4911 xdpf->headroom;
4912 dma_sync_single_for_device(priv->device, dma_addr,
4913 xdpf->len, DMA_BIDIRECTIONAL);
4914
4915 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4916 }
4917
4918 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4919 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4920 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4921 tx_q->tx_skbuff_dma[entry].last_segment = true;
4922 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4923
4924 tx_q->xdpf[entry] = xdpf;
4925
4926 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4927
4928 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4929 true, priv->mode, true, true,
4930 xdpf->len);
4931
4932 tx_q->tx_count_frames++;
4933
4934 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4935 set_ic = true;
4936 else
4937 set_ic = false;
4938
4939 if (set_ic) {
4940 tx_q->tx_count_frames = 0;
4941 stmmac_set_tx_ic(priv, tx_desc);
4942 u64_stats_update_begin(&txq_stats->q_syncp);
4943 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4944 u64_stats_update_end(&txq_stats->q_syncp);
4945 }
4946
4947 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4948
4949 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4950 tx_q->cur_tx = entry;
4951
4952 return STMMAC_XDP_TX;
4953}
4954
4955static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4956 int cpu)
4957{
4958 int index = cpu;
4959
4960 if (unlikely(index < 0))
4961 index = 0;
4962
4963 while (index >= priv->plat->tx_queues_to_use)
4964 index -= priv->plat->tx_queues_to_use;
4965
4966 return index;
4967}
4968
4969static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4970 struct xdp_buff *xdp)
4971{
4972 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4973 int cpu = smp_processor_id();
4974 struct netdev_queue *nq;
4975 int queue;
4976 int res;
4977
4978 if (unlikely(!xdpf))
4979 return STMMAC_XDP_CONSUMED;
4980
4981 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4982 nq = netdev_get_tx_queue(priv->dev, queue);
4983
4984 __netif_tx_lock(nq, cpu);
4985 /* Avoids TX time-out as we are sharing with slow path */
4986 txq_trans_cond_update(nq);
4987
4988 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4989 if (res == STMMAC_XDP_TX)
4990 stmmac_flush_tx_descriptors(priv, queue);
4991
4992 __netif_tx_unlock(nq);
4993
4994 return res;
4995}
4996
4997static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4998 struct bpf_prog *prog,
4999 struct xdp_buff *xdp)
5000{
5001 u32 act;
5002 int res;
5003
5004 act = bpf_prog_run_xdp(prog, xdp);
5005 switch (act) {
5006 case XDP_PASS:
5007 res = STMMAC_XDP_PASS;
5008 break;
5009 case XDP_TX:
5010 res = stmmac_xdp_xmit_back(priv, xdp);
5011 break;
5012 case XDP_REDIRECT:
5013 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5014 res = STMMAC_XDP_CONSUMED;
5015 else
5016 res = STMMAC_XDP_REDIRECT;
5017 break;
5018 default:
5019 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5020 fallthrough;
5021 case XDP_ABORTED:
5022 trace_xdp_exception(priv->dev, prog, act);
5023 fallthrough;
5024 case XDP_DROP:
5025 res = STMMAC_XDP_CONSUMED;
5026 break;
5027 }
5028
5029 return res;
5030}
5031
5032static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5033 struct xdp_buff *xdp)
5034{
5035 struct bpf_prog *prog;
5036 int res;
5037
5038 prog = READ_ONCE(priv->xdp_prog);
5039 if (!prog) {
5040 res = STMMAC_XDP_PASS;
5041 goto out;
5042 }
5043
5044 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5045out:
5046 return ERR_PTR(-res);
5047}
5048
5049static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5050 int xdp_status)
5051{
5052 int cpu = smp_processor_id();
5053 int queue;
5054
5055 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5056
5057 if (xdp_status & STMMAC_XDP_TX)
5058 stmmac_tx_timer_arm(priv, queue);
5059
5060 if (xdp_status & STMMAC_XDP_REDIRECT)
5061 xdp_do_flush();
5062}
5063
5064static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5065 struct xdp_buff *xdp)
5066{
5067 unsigned int metasize = xdp->data - xdp->data_meta;
5068 unsigned int datasize = xdp->data_end - xdp->data;
5069 struct sk_buff *skb;
5070
5071 skb = napi_alloc_skb(&ch->rxtx_napi,
5072 xdp->data_end - xdp->data_hard_start);
5073 if (unlikely(!skb))
5074 return NULL;
5075
5076 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5077 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5078 if (metasize)
5079 skb_metadata_set(skb, metasize);
5080
5081 return skb;
5082}
5083
5084static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5085 struct dma_desc *p, struct dma_desc *np,
5086 struct xdp_buff *xdp)
5087{
5088 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5089 struct stmmac_channel *ch = &priv->channel[queue];
5090 unsigned int len = xdp->data_end - xdp->data;
5091 enum pkt_hash_types hash_type;
5092 int coe = priv->hw->rx_csum;
5093 struct sk_buff *skb;
5094 u32 hash;
5095
5096 skb = stmmac_construct_skb_zc(ch, xdp);
5097 if (!skb) {
5098 priv->xstats.rx_dropped++;
5099 return;
5100 }
5101
5102 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5103 if (priv->hw->hw_vlan_en)
5104 /* MAC level stripping. */
5105 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5106 else
5107 /* Driver level stripping. */
5108 stmmac_rx_vlan(priv->dev, skb);
5109 skb->protocol = eth_type_trans(skb, priv->dev);
5110
5111 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5112 skb_checksum_none_assert(skb);
5113 else
5114 skb->ip_summed = CHECKSUM_UNNECESSARY;
5115
5116 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5117 skb_set_hash(skb, hash, hash_type);
5118
5119 skb_record_rx_queue(skb, queue);
5120 napi_gro_receive(&ch->rxtx_napi, skb);
5121
5122 u64_stats_update_begin(&rxq_stats->napi_syncp);
5123 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5124 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5125 u64_stats_update_end(&rxq_stats->napi_syncp);
5126}
5127
5128static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5129{
5130 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5131 unsigned int entry = rx_q->dirty_rx;
5132 struct dma_desc *rx_desc = NULL;
5133 bool ret = true;
5134
5135 budget = min(budget, stmmac_rx_dirty(priv, queue));
5136
5137 while (budget-- > 0 && entry != rx_q->cur_rx) {
5138 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5139 dma_addr_t dma_addr;
5140 bool use_rx_wd;
5141
5142 if (!buf->xdp) {
5143 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5144 if (!buf->xdp) {
5145 ret = false;
5146 break;
5147 }
5148 }
5149
5150 if (priv->extend_desc)
5151 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5152 else
5153 rx_desc = rx_q->dma_rx + entry;
5154
5155 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5156 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5157 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5158 stmmac_refill_desc3(priv, rx_q, rx_desc);
5159
5160 rx_q->rx_count_frames++;
5161 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5162 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5163 rx_q->rx_count_frames = 0;
5164
5165 use_rx_wd = !priv->rx_coal_frames[queue];
5166 use_rx_wd |= rx_q->rx_count_frames > 0;
5167 if (!priv->use_riwt)
5168 use_rx_wd = false;
5169
5170 dma_wmb();
5171 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5172
5173 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5174 }
5175
5176 if (rx_desc) {
5177 rx_q->dirty_rx = entry;
5178 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5179 (rx_q->dirty_rx * sizeof(struct dma_desc));
5180 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5181 }
5182
5183 return ret;
5184}
5185
5186static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5187{
5188 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5189 * to represent incoming packet, whereas cb field in the same structure
5190 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5191 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5192 */
5193 return (struct stmmac_xdp_buff *)xdp;
5194}
5195
5196static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5197{
5198 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5199 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5200 unsigned int count = 0, error = 0, len = 0;
5201 int dirty = stmmac_rx_dirty(priv, queue);
5202 unsigned int next_entry = rx_q->cur_rx;
5203 u32 rx_errors = 0, rx_dropped = 0;
5204 unsigned int desc_size;
5205 struct bpf_prog *prog;
5206 bool failure = false;
5207 int xdp_status = 0;
5208 int status = 0;
5209
5210 if (netif_msg_rx_status(priv)) {
5211 void *rx_head;
5212
5213 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5214 if (priv->extend_desc) {
5215 rx_head = (void *)rx_q->dma_erx;
5216 desc_size = sizeof(struct dma_extended_desc);
5217 } else {
5218 rx_head = (void *)rx_q->dma_rx;
5219 desc_size = sizeof(struct dma_desc);
5220 }
5221
5222 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5223 rx_q->dma_rx_phy, desc_size);
5224 }
5225 while (count < limit) {
5226 struct stmmac_rx_buffer *buf;
5227 struct stmmac_xdp_buff *ctx;
5228 unsigned int buf1_len = 0;
5229 struct dma_desc *np, *p;
5230 int entry;
5231 int res;
5232
5233 if (!count && rx_q->state_saved) {
5234 error = rx_q->state.error;
5235 len = rx_q->state.len;
5236 } else {
5237 rx_q->state_saved = false;
5238 error = 0;
5239 len = 0;
5240 }
5241
5242 if (count >= limit)
5243 break;
5244
5245read_again:
5246 buf1_len = 0;
5247 entry = next_entry;
5248 buf = &rx_q->buf_pool[entry];
5249
5250 if (dirty >= STMMAC_RX_FILL_BATCH) {
5251 failure = failure ||
5252 !stmmac_rx_refill_zc(priv, queue, dirty);
5253 dirty = 0;
5254 }
5255
5256 if (priv->extend_desc)
5257 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5258 else
5259 p = rx_q->dma_rx + entry;
5260
5261 /* read the status of the incoming frame */
5262 status = stmmac_rx_status(priv, &priv->xstats, p);
5263 /* check if managed by the DMA otherwise go ahead */
5264 if (unlikely(status & dma_own))
5265 break;
5266
5267 /* Prefetch the next RX descriptor */
5268 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5269 priv->dma_conf.dma_rx_size);
5270 next_entry = rx_q->cur_rx;
5271
5272 if (priv->extend_desc)
5273 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5274 else
5275 np = rx_q->dma_rx + next_entry;
5276
5277 prefetch(np);
5278
5279 /* Ensure a valid XSK buffer before proceed */
5280 if (!buf->xdp)
5281 break;
5282
5283 if (priv->extend_desc)
5284 stmmac_rx_extended_status(priv, &priv->xstats,
5285 rx_q->dma_erx + entry);
5286 if (unlikely(status == discard_frame)) {
5287 xsk_buff_free(buf->xdp);
5288 buf->xdp = NULL;
5289 dirty++;
5290 error = 1;
5291 if (!priv->hwts_rx_en)
5292 rx_errors++;
5293 }
5294
5295 if (unlikely(error && (status & rx_not_ls)))
5296 goto read_again;
5297 if (unlikely(error)) {
5298 count++;
5299 continue;
5300 }
5301
5302 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5303 if (likely(status & rx_not_ls)) {
5304 xsk_buff_free(buf->xdp);
5305 buf->xdp = NULL;
5306 dirty++;
5307 count++;
5308 goto read_again;
5309 }
5310
5311 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5312 ctx->priv = priv;
5313 ctx->desc = p;
5314 ctx->ndesc = np;
5315
5316 /* XDP ZC Frame only support primary buffers for now */
5317 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5318 len += buf1_len;
5319
5320 /* ACS is disabled; strip manually. */
5321 if (likely(!(status & rx_not_ls))) {
5322 buf1_len -= ETH_FCS_LEN;
5323 len -= ETH_FCS_LEN;
5324 }
5325
5326 /* RX buffer is good and fit into a XSK pool buffer */
5327 buf->xdp->data_end = buf->xdp->data + buf1_len;
5328 xsk_buff_dma_sync_for_cpu(buf->xdp);
5329
5330 prog = READ_ONCE(priv->xdp_prog);
5331 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5332
5333 switch (res) {
5334 case STMMAC_XDP_PASS:
5335 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5336 xsk_buff_free(buf->xdp);
5337 break;
5338 case STMMAC_XDP_CONSUMED:
5339 xsk_buff_free(buf->xdp);
5340 rx_dropped++;
5341 break;
5342 case STMMAC_XDP_TX:
5343 case STMMAC_XDP_REDIRECT:
5344 xdp_status |= res;
5345 break;
5346 }
5347
5348 buf->xdp = NULL;
5349 dirty++;
5350 count++;
5351 }
5352
5353 if (status & rx_not_ls) {
5354 rx_q->state_saved = true;
5355 rx_q->state.error = error;
5356 rx_q->state.len = len;
5357 }
5358
5359 stmmac_finalize_xdp_rx(priv, xdp_status);
5360
5361 u64_stats_update_begin(&rxq_stats->napi_syncp);
5362 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5363 u64_stats_update_end(&rxq_stats->napi_syncp);
5364
5365 priv->xstats.rx_dropped += rx_dropped;
5366 priv->xstats.rx_errors += rx_errors;
5367
5368 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5369 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5370 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5371 else
5372 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5373
5374 return (int)count;
5375 }
5376
5377 return failure ? limit : (int)count;
5378}
5379
5380/**
5381 * stmmac_rx - manage the receive process
5382 * @priv: driver private structure
5383 * @limit: napi bugget
5384 * @queue: RX queue index.
5385 * Description : this the function called by the napi poll method.
5386 * It gets all the frames inside the ring.
5387 */
5388static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5389{
5390 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5391 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5392 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5393 struct stmmac_channel *ch = &priv->channel[queue];
5394 unsigned int count = 0, error = 0, len = 0;
5395 int status = 0, coe = priv->hw->rx_csum;
5396 unsigned int next_entry = rx_q->cur_rx;
5397 enum dma_data_direction dma_dir;
5398 unsigned int desc_size;
5399 struct sk_buff *skb = NULL;
5400 struct stmmac_xdp_buff ctx;
5401 int xdp_status = 0;
5402 int buf_sz;
5403
5404 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5405 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5406 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5407
5408 if (netif_msg_rx_status(priv)) {
5409 void *rx_head;
5410
5411 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5412 if (priv->extend_desc) {
5413 rx_head = (void *)rx_q->dma_erx;
5414 desc_size = sizeof(struct dma_extended_desc);
5415 } else {
5416 rx_head = (void *)rx_q->dma_rx;
5417 desc_size = sizeof(struct dma_desc);
5418 }
5419
5420 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5421 rx_q->dma_rx_phy, desc_size);
5422 }
5423 while (count < limit) {
5424 unsigned int buf1_len = 0, buf2_len = 0;
5425 enum pkt_hash_types hash_type;
5426 struct stmmac_rx_buffer *buf;
5427 struct dma_desc *np, *p;
5428 int entry;
5429 u32 hash;
5430
5431 if (!count && rx_q->state_saved) {
5432 skb = rx_q->state.skb;
5433 error = rx_q->state.error;
5434 len = rx_q->state.len;
5435 } else {
5436 rx_q->state_saved = false;
5437 skb = NULL;
5438 error = 0;
5439 len = 0;
5440 }
5441
5442read_again:
5443 if (count >= limit)
5444 break;
5445
5446 buf1_len = 0;
5447 buf2_len = 0;
5448 entry = next_entry;
5449 buf = &rx_q->buf_pool[entry];
5450
5451 if (priv->extend_desc)
5452 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5453 else
5454 p = rx_q->dma_rx + entry;
5455
5456 /* read the status of the incoming frame */
5457 status = stmmac_rx_status(priv, &priv->xstats, p);
5458 /* check if managed by the DMA otherwise go ahead */
5459 if (unlikely(status & dma_own))
5460 break;
5461
5462 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5463 priv->dma_conf.dma_rx_size);
5464 next_entry = rx_q->cur_rx;
5465
5466 if (priv->extend_desc)
5467 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5468 else
5469 np = rx_q->dma_rx + next_entry;
5470
5471 prefetch(np);
5472
5473 if (priv->extend_desc)
5474 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5475 if (unlikely(status == discard_frame)) {
5476 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5477 buf->page = NULL;
5478 error = 1;
5479 if (!priv->hwts_rx_en)
5480 rx_errors++;
5481 }
5482
5483 if (unlikely(error && (status & rx_not_ls)))
5484 goto read_again;
5485 if (unlikely(error)) {
5486 dev_kfree_skb(skb);
5487 skb = NULL;
5488 count++;
5489 continue;
5490 }
5491
5492 /* Buffer is good. Go on. */
5493
5494 prefetch(page_address(buf->page) + buf->page_offset);
5495 if (buf->sec_page)
5496 prefetch(page_address(buf->sec_page));
5497
5498 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5499 len += buf1_len;
5500 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5501 len += buf2_len;
5502
5503 /* ACS is disabled; strip manually. */
5504 if (likely(!(status & rx_not_ls))) {
5505 if (buf2_len) {
5506 buf2_len -= ETH_FCS_LEN;
5507 len -= ETH_FCS_LEN;
5508 } else if (buf1_len) {
5509 buf1_len -= ETH_FCS_LEN;
5510 len -= ETH_FCS_LEN;
5511 }
5512 }
5513
5514 if (!skb) {
5515 unsigned int pre_len, sync_len;
5516
5517 dma_sync_single_for_cpu(priv->device, buf->addr,
5518 buf1_len, dma_dir);
5519
5520 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5521 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5522 buf->page_offset, buf1_len, true);
5523
5524 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5525 buf->page_offset;
5526
5527 ctx.priv = priv;
5528 ctx.desc = p;
5529 ctx.ndesc = np;
5530
5531 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5532 /* Due xdp_adjust_tail: DMA sync for_device
5533 * cover max len CPU touch
5534 */
5535 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5536 buf->page_offset;
5537 sync_len = max(sync_len, pre_len);
5538
5539 /* For Not XDP_PASS verdict */
5540 if (IS_ERR(skb)) {
5541 unsigned int xdp_res = -PTR_ERR(skb);
5542
5543 if (xdp_res & STMMAC_XDP_CONSUMED) {
5544 page_pool_put_page(rx_q->page_pool,
5545 virt_to_head_page(ctx.xdp.data),
5546 sync_len, true);
5547 buf->page = NULL;
5548 rx_dropped++;
5549
5550 /* Clear skb as it was set as
5551 * status by XDP program.
5552 */
5553 skb = NULL;
5554
5555 if (unlikely((status & rx_not_ls)))
5556 goto read_again;
5557
5558 count++;
5559 continue;
5560 } else if (xdp_res & (STMMAC_XDP_TX |
5561 STMMAC_XDP_REDIRECT)) {
5562 xdp_status |= xdp_res;
5563 buf->page = NULL;
5564 skb = NULL;
5565 count++;
5566 continue;
5567 }
5568 }
5569 }
5570
5571 if (!skb) {
5572 /* XDP program may expand or reduce tail */
5573 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5574
5575 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5576 if (!skb) {
5577 rx_dropped++;
5578 count++;
5579 goto drain_data;
5580 }
5581
5582 /* XDP program may adjust header */
5583 skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5584 skb_put(skb, buf1_len);
5585
5586 /* Data payload copied into SKB, page ready for recycle */
5587 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5588 buf->page = NULL;
5589 } else if (buf1_len) {
5590 dma_sync_single_for_cpu(priv->device, buf->addr,
5591 buf1_len, dma_dir);
5592 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5593 buf->page, buf->page_offset, buf1_len,
5594 priv->dma_conf.dma_buf_sz);
5595
5596 /* Data payload appended into SKB */
5597 skb_mark_for_recycle(skb);
5598 buf->page = NULL;
5599 }
5600
5601 if (buf2_len) {
5602 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5603 buf2_len, dma_dir);
5604 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5605 buf->sec_page, 0, buf2_len,
5606 priv->dma_conf.dma_buf_sz);
5607
5608 /* Data payload appended into SKB */
5609 skb_mark_for_recycle(skb);
5610 buf->sec_page = NULL;
5611 }
5612
5613drain_data:
5614 if (likely(status & rx_not_ls))
5615 goto read_again;
5616 if (!skb)
5617 continue;
5618
5619 /* Got entire packet into SKB. Finish it. */
5620
5621 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5622
5623 if (priv->hw->hw_vlan_en)
5624 /* MAC level stripping. */
5625 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5626 else
5627 /* Driver level stripping. */
5628 stmmac_rx_vlan(priv->dev, skb);
5629
5630 skb->protocol = eth_type_trans(skb, priv->dev);
5631
5632 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5633 skb_checksum_none_assert(skb);
5634 else
5635 skb->ip_summed = CHECKSUM_UNNECESSARY;
5636
5637 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5638 skb_set_hash(skb, hash, hash_type);
5639
5640 skb_record_rx_queue(skb, queue);
5641 napi_gro_receive(&ch->rx_napi, skb);
5642 skb = NULL;
5643
5644 rx_packets++;
5645 rx_bytes += len;
5646 count++;
5647 }
5648
5649 if (status & rx_not_ls || skb) {
5650 rx_q->state_saved = true;
5651 rx_q->state.skb = skb;
5652 rx_q->state.error = error;
5653 rx_q->state.len = len;
5654 }
5655
5656 stmmac_finalize_xdp_rx(priv, xdp_status);
5657
5658 stmmac_rx_refill(priv, queue);
5659
5660 u64_stats_update_begin(&rxq_stats->napi_syncp);
5661 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5662 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5663 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5664 u64_stats_update_end(&rxq_stats->napi_syncp);
5665
5666 priv->xstats.rx_dropped += rx_dropped;
5667 priv->xstats.rx_errors += rx_errors;
5668
5669 return count;
5670}
5671
5672static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5673{
5674 struct stmmac_channel *ch =
5675 container_of(napi, struct stmmac_channel, rx_napi);
5676 struct stmmac_priv *priv = ch->priv_data;
5677 struct stmmac_rxq_stats *rxq_stats;
5678 u32 chan = ch->index;
5679 int work_done;
5680
5681 rxq_stats = &priv->xstats.rxq_stats[chan];
5682 u64_stats_update_begin(&rxq_stats->napi_syncp);
5683 u64_stats_inc(&rxq_stats->napi.poll);
5684 u64_stats_update_end(&rxq_stats->napi_syncp);
5685
5686 work_done = stmmac_rx(priv, budget, chan);
5687 if (work_done < budget && napi_complete_done(napi, work_done)) {
5688 unsigned long flags;
5689
5690 spin_lock_irqsave(&ch->lock, flags);
5691 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5692 spin_unlock_irqrestore(&ch->lock, flags);
5693 }
5694
5695 return work_done;
5696}
5697
5698static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5699{
5700 struct stmmac_channel *ch =
5701 container_of(napi, struct stmmac_channel, tx_napi);
5702 struct stmmac_priv *priv = ch->priv_data;
5703 struct stmmac_txq_stats *txq_stats;
5704 bool pending_packets = false;
5705 u32 chan = ch->index;
5706 int work_done;
5707
5708 txq_stats = &priv->xstats.txq_stats[chan];
5709 u64_stats_update_begin(&txq_stats->napi_syncp);
5710 u64_stats_inc(&txq_stats->napi.poll);
5711 u64_stats_update_end(&txq_stats->napi_syncp);
5712
5713 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5714 work_done = min(work_done, budget);
5715
5716 if (work_done < budget && napi_complete_done(napi, work_done)) {
5717 unsigned long flags;
5718
5719 spin_lock_irqsave(&ch->lock, flags);
5720 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5721 spin_unlock_irqrestore(&ch->lock, flags);
5722 }
5723
5724 /* TX still have packet to handle, check if we need to arm tx timer */
5725 if (pending_packets)
5726 stmmac_tx_timer_arm(priv, chan);
5727
5728 return work_done;
5729}
5730
5731static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5732{
5733 struct stmmac_channel *ch =
5734 container_of(napi, struct stmmac_channel, rxtx_napi);
5735 struct stmmac_priv *priv = ch->priv_data;
5736 bool tx_pending_packets = false;
5737 int rx_done, tx_done, rxtx_done;
5738 struct stmmac_rxq_stats *rxq_stats;
5739 struct stmmac_txq_stats *txq_stats;
5740 u32 chan = ch->index;
5741
5742 rxq_stats = &priv->xstats.rxq_stats[chan];
5743 u64_stats_update_begin(&rxq_stats->napi_syncp);
5744 u64_stats_inc(&rxq_stats->napi.poll);
5745 u64_stats_update_end(&rxq_stats->napi_syncp);
5746
5747 txq_stats = &priv->xstats.txq_stats[chan];
5748 u64_stats_update_begin(&txq_stats->napi_syncp);
5749 u64_stats_inc(&txq_stats->napi.poll);
5750 u64_stats_update_end(&txq_stats->napi_syncp);
5751
5752 tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5753 tx_done = min(tx_done, budget);
5754
5755 rx_done = stmmac_rx_zc(priv, budget, chan);
5756
5757 rxtx_done = max(tx_done, rx_done);
5758
5759 /* If either TX or RX work is not complete, return budget
5760 * and keep pooling
5761 */
5762 if (rxtx_done >= budget)
5763 return budget;
5764
5765 /* all work done, exit the polling mode */
5766 if (napi_complete_done(napi, rxtx_done)) {
5767 unsigned long flags;
5768
5769 spin_lock_irqsave(&ch->lock, flags);
5770 /* Both RX and TX work done are compelte,
5771 * so enable both RX & TX IRQs.
5772 */
5773 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5774 spin_unlock_irqrestore(&ch->lock, flags);
5775 }
5776
5777 /* TX still have packet to handle, check if we need to arm tx timer */
5778 if (tx_pending_packets)
5779 stmmac_tx_timer_arm(priv, chan);
5780
5781 return min(rxtx_done, budget - 1);
5782}
5783
5784/**
5785 * stmmac_tx_timeout
5786 * @dev : Pointer to net device structure
5787 * @txqueue: the index of the hanging transmit queue
5788 * Description: this function is called when a packet transmission fails to
5789 * complete within a reasonable time. The driver will mark the error in the
5790 * netdev structure and arrange for the device to be reset to a sane state
5791 * in order to transmit a new packet.
5792 */
5793static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5794{
5795 struct stmmac_priv *priv = netdev_priv(dev);
5796
5797 stmmac_global_err(priv);
5798}
5799
5800/**
5801 * stmmac_set_rx_mode - entry point for multicast addressing
5802 * @dev : pointer to the device structure
5803 * Description:
5804 * This function is a driver entry point which gets called by the kernel
5805 * whenever multicast addresses must be enabled/disabled.
5806 * Return value:
5807 * void.
5808 */
5809static void stmmac_set_rx_mode(struct net_device *dev)
5810{
5811 struct stmmac_priv *priv = netdev_priv(dev);
5812
5813 stmmac_set_filter(priv, priv->hw, dev);
5814}
5815
5816/**
5817 * stmmac_change_mtu - entry point to change MTU size for the device.
5818 * @dev : device pointer.
5819 * @new_mtu : the new MTU size for the device.
5820 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5821 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5822 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5823 * Return value:
5824 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5825 * file on failure.
5826 */
5827static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5828{
5829 struct stmmac_priv *priv = netdev_priv(dev);
5830 int txfifosz = priv->plat->tx_fifo_size;
5831 struct stmmac_dma_conf *dma_conf;
5832 const int mtu = new_mtu;
5833 int ret;
5834
5835 if (txfifosz == 0)
5836 txfifosz = priv->dma_cap.tx_fifo_size;
5837
5838 txfifosz /= priv->plat->tx_queues_to_use;
5839
5840 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5841 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5842 return -EINVAL;
5843 }
5844
5845 new_mtu = STMMAC_ALIGN(new_mtu);
5846
5847 /* If condition true, FIFO is too small or MTU too large */
5848 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5849 return -EINVAL;
5850
5851 if (netif_running(dev)) {
5852 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5853 /* Try to allocate the new DMA conf with the new mtu */
5854 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5855 if (IS_ERR(dma_conf)) {
5856 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5857 mtu);
5858 return PTR_ERR(dma_conf);
5859 }
5860
5861 stmmac_release(dev);
5862
5863 ret = __stmmac_open(dev, dma_conf);
5864 if (ret) {
5865 free_dma_desc_resources(priv, dma_conf);
5866 kfree(dma_conf);
5867 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5868 return ret;
5869 }
5870
5871 kfree(dma_conf);
5872
5873 stmmac_set_rx_mode(dev);
5874 }
5875
5876 WRITE_ONCE(dev->mtu, mtu);
5877 netdev_update_features(dev);
5878
5879 return 0;
5880}
5881
5882static netdev_features_t stmmac_fix_features(struct net_device *dev,
5883 netdev_features_t features)
5884{
5885 struct stmmac_priv *priv = netdev_priv(dev);
5886
5887 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5888 features &= ~NETIF_F_RXCSUM;
5889
5890 if (!priv->plat->tx_coe)
5891 features &= ~NETIF_F_CSUM_MASK;
5892
5893 /* Some GMAC devices have a bugged Jumbo frame support that
5894 * needs to have the Tx COE disabled for oversized frames
5895 * (due to limited buffer sizes). In this case we disable
5896 * the TX csum insertion in the TDES and not use SF.
5897 */
5898 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5899 features &= ~NETIF_F_CSUM_MASK;
5900
5901 /* Disable tso if asked by ethtool */
5902 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5903 if (features & NETIF_F_TSO)
5904 priv->tso = true;
5905 else
5906 priv->tso = false;
5907 }
5908
5909 return features;
5910}
5911
5912static int stmmac_set_features(struct net_device *netdev,
5913 netdev_features_t features)
5914{
5915 struct stmmac_priv *priv = netdev_priv(netdev);
5916
5917 /* Keep the COE Type in case of csum is supporting */
5918 if (features & NETIF_F_RXCSUM)
5919 priv->hw->rx_csum = priv->plat->rx_coe;
5920 else
5921 priv->hw->rx_csum = 0;
5922 /* No check needed because rx_coe has been set before and it will be
5923 * fixed in case of issue.
5924 */
5925 stmmac_rx_ipc(priv, priv->hw);
5926
5927 if (priv->sph_cap) {
5928 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5929 u32 chan;
5930
5931 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5932 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5933 }
5934
5935 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5936 priv->hw->hw_vlan_en = true;
5937 else
5938 priv->hw->hw_vlan_en = false;
5939
5940 stmmac_set_hw_vlan_mode(priv, priv->hw);
5941
5942 return 0;
5943}
5944
5945static void stmmac_common_interrupt(struct stmmac_priv *priv)
5946{
5947 u32 rx_cnt = priv->plat->rx_queues_to_use;
5948 u32 tx_cnt = priv->plat->tx_queues_to_use;
5949 u32 queues_count;
5950 u32 queue;
5951 bool xmac;
5952
5953 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5954 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5955
5956 if (priv->irq_wake)
5957 pm_wakeup_event(priv->device, 0);
5958
5959 if (priv->dma_cap.estsel)
5960 stmmac_est_irq_status(priv, priv, priv->dev,
5961 &priv->xstats, tx_cnt);
5962
5963 if (stmmac_fpe_supported(priv))
5964 stmmac_fpe_irq_status(priv);
5965
5966 /* To handle GMAC own interrupts */
5967 if ((priv->plat->has_gmac) || xmac) {
5968 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5969
5970 if (unlikely(status)) {
5971 /* For LPI we need to save the tx status */
5972 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5973 priv->tx_path_in_lpi_mode = true;
5974 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5975 priv->tx_path_in_lpi_mode = false;
5976 }
5977
5978 for (queue = 0; queue < queues_count; queue++)
5979 stmmac_host_mtl_irq_status(priv, priv->hw, queue);
5980
5981 /* PCS link status */
5982 if (priv->hw->pcs &&
5983 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5984 if (priv->xstats.pcs_link)
5985 netif_carrier_on(priv->dev);
5986 else
5987 netif_carrier_off(priv->dev);
5988 }
5989
5990 stmmac_timestamp_interrupt(priv, priv);
5991 }
5992}
5993
5994/**
5995 * stmmac_interrupt - main ISR
5996 * @irq: interrupt number.
5997 * @dev_id: to pass the net device pointer.
5998 * Description: this is the main driver interrupt service routine.
5999 * It can call:
6000 * o DMA service routine (to manage incoming frame reception and transmission
6001 * status)
6002 * o Core interrupts to manage: remote wake-up, management counter, LPI
6003 * interrupts.
6004 */
6005static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6006{
6007 struct net_device *dev = (struct net_device *)dev_id;
6008 struct stmmac_priv *priv = netdev_priv(dev);
6009
6010 /* Check if adapter is up */
6011 if (test_bit(STMMAC_DOWN, &priv->state))
6012 return IRQ_HANDLED;
6013
6014 /* Check ASP error if it isn't delivered via an individual IRQ */
6015 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6016 return IRQ_HANDLED;
6017
6018 /* To handle Common interrupts */
6019 stmmac_common_interrupt(priv);
6020
6021 /* To handle DMA interrupts */
6022 stmmac_dma_interrupt(priv);
6023
6024 return IRQ_HANDLED;
6025}
6026
6027static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6028{
6029 struct net_device *dev = (struct net_device *)dev_id;
6030 struct stmmac_priv *priv = netdev_priv(dev);
6031
6032 /* Check if adapter is up */
6033 if (test_bit(STMMAC_DOWN, &priv->state))
6034 return IRQ_HANDLED;
6035
6036 /* To handle Common interrupts */
6037 stmmac_common_interrupt(priv);
6038
6039 return IRQ_HANDLED;
6040}
6041
6042static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6043{
6044 struct net_device *dev = (struct net_device *)dev_id;
6045 struct stmmac_priv *priv = netdev_priv(dev);
6046
6047 /* Check if adapter is up */
6048 if (test_bit(STMMAC_DOWN, &priv->state))
6049 return IRQ_HANDLED;
6050
6051 /* Check if a fatal error happened */
6052 stmmac_safety_feat_interrupt(priv);
6053
6054 return IRQ_HANDLED;
6055}
6056
6057static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6058{
6059 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6060 struct stmmac_dma_conf *dma_conf;
6061 int chan = tx_q->queue_index;
6062 struct stmmac_priv *priv;
6063 int status;
6064
6065 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6066 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6067
6068 /* Check if adapter is up */
6069 if (test_bit(STMMAC_DOWN, &priv->state))
6070 return IRQ_HANDLED;
6071
6072 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6073
6074 if (unlikely(status & tx_hard_error_bump_tc)) {
6075 /* Try to bump up the dma threshold on this failure */
6076 stmmac_bump_dma_threshold(priv, chan);
6077 } else if (unlikely(status == tx_hard_error)) {
6078 stmmac_tx_err(priv, chan);
6079 }
6080
6081 return IRQ_HANDLED;
6082}
6083
6084static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6085{
6086 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6087 struct stmmac_dma_conf *dma_conf;
6088 int chan = rx_q->queue_index;
6089 struct stmmac_priv *priv;
6090
6091 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6092 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6093
6094 /* Check if adapter is up */
6095 if (test_bit(STMMAC_DOWN, &priv->state))
6096 return IRQ_HANDLED;
6097
6098 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6099
6100 return IRQ_HANDLED;
6101}
6102
6103/**
6104 * stmmac_ioctl - Entry point for the Ioctl
6105 * @dev: Device pointer.
6106 * @rq: An IOCTL specefic structure, that can contain a pointer to
6107 * a proprietary structure used to pass information to the driver.
6108 * @cmd: IOCTL command
6109 * Description:
6110 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6111 */
6112static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6113{
6114 struct stmmac_priv *priv = netdev_priv (dev);
6115 int ret = -EOPNOTSUPP;
6116
6117 if (!netif_running(dev))
6118 return -EINVAL;
6119
6120 switch (cmd) {
6121 case SIOCGMIIPHY:
6122 case SIOCGMIIREG:
6123 case SIOCSMIIREG:
6124 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6125 break;
6126 case SIOCSHWTSTAMP:
6127 ret = stmmac_hwtstamp_set(dev, rq);
6128 break;
6129 case SIOCGHWTSTAMP:
6130 ret = stmmac_hwtstamp_get(dev, rq);
6131 break;
6132 default:
6133 break;
6134 }
6135
6136 return ret;
6137}
6138
6139static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6140 void *cb_priv)
6141{
6142 struct stmmac_priv *priv = cb_priv;
6143 int ret = -EOPNOTSUPP;
6144
6145 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6146 return ret;
6147
6148 __stmmac_disable_all_queues(priv);
6149
6150 switch (type) {
6151 case TC_SETUP_CLSU32:
6152 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6153 break;
6154 case TC_SETUP_CLSFLOWER:
6155 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6156 break;
6157 default:
6158 break;
6159 }
6160
6161 stmmac_enable_all_queues(priv);
6162 return ret;
6163}
6164
6165static LIST_HEAD(stmmac_block_cb_list);
6166
6167static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6168 void *type_data)
6169{
6170 struct stmmac_priv *priv = netdev_priv(ndev);
6171
6172 switch (type) {
6173 case TC_QUERY_CAPS:
6174 return stmmac_tc_query_caps(priv, priv, type_data);
6175 case TC_SETUP_QDISC_MQPRIO:
6176 return stmmac_tc_setup_mqprio(priv, priv, type_data);
6177 case TC_SETUP_BLOCK:
6178 return flow_block_cb_setup_simple(type_data,
6179 &stmmac_block_cb_list,
6180 stmmac_setup_tc_block_cb,
6181 priv, priv, true);
6182 case TC_SETUP_QDISC_CBS:
6183 return stmmac_tc_setup_cbs(priv, priv, type_data);
6184 case TC_SETUP_QDISC_TAPRIO:
6185 return stmmac_tc_setup_taprio(priv, priv, type_data);
6186 case TC_SETUP_QDISC_ETF:
6187 return stmmac_tc_setup_etf(priv, priv, type_data);
6188 default:
6189 return -EOPNOTSUPP;
6190 }
6191}
6192
6193static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6194 struct net_device *sb_dev)
6195{
6196 int gso = skb_shinfo(skb)->gso_type;
6197
6198 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6199 /*
6200 * There is no way to determine the number of TSO/USO
6201 * capable Queues. Let's use always the Queue 0
6202 * because if TSO/USO is supported then at least this
6203 * one will be capable.
6204 */
6205 return 0;
6206 }
6207
6208 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6209}
6210
6211static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6212{
6213 struct stmmac_priv *priv = netdev_priv(ndev);
6214 int ret = 0;
6215
6216 ret = pm_runtime_resume_and_get(priv->device);
6217 if (ret < 0)
6218 return ret;
6219
6220 ret = eth_mac_addr(ndev, addr);
6221 if (ret)
6222 goto set_mac_error;
6223
6224 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6225
6226set_mac_error:
6227 pm_runtime_put(priv->device);
6228
6229 return ret;
6230}
6231
6232#ifdef CONFIG_DEBUG_FS
6233static struct dentry *stmmac_fs_dir;
6234
6235static void sysfs_display_ring(void *head, int size, int extend_desc,
6236 struct seq_file *seq, dma_addr_t dma_phy_addr)
6237{
6238 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6239 struct dma_desc *p = (struct dma_desc *)head;
6240 unsigned int desc_size;
6241 dma_addr_t dma_addr;
6242 int i;
6243
6244 desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6245 for (i = 0; i < size; i++) {
6246 dma_addr = dma_phy_addr + i * desc_size;
6247 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6248 i, &dma_addr,
6249 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6250 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6251 if (extend_desc)
6252 p = &(++ep)->basic;
6253 else
6254 p++;
6255 }
6256}
6257
6258static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6259{
6260 struct net_device *dev = seq->private;
6261 struct stmmac_priv *priv = netdev_priv(dev);
6262 u32 rx_count = priv->plat->rx_queues_to_use;
6263 u32 tx_count = priv->plat->tx_queues_to_use;
6264 u32 queue;
6265
6266 if ((dev->flags & IFF_UP) == 0)
6267 return 0;
6268
6269 for (queue = 0; queue < rx_count; queue++) {
6270 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6271
6272 seq_printf(seq, "RX Queue %d:\n", queue);
6273
6274 if (priv->extend_desc) {
6275 seq_printf(seq, "Extended descriptor ring:\n");
6276 sysfs_display_ring((void *)rx_q->dma_erx,
6277 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6278 } else {
6279 seq_printf(seq, "Descriptor ring:\n");
6280 sysfs_display_ring((void *)rx_q->dma_rx,
6281 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6282 }
6283 }
6284
6285 for (queue = 0; queue < tx_count; queue++) {
6286 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6287
6288 seq_printf(seq, "TX Queue %d:\n", queue);
6289
6290 if (priv->extend_desc) {
6291 seq_printf(seq, "Extended descriptor ring:\n");
6292 sysfs_display_ring((void *)tx_q->dma_etx,
6293 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6294 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6295 seq_printf(seq, "Descriptor ring:\n");
6296 sysfs_display_ring((void *)tx_q->dma_tx,
6297 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6298 }
6299 }
6300
6301 return 0;
6302}
6303DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6304
6305static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6306{
6307 static const char * const dwxgmac_timestamp_source[] = {
6308 "None",
6309 "Internal",
6310 "External",
6311 "Both",
6312 };
6313 static const char * const dwxgmac_safety_feature_desc[] = {
6314 "No",
6315 "All Safety Features with ECC and Parity",
6316 "All Safety Features without ECC or Parity",
6317 "All Safety Features with Parity Only",
6318 "ECC Only",
6319 "UNDEFINED",
6320 "UNDEFINED",
6321 "UNDEFINED",
6322 };
6323 struct net_device *dev = seq->private;
6324 struct stmmac_priv *priv = netdev_priv(dev);
6325
6326 if (!priv->hw_cap_support) {
6327 seq_printf(seq, "DMA HW features not supported\n");
6328 return 0;
6329 }
6330
6331 seq_printf(seq, "==============================\n");
6332 seq_printf(seq, "\tDMA HW features\n");
6333 seq_printf(seq, "==============================\n");
6334
6335 seq_printf(seq, "\t10/100 Mbps: %s\n",
6336 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6337 seq_printf(seq, "\t1000 Mbps: %s\n",
6338 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6339 seq_printf(seq, "\tHalf duplex: %s\n",
6340 (priv->dma_cap.half_duplex) ? "Y" : "N");
6341 if (priv->plat->has_xgmac) {
6342 seq_printf(seq,
6343 "\tNumber of Additional MAC address registers: %d\n",
6344 priv->dma_cap.multi_addr);
6345 } else {
6346 seq_printf(seq, "\tHash Filter: %s\n",
6347 (priv->dma_cap.hash_filter) ? "Y" : "N");
6348 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6349 (priv->dma_cap.multi_addr) ? "Y" : "N");
6350 }
6351 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6352 (priv->dma_cap.pcs) ? "Y" : "N");
6353 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6354 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6355 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6356 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6357 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6358 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6359 seq_printf(seq, "\tRMON module: %s\n",
6360 (priv->dma_cap.rmon) ? "Y" : "N");
6361 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6362 (priv->dma_cap.time_stamp) ? "Y" : "N");
6363 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6364 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6365 if (priv->plat->has_xgmac)
6366 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6367 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6368 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6369 (priv->dma_cap.eee) ? "Y" : "N");
6370 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6371 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6372 (priv->dma_cap.tx_coe) ? "Y" : "N");
6373 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6374 priv->plat->has_xgmac) {
6375 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6376 (priv->dma_cap.rx_coe) ? "Y" : "N");
6377 } else {
6378 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6379 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6380 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6381 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6382 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6383 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6384 }
6385 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6386 priv->dma_cap.number_rx_channel);
6387 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6388 priv->dma_cap.number_tx_channel);
6389 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6390 priv->dma_cap.number_rx_queues);
6391 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6392 priv->dma_cap.number_tx_queues);
6393 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6394 (priv->dma_cap.enh_desc) ? "Y" : "N");
6395 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6396 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6397 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6398 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6399 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6400 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6401 priv->dma_cap.pps_out_num);
6402 seq_printf(seq, "\tSafety Features: %s\n",
6403 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6404 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6405 priv->dma_cap.frpsel ? "Y" : "N");
6406 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6407 priv->dma_cap.host_dma_width);
6408 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6409 priv->dma_cap.rssen ? "Y" : "N");
6410 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6411 priv->dma_cap.vlhash ? "Y" : "N");
6412 seq_printf(seq, "\tSplit Header: %s\n",
6413 priv->dma_cap.sphen ? "Y" : "N");
6414 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6415 priv->dma_cap.vlins ? "Y" : "N");
6416 seq_printf(seq, "\tDouble VLAN: %s\n",
6417 priv->dma_cap.dvlan ? "Y" : "N");
6418 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6419 priv->dma_cap.l3l4fnum);
6420 seq_printf(seq, "\tARP Offloading: %s\n",
6421 priv->dma_cap.arpoffsel ? "Y" : "N");
6422 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6423 priv->dma_cap.estsel ? "Y" : "N");
6424 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6425 priv->dma_cap.fpesel ? "Y" : "N");
6426 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6427 priv->dma_cap.tbssel ? "Y" : "N");
6428 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6429 priv->dma_cap.tbs_ch_num);
6430 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6431 priv->dma_cap.sgfsel ? "Y" : "N");
6432 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6433 BIT(priv->dma_cap.ttsfd) >> 1);
6434 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6435 priv->dma_cap.numtc);
6436 seq_printf(seq, "\tDCB Feature: %s\n",
6437 priv->dma_cap.dcben ? "Y" : "N");
6438 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6439 priv->dma_cap.advthword ? "Y" : "N");
6440 seq_printf(seq, "\tPTP Offload: %s\n",
6441 priv->dma_cap.ptoen ? "Y" : "N");
6442 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6443 priv->dma_cap.osten ? "Y" : "N");
6444 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6445 priv->dma_cap.pfcen ? "Y" : "N");
6446 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6447 BIT(priv->dma_cap.frpes) << 6);
6448 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6449 BIT(priv->dma_cap.frpbs) << 6);
6450 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6451 priv->dma_cap.frppipe_num);
6452 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6453 priv->dma_cap.nrvf_num ?
6454 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6455 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6456 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6457 seq_printf(seq, "\tDepth of GCL: %lu\n",
6458 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6459 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6460 priv->dma_cap.cbtisel ? "Y" : "N");
6461 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6462 priv->dma_cap.aux_snapshot_n);
6463 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6464 priv->dma_cap.pou_ost_en ? "Y" : "N");
6465 seq_printf(seq, "\tEnhanced DMA: %s\n",
6466 priv->dma_cap.edma ? "Y" : "N");
6467 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6468 priv->dma_cap.ediffc ? "Y" : "N");
6469 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6470 priv->dma_cap.vxn ? "Y" : "N");
6471 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6472 priv->dma_cap.dbgmem ? "Y" : "N");
6473 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6474 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6475 return 0;
6476}
6477DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6478
6479/* Use network device events to rename debugfs file entries.
6480 */
6481static int stmmac_device_event(struct notifier_block *unused,
6482 unsigned long event, void *ptr)
6483{
6484 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6485 struct stmmac_priv *priv = netdev_priv(dev);
6486
6487 if (dev->netdev_ops != &stmmac_netdev_ops)
6488 goto done;
6489
6490 switch (event) {
6491 case NETDEV_CHANGENAME:
6492 if (priv->dbgfs_dir)
6493 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6494 priv->dbgfs_dir,
6495 stmmac_fs_dir,
6496 dev->name);
6497 break;
6498 }
6499done:
6500 return NOTIFY_DONE;
6501}
6502
6503static struct notifier_block stmmac_notifier = {
6504 .notifier_call = stmmac_device_event,
6505};
6506
6507static void stmmac_init_fs(struct net_device *dev)
6508{
6509 struct stmmac_priv *priv = netdev_priv(dev);
6510
6511 rtnl_lock();
6512
6513 /* Create per netdev entries */
6514 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6515
6516 /* Entry to report DMA RX/TX rings */
6517 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6518 &stmmac_rings_status_fops);
6519
6520 /* Entry to report the DMA HW features */
6521 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6522 &stmmac_dma_cap_fops);
6523
6524 rtnl_unlock();
6525}
6526
6527static void stmmac_exit_fs(struct net_device *dev)
6528{
6529 struct stmmac_priv *priv = netdev_priv(dev);
6530
6531 debugfs_remove_recursive(priv->dbgfs_dir);
6532}
6533#endif /* CONFIG_DEBUG_FS */
6534
6535static u32 stmmac_vid_crc32_le(__le16 vid_le)
6536{
6537 unsigned char *data = (unsigned char *)&vid_le;
6538 unsigned char data_byte = 0;
6539 u32 crc = ~0x0;
6540 u32 temp = 0;
6541 int i, bits;
6542
6543 bits = get_bitmask_order(VLAN_VID_MASK);
6544 for (i = 0; i < bits; i++) {
6545 if ((i % 8) == 0)
6546 data_byte = data[i / 8];
6547
6548 temp = ((crc & 1) ^ data_byte) & 1;
6549 crc >>= 1;
6550 data_byte >>= 1;
6551
6552 if (temp)
6553 crc ^= 0xedb88320;
6554 }
6555
6556 return crc;
6557}
6558
6559static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6560{
6561 u32 crc, hash = 0;
6562 u16 pmatch = 0;
6563 int count = 0;
6564 u16 vid = 0;
6565
6566 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6567 __le16 vid_le = cpu_to_le16(vid);
6568 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6569 hash |= (1 << crc);
6570 count++;
6571 }
6572
6573 if (!priv->dma_cap.vlhash) {
6574 if (count > 2) /* VID = 0 always passes filter */
6575 return -EOPNOTSUPP;
6576
6577 pmatch = vid;
6578 hash = 0;
6579 }
6580
6581 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6582}
6583
6584static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6585{
6586 struct stmmac_priv *priv = netdev_priv(ndev);
6587 bool is_double = false;
6588 int ret;
6589
6590 ret = pm_runtime_resume_and_get(priv->device);
6591 if (ret < 0)
6592 return ret;
6593
6594 if (be16_to_cpu(proto) == ETH_P_8021AD)
6595 is_double = true;
6596
6597 set_bit(vid, priv->active_vlans);
6598 ret = stmmac_vlan_update(priv, is_double);
6599 if (ret) {
6600 clear_bit(vid, priv->active_vlans);
6601 goto err_pm_put;
6602 }
6603
6604 if (priv->hw->num_vlan) {
6605 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6606 if (ret)
6607 goto err_pm_put;
6608 }
6609err_pm_put:
6610 pm_runtime_put(priv->device);
6611
6612 return ret;
6613}
6614
6615static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6616{
6617 struct stmmac_priv *priv = netdev_priv(ndev);
6618 bool is_double = false;
6619 int ret;
6620
6621 ret = pm_runtime_resume_and_get(priv->device);
6622 if (ret < 0)
6623 return ret;
6624
6625 if (be16_to_cpu(proto) == ETH_P_8021AD)
6626 is_double = true;
6627
6628 clear_bit(vid, priv->active_vlans);
6629
6630 if (priv->hw->num_vlan) {
6631 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6632 if (ret)
6633 goto del_vlan_error;
6634 }
6635
6636 ret = stmmac_vlan_update(priv, is_double);
6637
6638del_vlan_error:
6639 pm_runtime_put(priv->device);
6640
6641 return ret;
6642}
6643
6644static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6645{
6646 struct stmmac_priv *priv = netdev_priv(dev);
6647
6648 switch (bpf->command) {
6649 case XDP_SETUP_PROG:
6650 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6651 case XDP_SETUP_XSK_POOL:
6652 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6653 bpf->xsk.queue_id);
6654 default:
6655 return -EOPNOTSUPP;
6656 }
6657}
6658
6659static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6660 struct xdp_frame **frames, u32 flags)
6661{
6662 struct stmmac_priv *priv = netdev_priv(dev);
6663 int cpu = smp_processor_id();
6664 struct netdev_queue *nq;
6665 int i, nxmit = 0;
6666 int queue;
6667
6668 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6669 return -ENETDOWN;
6670
6671 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6672 return -EINVAL;
6673
6674 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6675 nq = netdev_get_tx_queue(priv->dev, queue);
6676
6677 __netif_tx_lock(nq, cpu);
6678 /* Avoids TX time-out as we are sharing with slow path */
6679 txq_trans_cond_update(nq);
6680
6681 for (i = 0; i < num_frames; i++) {
6682 int res;
6683
6684 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6685 if (res == STMMAC_XDP_CONSUMED)
6686 break;
6687
6688 nxmit++;
6689 }
6690
6691 if (flags & XDP_XMIT_FLUSH) {
6692 stmmac_flush_tx_descriptors(priv, queue);
6693 stmmac_tx_timer_arm(priv, queue);
6694 }
6695
6696 __netif_tx_unlock(nq);
6697
6698 return nxmit;
6699}
6700
6701void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6702{
6703 struct stmmac_channel *ch = &priv->channel[queue];
6704 unsigned long flags;
6705
6706 spin_lock_irqsave(&ch->lock, flags);
6707 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6708 spin_unlock_irqrestore(&ch->lock, flags);
6709
6710 stmmac_stop_rx_dma(priv, queue);
6711 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6712}
6713
6714void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6715{
6716 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6717 struct stmmac_channel *ch = &priv->channel[queue];
6718 unsigned long flags;
6719 u32 buf_size;
6720 int ret;
6721
6722 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6723 if (ret) {
6724 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6725 return;
6726 }
6727
6728 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6729 if (ret) {
6730 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6731 netdev_err(priv->dev, "Failed to init RX desc.\n");
6732 return;
6733 }
6734
6735 stmmac_reset_rx_queue(priv, queue);
6736 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6737
6738 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6739 rx_q->dma_rx_phy, rx_q->queue_index);
6740
6741 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6742 sizeof(struct dma_desc));
6743 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6744 rx_q->rx_tail_addr, rx_q->queue_index);
6745
6746 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6747 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6748 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6749 buf_size,
6750 rx_q->queue_index);
6751 } else {
6752 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6753 priv->dma_conf.dma_buf_sz,
6754 rx_q->queue_index);
6755 }
6756
6757 stmmac_start_rx_dma(priv, queue);
6758
6759 spin_lock_irqsave(&ch->lock, flags);
6760 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6761 spin_unlock_irqrestore(&ch->lock, flags);
6762}
6763
6764void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6765{
6766 struct stmmac_channel *ch = &priv->channel[queue];
6767 unsigned long flags;
6768
6769 spin_lock_irqsave(&ch->lock, flags);
6770 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6771 spin_unlock_irqrestore(&ch->lock, flags);
6772
6773 stmmac_stop_tx_dma(priv, queue);
6774 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6775}
6776
6777void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6778{
6779 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6780 struct stmmac_channel *ch = &priv->channel[queue];
6781 unsigned long flags;
6782 int ret;
6783
6784 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6785 if (ret) {
6786 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6787 return;
6788 }
6789
6790 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6791 if (ret) {
6792 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6793 netdev_err(priv->dev, "Failed to init TX desc.\n");
6794 return;
6795 }
6796
6797 stmmac_reset_tx_queue(priv, queue);
6798 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6799
6800 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6801 tx_q->dma_tx_phy, tx_q->queue_index);
6802
6803 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6804 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6805
6806 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6807 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6808 tx_q->tx_tail_addr, tx_q->queue_index);
6809
6810 stmmac_start_tx_dma(priv, queue);
6811
6812 spin_lock_irqsave(&ch->lock, flags);
6813 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6814 spin_unlock_irqrestore(&ch->lock, flags);
6815}
6816
6817void stmmac_xdp_release(struct net_device *dev)
6818{
6819 struct stmmac_priv *priv = netdev_priv(dev);
6820 u32 chan;
6821
6822 /* Ensure tx function is not running */
6823 netif_tx_disable(dev);
6824
6825 /* Disable NAPI process */
6826 stmmac_disable_all_queues(priv);
6827
6828 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6829 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6830
6831 /* Free the IRQ lines */
6832 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6833
6834 /* Stop TX/RX DMA channels */
6835 stmmac_stop_all_dma(priv);
6836
6837 /* Release and free the Rx/Tx resources */
6838 free_dma_desc_resources(priv, &priv->dma_conf);
6839
6840 /* Disable the MAC Rx/Tx */
6841 stmmac_mac_set(priv, priv->ioaddr, false);
6842
6843 /* set trans_start so we don't get spurious
6844 * watchdogs during reset
6845 */
6846 netif_trans_update(dev);
6847 netif_carrier_off(dev);
6848}
6849
6850int stmmac_xdp_open(struct net_device *dev)
6851{
6852 struct stmmac_priv *priv = netdev_priv(dev);
6853 u32 rx_cnt = priv->plat->rx_queues_to_use;
6854 u32 tx_cnt = priv->plat->tx_queues_to_use;
6855 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6856 struct stmmac_rx_queue *rx_q;
6857 struct stmmac_tx_queue *tx_q;
6858 u32 buf_size;
6859 bool sph_en;
6860 u32 chan;
6861 int ret;
6862
6863 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6864 if (ret < 0) {
6865 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6866 __func__);
6867 goto dma_desc_error;
6868 }
6869
6870 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6871 if (ret < 0) {
6872 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6873 __func__);
6874 goto init_error;
6875 }
6876
6877 stmmac_reset_queues_param(priv);
6878
6879 /* DMA CSR Channel configuration */
6880 for (chan = 0; chan < dma_csr_ch; chan++) {
6881 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6882 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6883 }
6884
6885 /* Adjust Split header */
6886 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6887
6888 /* DMA RX Channel Configuration */
6889 for (chan = 0; chan < rx_cnt; chan++) {
6890 rx_q = &priv->dma_conf.rx_queue[chan];
6891
6892 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6893 rx_q->dma_rx_phy, chan);
6894
6895 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6896 (rx_q->buf_alloc_num *
6897 sizeof(struct dma_desc));
6898 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6899 rx_q->rx_tail_addr, chan);
6900
6901 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6902 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6903 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6904 buf_size,
6905 rx_q->queue_index);
6906 } else {
6907 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6908 priv->dma_conf.dma_buf_sz,
6909 rx_q->queue_index);
6910 }
6911
6912 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6913 }
6914
6915 /* DMA TX Channel Configuration */
6916 for (chan = 0; chan < tx_cnt; chan++) {
6917 tx_q = &priv->dma_conf.tx_queue[chan];
6918
6919 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6920 tx_q->dma_tx_phy, chan);
6921
6922 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6923 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6924 tx_q->tx_tail_addr, chan);
6925
6926 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6927 tx_q->txtimer.function = stmmac_tx_timer;
6928 }
6929
6930 /* Enable the MAC Rx/Tx */
6931 stmmac_mac_set(priv, priv->ioaddr, true);
6932
6933 /* Start Rx & Tx DMA Channels */
6934 stmmac_start_all_dma(priv);
6935
6936 ret = stmmac_request_irq(dev);
6937 if (ret)
6938 goto irq_error;
6939
6940 /* Enable NAPI process*/
6941 stmmac_enable_all_queues(priv);
6942 netif_carrier_on(dev);
6943 netif_tx_start_all_queues(dev);
6944 stmmac_enable_all_dma_irq(priv);
6945
6946 return 0;
6947
6948irq_error:
6949 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6950 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6951
6952 stmmac_hw_teardown(dev);
6953init_error:
6954 free_dma_desc_resources(priv, &priv->dma_conf);
6955dma_desc_error:
6956 return ret;
6957}
6958
6959int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6960{
6961 struct stmmac_priv *priv = netdev_priv(dev);
6962 struct stmmac_rx_queue *rx_q;
6963 struct stmmac_tx_queue *tx_q;
6964 struct stmmac_channel *ch;
6965
6966 if (test_bit(STMMAC_DOWN, &priv->state) ||
6967 !netif_carrier_ok(priv->dev))
6968 return -ENETDOWN;
6969
6970 if (!stmmac_xdp_is_enabled(priv))
6971 return -EINVAL;
6972
6973 if (queue >= priv->plat->rx_queues_to_use ||
6974 queue >= priv->plat->tx_queues_to_use)
6975 return -EINVAL;
6976
6977 rx_q = &priv->dma_conf.rx_queue[queue];
6978 tx_q = &priv->dma_conf.tx_queue[queue];
6979 ch = &priv->channel[queue];
6980
6981 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6982 return -EINVAL;
6983
6984 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6985 /* EQoS does not have per-DMA channel SW interrupt,
6986 * so we schedule RX Napi straight-away.
6987 */
6988 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6989 __napi_schedule(&ch->rxtx_napi);
6990 }
6991
6992 return 0;
6993}
6994
6995static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6996{
6997 struct stmmac_priv *priv = netdev_priv(dev);
6998 u32 tx_cnt = priv->plat->tx_queues_to_use;
6999 u32 rx_cnt = priv->plat->rx_queues_to_use;
7000 unsigned int start;
7001 int q;
7002
7003 for (q = 0; q < tx_cnt; q++) {
7004 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7005 u64 tx_packets;
7006 u64 tx_bytes;
7007
7008 do {
7009 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7010 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
7011 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7012 do {
7013 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7014 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7015 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7016
7017 stats->tx_packets += tx_packets;
7018 stats->tx_bytes += tx_bytes;
7019 }
7020
7021 for (q = 0; q < rx_cnt; q++) {
7022 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7023 u64 rx_packets;
7024 u64 rx_bytes;
7025
7026 do {
7027 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7028 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7029 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
7030 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7031
7032 stats->rx_packets += rx_packets;
7033 stats->rx_bytes += rx_bytes;
7034 }
7035
7036 stats->rx_dropped = priv->xstats.rx_dropped;
7037 stats->rx_errors = priv->xstats.rx_errors;
7038 stats->tx_dropped = priv->xstats.tx_dropped;
7039 stats->tx_errors = priv->xstats.tx_errors;
7040 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7041 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7042 stats->rx_length_errors = priv->xstats.rx_length;
7043 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7044 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7045 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7046}
7047
7048static const struct net_device_ops stmmac_netdev_ops = {
7049 .ndo_open = stmmac_open,
7050 .ndo_start_xmit = stmmac_xmit,
7051 .ndo_stop = stmmac_release,
7052 .ndo_change_mtu = stmmac_change_mtu,
7053 .ndo_fix_features = stmmac_fix_features,
7054 .ndo_set_features = stmmac_set_features,
7055 .ndo_set_rx_mode = stmmac_set_rx_mode,
7056 .ndo_tx_timeout = stmmac_tx_timeout,
7057 .ndo_eth_ioctl = stmmac_ioctl,
7058 .ndo_get_stats64 = stmmac_get_stats64,
7059 .ndo_setup_tc = stmmac_setup_tc,
7060 .ndo_select_queue = stmmac_select_queue,
7061 .ndo_set_mac_address = stmmac_set_mac_address,
7062 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7063 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7064 .ndo_bpf = stmmac_bpf,
7065 .ndo_xdp_xmit = stmmac_xdp_xmit,
7066 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7067};
7068
7069static void stmmac_reset_subtask(struct stmmac_priv *priv)
7070{
7071 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7072 return;
7073 if (test_bit(STMMAC_DOWN, &priv->state))
7074 return;
7075
7076 netdev_err(priv->dev, "Reset adapter.\n");
7077
7078 rtnl_lock();
7079 netif_trans_update(priv->dev);
7080 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7081 usleep_range(1000, 2000);
7082
7083 set_bit(STMMAC_DOWN, &priv->state);
7084 dev_close(priv->dev);
7085 dev_open(priv->dev, NULL);
7086 clear_bit(STMMAC_DOWN, &priv->state);
7087 clear_bit(STMMAC_RESETING, &priv->state);
7088 rtnl_unlock();
7089}
7090
7091static void stmmac_service_task(struct work_struct *work)
7092{
7093 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7094 service_task);
7095
7096 stmmac_reset_subtask(priv);
7097 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7098}
7099
7100/**
7101 * stmmac_hw_init - Init the MAC device
7102 * @priv: driver private structure
7103 * Description: this function is to configure the MAC device according to
7104 * some platform parameters or the HW capability register. It prepares the
7105 * driver to use either ring or chain modes and to setup either enhanced or
7106 * normal descriptors.
7107 */
7108static int stmmac_hw_init(struct stmmac_priv *priv)
7109{
7110 int ret;
7111
7112 /* dwmac-sun8i only work in chain mode */
7113 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7114 chain_mode = 1;
7115 priv->chain_mode = chain_mode;
7116
7117 /* Initialize HW Interface */
7118 ret = stmmac_hwif_init(priv);
7119 if (ret)
7120 return ret;
7121
7122 /* Get the HW capability (new GMAC newer than 3.50a) */
7123 priv->hw_cap_support = stmmac_get_hw_features(priv);
7124 if (priv->hw_cap_support) {
7125 dev_info(priv->device, "DMA HW capability register supported\n");
7126
7127 /* We can override some gmac/dma configuration fields: e.g.
7128 * enh_desc, tx_coe (e.g. that are passed through the
7129 * platform) with the values from the HW capability
7130 * register (if supported).
7131 */
7132 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7133 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7134 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7135 priv->hw->pmt = priv->plat->pmt;
7136 if (priv->dma_cap.hash_tb_sz) {
7137 priv->hw->multicast_filter_bins =
7138 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7139 priv->hw->mcast_bits_log2 =
7140 ilog2(priv->hw->multicast_filter_bins);
7141 }
7142
7143 /* TXCOE doesn't work in thresh DMA mode */
7144 if (priv->plat->force_thresh_dma_mode)
7145 priv->plat->tx_coe = 0;
7146 else
7147 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7148
7149 /* In case of GMAC4 rx_coe is from HW cap register. */
7150 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7151
7152 if (priv->dma_cap.rx_coe_type2)
7153 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7154 else if (priv->dma_cap.rx_coe_type1)
7155 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7156
7157 } else {
7158 dev_info(priv->device, "No HW DMA feature register supported\n");
7159 }
7160
7161 if (priv->plat->rx_coe) {
7162 priv->hw->rx_csum = priv->plat->rx_coe;
7163 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7164 if (priv->synopsys_id < DWMAC_CORE_4_00)
7165 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7166 }
7167 if (priv->plat->tx_coe)
7168 dev_info(priv->device, "TX Checksum insertion supported\n");
7169
7170 if (priv->plat->pmt) {
7171 dev_info(priv->device, "Wake-Up On Lan supported\n");
7172 device_set_wakeup_capable(priv->device, 1);
7173 }
7174
7175 if (priv->dma_cap.tsoen)
7176 dev_info(priv->device, "TSO supported\n");
7177
7178 if (priv->dma_cap.number_rx_queues &&
7179 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7180 dev_warn(priv->device,
7181 "Number of Rx queues (%u) exceeds dma capability\n",
7182 priv->plat->rx_queues_to_use);
7183 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7184 }
7185 if (priv->dma_cap.number_tx_queues &&
7186 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7187 dev_warn(priv->device,
7188 "Number of Tx queues (%u) exceeds dma capability\n",
7189 priv->plat->tx_queues_to_use);
7190 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7191 }
7192
7193 if (priv->dma_cap.rx_fifo_size &&
7194 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7195 dev_warn(priv->device,
7196 "Rx FIFO size (%u) exceeds dma capability\n",
7197 priv->plat->rx_fifo_size);
7198 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7199 }
7200 if (priv->dma_cap.tx_fifo_size &&
7201 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7202 dev_warn(priv->device,
7203 "Tx FIFO size (%u) exceeds dma capability\n",
7204 priv->plat->tx_fifo_size);
7205 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7206 }
7207
7208 priv->hw->vlan_fail_q_en =
7209 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7210 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7211
7212 /* Run HW quirks, if any */
7213 if (priv->hwif_quirks) {
7214 ret = priv->hwif_quirks(priv);
7215 if (ret)
7216 return ret;
7217 }
7218
7219 /* Rx Watchdog is available in the COREs newer than the 3.40.
7220 * In some case, for example on bugged HW this feature
7221 * has to be disable and this can be done by passing the
7222 * riwt_off field from the platform.
7223 */
7224 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7225 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7226 priv->use_riwt = 1;
7227 dev_info(priv->device,
7228 "Enable RX Mitigation via HW Watchdog Timer\n");
7229 }
7230
7231 return 0;
7232}
7233
7234static void stmmac_napi_add(struct net_device *dev)
7235{
7236 struct stmmac_priv *priv = netdev_priv(dev);
7237 u32 queue, maxq;
7238
7239 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7240
7241 for (queue = 0; queue < maxq; queue++) {
7242 struct stmmac_channel *ch = &priv->channel[queue];
7243
7244 ch->priv_data = priv;
7245 ch->index = queue;
7246 spin_lock_init(&ch->lock);
7247
7248 if (queue < priv->plat->rx_queues_to_use) {
7249 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7250 }
7251 if (queue < priv->plat->tx_queues_to_use) {
7252 netif_napi_add_tx(dev, &ch->tx_napi,
7253 stmmac_napi_poll_tx);
7254 }
7255 if (queue < priv->plat->rx_queues_to_use &&
7256 queue < priv->plat->tx_queues_to_use) {
7257 netif_napi_add(dev, &ch->rxtx_napi,
7258 stmmac_napi_poll_rxtx);
7259 }
7260 }
7261}
7262
7263static void stmmac_napi_del(struct net_device *dev)
7264{
7265 struct stmmac_priv *priv = netdev_priv(dev);
7266 u32 queue, maxq;
7267
7268 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7269
7270 for (queue = 0; queue < maxq; queue++) {
7271 struct stmmac_channel *ch = &priv->channel[queue];
7272
7273 if (queue < priv->plat->rx_queues_to_use)
7274 netif_napi_del(&ch->rx_napi);
7275 if (queue < priv->plat->tx_queues_to_use)
7276 netif_napi_del(&ch->tx_napi);
7277 if (queue < priv->plat->rx_queues_to_use &&
7278 queue < priv->plat->tx_queues_to_use) {
7279 netif_napi_del(&ch->rxtx_napi);
7280 }
7281 }
7282}
7283
7284int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7285{
7286 struct stmmac_priv *priv = netdev_priv(dev);
7287 int ret = 0, i;
7288
7289 if (netif_running(dev))
7290 stmmac_release(dev);
7291
7292 stmmac_napi_del(dev);
7293
7294 priv->plat->rx_queues_to_use = rx_cnt;
7295 priv->plat->tx_queues_to_use = tx_cnt;
7296 if (!netif_is_rxfh_configured(dev))
7297 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7298 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7299 rx_cnt);
7300
7301 stmmac_napi_add(dev);
7302
7303 if (netif_running(dev))
7304 ret = stmmac_open(dev);
7305
7306 return ret;
7307}
7308
7309int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7310{
7311 struct stmmac_priv *priv = netdev_priv(dev);
7312 int ret = 0;
7313
7314 if (netif_running(dev))
7315 stmmac_release(dev);
7316
7317 priv->dma_conf.dma_rx_size = rx_size;
7318 priv->dma_conf.dma_tx_size = tx_size;
7319
7320 if (netif_running(dev))
7321 ret = stmmac_open(dev);
7322
7323 return ret;
7324}
7325
7326static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7327{
7328 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7329 struct dma_desc *desc_contains_ts = ctx->desc;
7330 struct stmmac_priv *priv = ctx->priv;
7331 struct dma_desc *ndesc = ctx->ndesc;
7332 struct dma_desc *desc = ctx->desc;
7333 u64 ns = 0;
7334
7335 if (!priv->hwts_rx_en)
7336 return -ENODATA;
7337
7338 /* For GMAC4, the valid timestamp is from CTX next desc. */
7339 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7340 desc_contains_ts = ndesc;
7341
7342 /* Check if timestamp is available */
7343 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7344 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7345 ns -= priv->plat->cdc_error_adj;
7346 *timestamp = ns_to_ktime(ns);
7347 return 0;
7348 }
7349
7350 return -ENODATA;
7351}
7352
7353static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7354 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7355};
7356
7357/**
7358 * stmmac_dvr_probe
7359 * @device: device pointer
7360 * @plat_dat: platform data pointer
7361 * @res: stmmac resource pointer
7362 * Description: this is the main probe function used to
7363 * call the alloc_etherdev, allocate the priv structure.
7364 * Return:
7365 * returns 0 on success, otherwise errno.
7366 */
7367int stmmac_dvr_probe(struct device *device,
7368 struct plat_stmmacenet_data *plat_dat,
7369 struct stmmac_resources *res)
7370{
7371 struct net_device *ndev = NULL;
7372 struct stmmac_priv *priv;
7373 u32 rxq;
7374 int i, ret = 0;
7375
7376 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7377 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7378 if (!ndev)
7379 return -ENOMEM;
7380
7381 SET_NETDEV_DEV(ndev, device);
7382
7383 priv = netdev_priv(ndev);
7384 priv->device = device;
7385 priv->dev = ndev;
7386
7387 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7388 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7389 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7390 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7391 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7392 }
7393
7394 priv->xstats.pcpu_stats =
7395 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7396 if (!priv->xstats.pcpu_stats)
7397 return -ENOMEM;
7398
7399 stmmac_set_ethtool_ops(ndev);
7400 priv->pause = pause;
7401 priv->plat = plat_dat;
7402 priv->ioaddr = res->addr;
7403 priv->dev->base_addr = (unsigned long)res->addr;
7404 priv->plat->dma_cfg->multi_msi_en =
7405 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7406
7407 priv->dev->irq = res->irq;
7408 priv->wol_irq = res->wol_irq;
7409 priv->lpi_irq = res->lpi_irq;
7410 priv->sfty_irq = res->sfty_irq;
7411 priv->sfty_ce_irq = res->sfty_ce_irq;
7412 priv->sfty_ue_irq = res->sfty_ue_irq;
7413 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7414 priv->rx_irq[i] = res->rx_irq[i];
7415 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7416 priv->tx_irq[i] = res->tx_irq[i];
7417
7418 if (!is_zero_ether_addr(res->mac))
7419 eth_hw_addr_set(priv->dev, res->mac);
7420
7421 dev_set_drvdata(device, priv->dev);
7422
7423 /* Verify driver arguments */
7424 stmmac_verify_args();
7425
7426 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7427 if (!priv->af_xdp_zc_qps)
7428 return -ENOMEM;
7429
7430 /* Allocate workqueue */
7431 priv->wq = create_singlethread_workqueue("stmmac_wq");
7432 if (!priv->wq) {
7433 dev_err(priv->device, "failed to create workqueue\n");
7434 ret = -ENOMEM;
7435 goto error_wq_init;
7436 }
7437
7438 INIT_WORK(&priv->service_task, stmmac_service_task);
7439
7440 /* Override with kernel parameters if supplied XXX CRS XXX
7441 * this needs to have multiple instances
7442 */
7443 if ((phyaddr >= 0) && (phyaddr <= 31))
7444 priv->plat->phy_addr = phyaddr;
7445
7446 if (priv->plat->stmmac_rst) {
7447 ret = reset_control_assert(priv->plat->stmmac_rst);
7448 reset_control_deassert(priv->plat->stmmac_rst);
7449 /* Some reset controllers have only reset callback instead of
7450 * assert + deassert callbacks pair.
7451 */
7452 if (ret == -ENOTSUPP)
7453 reset_control_reset(priv->plat->stmmac_rst);
7454 }
7455
7456 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7457 if (ret == -ENOTSUPP)
7458 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7459 ERR_PTR(ret));
7460
7461 /* Wait a bit for the reset to take effect */
7462 udelay(10);
7463
7464 /* Init MAC and get the capabilities */
7465 ret = stmmac_hw_init(priv);
7466 if (ret)
7467 goto error_hw_init;
7468
7469 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7470 */
7471 if (priv->synopsys_id < DWMAC_CORE_5_20)
7472 priv->plat->dma_cfg->dche = false;
7473
7474 stmmac_check_ether_addr(priv);
7475
7476 ndev->netdev_ops = &stmmac_netdev_ops;
7477
7478 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7479 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7480
7481 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7482 NETIF_F_RXCSUM;
7483 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7484 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7485
7486 ret = stmmac_tc_init(priv, priv);
7487 if (!ret) {
7488 ndev->hw_features |= NETIF_F_HW_TC;
7489 }
7490
7491 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7492 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7493 if (priv->plat->has_gmac4)
7494 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7495 priv->tso = true;
7496 dev_info(priv->device, "TSO feature enabled\n");
7497 }
7498
7499 if (priv->dma_cap.sphen &&
7500 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7501 ndev->hw_features |= NETIF_F_GRO;
7502 priv->sph_cap = true;
7503 priv->sph = priv->sph_cap;
7504 dev_info(priv->device, "SPH feature enabled\n");
7505 }
7506
7507 /* Ideally our host DMA address width is the same as for the
7508 * device. However, it may differ and then we have to use our
7509 * host DMA width for allocation and the device DMA width for
7510 * register handling.
7511 */
7512 if (priv->plat->host_dma_width)
7513 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7514 else
7515 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7516
7517 if (priv->dma_cap.host_dma_width) {
7518 ret = dma_set_mask_and_coherent(device,
7519 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7520 if (!ret) {
7521 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7522 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7523
7524 /*
7525 * If more than 32 bits can be addressed, make sure to
7526 * enable enhanced addressing mode.
7527 */
7528 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7529 priv->plat->dma_cfg->eame = true;
7530 } else {
7531 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7532 if (ret) {
7533 dev_err(priv->device, "Failed to set DMA Mask\n");
7534 goto error_hw_init;
7535 }
7536
7537 priv->dma_cap.host_dma_width = 32;
7538 }
7539 }
7540
7541 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7542 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7543#ifdef STMMAC_VLAN_TAG_USED
7544 /* Both mac100 and gmac support receive VLAN tag detection */
7545 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7546 if (priv->plat->has_gmac4) {
7547 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7548 priv->hw->hw_vlan_en = true;
7549 }
7550 if (priv->dma_cap.vlhash) {
7551 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7552 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7553 }
7554 if (priv->dma_cap.vlins) {
7555 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7556 if (priv->dma_cap.dvlan)
7557 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7558 }
7559#endif
7560 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7561
7562 priv->xstats.threshold = tc;
7563
7564 /* Initialize RSS */
7565 rxq = priv->plat->rx_queues_to_use;
7566 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7567 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7568 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7569
7570 if (priv->dma_cap.rssen && priv->plat->rss_en)
7571 ndev->features |= NETIF_F_RXHASH;
7572
7573 ndev->vlan_features |= ndev->features;
7574
7575 /* MTU range: 46 - hw-specific max */
7576 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7577 if (priv->plat->has_xgmac)
7578 ndev->max_mtu = XGMAC_JUMBO_LEN;
7579 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7580 ndev->max_mtu = JUMBO_LEN;
7581 else
7582 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7583 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7584 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7585 */
7586 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7587 (priv->plat->maxmtu >= ndev->min_mtu))
7588 ndev->max_mtu = priv->plat->maxmtu;
7589 else if (priv->plat->maxmtu < ndev->min_mtu)
7590 dev_warn(priv->device,
7591 "%s: warning: maxmtu having invalid value (%d)\n",
7592 __func__, priv->plat->maxmtu);
7593
7594 if (flow_ctrl)
7595 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
7596
7597 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7598
7599 /* Setup channels NAPI */
7600 stmmac_napi_add(ndev);
7601
7602 mutex_init(&priv->lock);
7603
7604 stmmac_fpe_init(priv);
7605
7606 /* If a specific clk_csr value is passed from the platform
7607 * this means that the CSR Clock Range selection cannot be
7608 * changed at run-time and it is fixed. Viceversa the driver'll try to
7609 * set the MDC clock dynamically according to the csr actual
7610 * clock input.
7611 */
7612 if (priv->plat->clk_csr >= 0)
7613 priv->clk_csr = priv->plat->clk_csr;
7614 else
7615 stmmac_clk_csr_set(priv);
7616
7617 stmmac_check_pcs_mode(priv);
7618
7619 pm_runtime_get_noresume(device);
7620 pm_runtime_set_active(device);
7621 if (!pm_runtime_enabled(device))
7622 pm_runtime_enable(device);
7623
7624 ret = stmmac_mdio_register(ndev);
7625 if (ret < 0) {
7626 dev_err_probe(priv->device, ret,
7627 "MDIO bus (id: %d) registration failed\n",
7628 priv->plat->bus_id);
7629 goto error_mdio_register;
7630 }
7631
7632 if (priv->plat->speed_mode_2500)
7633 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7634
7635 ret = stmmac_pcs_setup(ndev);
7636 if (ret)
7637 goto error_pcs_setup;
7638
7639 ret = stmmac_phy_setup(priv);
7640 if (ret) {
7641 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7642 goto error_phy_setup;
7643 }
7644
7645 ret = register_netdev(ndev);
7646 if (ret) {
7647 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7648 __func__, ret);
7649 goto error_netdev_register;
7650 }
7651
7652#ifdef CONFIG_DEBUG_FS
7653 stmmac_init_fs(ndev);
7654#endif
7655
7656 if (priv->plat->dump_debug_regs)
7657 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7658
7659 /* Let pm_runtime_put() disable the clocks.
7660 * If CONFIG_PM is not enabled, the clocks will stay powered.
7661 */
7662 pm_runtime_put(device);
7663
7664 return ret;
7665
7666error_netdev_register:
7667 phylink_destroy(priv->phylink);
7668error_phy_setup:
7669 stmmac_pcs_clean(ndev);
7670error_pcs_setup:
7671 stmmac_mdio_unregister(ndev);
7672error_mdio_register:
7673 stmmac_napi_del(ndev);
7674error_hw_init:
7675 destroy_workqueue(priv->wq);
7676error_wq_init:
7677 bitmap_free(priv->af_xdp_zc_qps);
7678
7679 return ret;
7680}
7681EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7682
7683/**
7684 * stmmac_dvr_remove
7685 * @dev: device pointer
7686 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7687 * changes the link status, releases the DMA descriptor rings.
7688 */
7689void stmmac_dvr_remove(struct device *dev)
7690{
7691 struct net_device *ndev = dev_get_drvdata(dev);
7692 struct stmmac_priv *priv = netdev_priv(ndev);
7693
7694 netdev_info(priv->dev, "%s: removing driver", __func__);
7695
7696 pm_runtime_get_sync(dev);
7697
7698 stmmac_stop_all_dma(priv);
7699 stmmac_mac_set(priv, priv->ioaddr, false);
7700 unregister_netdev(ndev);
7701
7702#ifdef CONFIG_DEBUG_FS
7703 stmmac_exit_fs(ndev);
7704#endif
7705 phylink_destroy(priv->phylink);
7706 if (priv->plat->stmmac_rst)
7707 reset_control_assert(priv->plat->stmmac_rst);
7708 reset_control_assert(priv->plat->stmmac_ahb_rst);
7709
7710 stmmac_pcs_clean(ndev);
7711 stmmac_mdio_unregister(ndev);
7712
7713 destroy_workqueue(priv->wq);
7714 mutex_destroy(&priv->lock);
7715 bitmap_free(priv->af_xdp_zc_qps);
7716
7717 pm_runtime_disable(dev);
7718 pm_runtime_put_noidle(dev);
7719}
7720EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7721
7722/**
7723 * stmmac_suspend - suspend callback
7724 * @dev: device pointer
7725 * Description: this is the function to suspend the device and it is called
7726 * by the platform driver to stop the network queue, release the resources,
7727 * program the PMT register (for WoL), clean and release driver resources.
7728 */
7729int stmmac_suspend(struct device *dev)
7730{
7731 struct net_device *ndev = dev_get_drvdata(dev);
7732 struct stmmac_priv *priv = netdev_priv(ndev);
7733 u32 chan;
7734
7735 if (!ndev || !netif_running(ndev))
7736 return 0;
7737
7738 mutex_lock(&priv->lock);
7739
7740 netif_device_detach(ndev);
7741
7742 stmmac_disable_all_queues(priv);
7743
7744 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7745 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7746
7747 if (priv->eee_enabled) {
7748 priv->tx_path_in_lpi_mode = false;
7749 del_timer_sync(&priv->eee_ctrl_timer);
7750 }
7751
7752 /* Stop TX/RX DMA */
7753 stmmac_stop_all_dma(priv);
7754
7755 if (priv->plat->serdes_powerdown)
7756 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7757
7758 /* Enable Power down mode by programming the PMT regs */
7759 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7760 stmmac_pmt(priv, priv->hw, priv->wolopts);
7761 priv->irq_wake = 1;
7762 } else {
7763 stmmac_mac_set(priv, priv->ioaddr, false);
7764 pinctrl_pm_select_sleep_state(priv->device);
7765 }
7766
7767 mutex_unlock(&priv->lock);
7768
7769 rtnl_lock();
7770 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7771 phylink_suspend(priv->phylink, true);
7772 } else {
7773 if (device_may_wakeup(priv->device))
7774 phylink_speed_down(priv->phylink, false);
7775 phylink_suspend(priv->phylink, false);
7776 }
7777 rtnl_unlock();
7778
7779 if (stmmac_fpe_supported(priv))
7780 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7781
7782 priv->speed = SPEED_UNKNOWN;
7783 return 0;
7784}
7785EXPORT_SYMBOL_GPL(stmmac_suspend);
7786
7787static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7788{
7789 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7790
7791 rx_q->cur_rx = 0;
7792 rx_q->dirty_rx = 0;
7793}
7794
7795static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7796{
7797 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7798
7799 tx_q->cur_tx = 0;
7800 tx_q->dirty_tx = 0;
7801 tx_q->mss = 0;
7802
7803 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7804}
7805
7806/**
7807 * stmmac_reset_queues_param - reset queue parameters
7808 * @priv: device pointer
7809 */
7810static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7811{
7812 u32 rx_cnt = priv->plat->rx_queues_to_use;
7813 u32 tx_cnt = priv->plat->tx_queues_to_use;
7814 u32 queue;
7815
7816 for (queue = 0; queue < rx_cnt; queue++)
7817 stmmac_reset_rx_queue(priv, queue);
7818
7819 for (queue = 0; queue < tx_cnt; queue++)
7820 stmmac_reset_tx_queue(priv, queue);
7821}
7822
7823/**
7824 * stmmac_resume - resume callback
7825 * @dev: device pointer
7826 * Description: when resume this function is invoked to setup the DMA and CORE
7827 * in a usable state.
7828 */
7829int stmmac_resume(struct device *dev)
7830{
7831 struct net_device *ndev = dev_get_drvdata(dev);
7832 struct stmmac_priv *priv = netdev_priv(ndev);
7833 int ret;
7834
7835 if (!netif_running(ndev))
7836 return 0;
7837
7838 /* Power Down bit, into the PM register, is cleared
7839 * automatically as soon as a magic packet or a Wake-up frame
7840 * is received. Anyway, it's better to manually clear
7841 * this bit because it can generate problems while resuming
7842 * from another devices (e.g. serial console).
7843 */
7844 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7845 mutex_lock(&priv->lock);
7846 stmmac_pmt(priv, priv->hw, 0);
7847 mutex_unlock(&priv->lock);
7848 priv->irq_wake = 0;
7849 } else {
7850 pinctrl_pm_select_default_state(priv->device);
7851 /* reset the phy so that it's ready */
7852 if (priv->mii)
7853 stmmac_mdio_reset(priv->mii);
7854 }
7855
7856 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7857 priv->plat->serdes_powerup) {
7858 ret = priv->plat->serdes_powerup(ndev,
7859 priv->plat->bsp_priv);
7860
7861 if (ret < 0)
7862 return ret;
7863 }
7864
7865 rtnl_lock();
7866 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7867 phylink_resume(priv->phylink);
7868 } else {
7869 phylink_resume(priv->phylink);
7870 if (device_may_wakeup(priv->device))
7871 phylink_speed_up(priv->phylink);
7872 }
7873 rtnl_unlock();
7874
7875 rtnl_lock();
7876 mutex_lock(&priv->lock);
7877
7878 stmmac_reset_queues_param(priv);
7879
7880 stmmac_free_tx_skbufs(priv);
7881 stmmac_clear_descriptors(priv, &priv->dma_conf);
7882
7883 stmmac_hw_setup(ndev, false);
7884 stmmac_init_coalesce(priv);
7885 stmmac_set_rx_mode(ndev);
7886
7887 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7888
7889 stmmac_enable_all_queues(priv);
7890 stmmac_enable_all_dma_irq(priv);
7891
7892 mutex_unlock(&priv->lock);
7893 rtnl_unlock();
7894
7895 netif_device_attach(ndev);
7896
7897 return 0;
7898}
7899EXPORT_SYMBOL_GPL(stmmac_resume);
7900
7901#ifndef MODULE
7902static int __init stmmac_cmdline_opt(char *str)
7903{
7904 char *opt;
7905
7906 if (!str || !*str)
7907 return 1;
7908 while ((opt = strsep(&str, ",")) != NULL) {
7909 if (!strncmp(opt, "debug:", 6)) {
7910 if (kstrtoint(opt + 6, 0, &debug))
7911 goto err;
7912 } else if (!strncmp(opt, "phyaddr:", 8)) {
7913 if (kstrtoint(opt + 8, 0, &phyaddr))
7914 goto err;
7915 } else if (!strncmp(opt, "buf_sz:", 7)) {
7916 if (kstrtoint(opt + 7, 0, &buf_sz))
7917 goto err;
7918 } else if (!strncmp(opt, "tc:", 3)) {
7919 if (kstrtoint(opt + 3, 0, &tc))
7920 goto err;
7921 } else if (!strncmp(opt, "watchdog:", 9)) {
7922 if (kstrtoint(opt + 9, 0, &watchdog))
7923 goto err;
7924 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7925 if (kstrtoint(opt + 10, 0, &flow_ctrl))
7926 goto err;
7927 } else if (!strncmp(opt, "pause:", 6)) {
7928 if (kstrtoint(opt + 6, 0, &pause))
7929 goto err;
7930 } else if (!strncmp(opt, "eee_timer:", 10)) {
7931 if (kstrtoint(opt + 10, 0, &eee_timer))
7932 goto err;
7933 } else if (!strncmp(opt, "chain_mode:", 11)) {
7934 if (kstrtoint(opt + 11, 0, &chain_mode))
7935 goto err;
7936 }
7937 }
7938 return 1;
7939
7940err:
7941 pr_err("%s: ERROR broken module parameter conversion", __func__);
7942 return 1;
7943}
7944
7945__setup("stmmaceth=", stmmac_cmdline_opt);
7946#endif /* MODULE */
7947
7948static int __init stmmac_init(void)
7949{
7950#ifdef CONFIG_DEBUG_FS
7951 /* Create debugfs main directory if it doesn't exist yet */
7952 if (!stmmac_fs_dir)
7953 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7954 register_netdevice_notifier(&stmmac_notifier);
7955#endif
7956
7957 return 0;
7958}
7959
7960static void __exit stmmac_exit(void)
7961{
7962#ifdef CONFIG_DEBUG_FS
7963 unregister_netdevice_notifier(&stmmac_notifier);
7964 debugfs_remove_recursive(stmmac_fs_dir);
7965#endif
7966}
7967
7968module_init(stmmac_init)
7969module_exit(stmmac_exit)
7970
7971MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7972MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7973MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2/*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15*******************************************************************************/
16
17#include <linux/clk.h>
18#include <linux/kernel.h>
19#include <linux/interrupt.h>
20#include <linux/ip.h>
21#include <linux/tcp.h>
22#include <linux/skbuff.h>
23#include <linux/ethtool.h>
24#include <linux/if_ether.h>
25#include <linux/crc32.h>
26#include <linux/mii.h>
27#include <linux/if.h>
28#include <linux/if_vlan.h>
29#include <linux/dma-mapping.h>
30#include <linux/slab.h>
31#include <linux/prefetch.h>
32#include <linux/pinctrl/consumer.h>
33#ifdef CONFIG_DEBUG_FS
34#include <linux/debugfs.h>
35#include <linux/seq_file.h>
36#endif /* CONFIG_DEBUG_FS */
37#include <linux/net_tstamp.h>
38#include <linux/phylink.h>
39#include <linux/udp.h>
40#include <net/pkt_cls.h>
41#include "stmmac_ptp.h"
42#include "stmmac.h"
43#include <linux/reset.h>
44#include <linux/of_mdio.h>
45#include "dwmac1000.h"
46#include "dwxgmac2.h"
47#include "hwif.h"
48
49#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
50#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
51
52/* Module parameters */
53#define TX_TIMEO 5000
54static int watchdog = TX_TIMEO;
55module_param(watchdog, int, 0644);
56MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
57
58static int debug = -1;
59module_param(debug, int, 0644);
60MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
61
62static int phyaddr = -1;
63module_param(phyaddr, int, 0444);
64MODULE_PARM_DESC(phyaddr, "Physical device address");
65
66#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
67#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
68
69static int flow_ctrl = FLOW_AUTO;
70module_param(flow_ctrl, int, 0644);
71MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
72
73static int pause = PAUSE_TIME;
74module_param(pause, int, 0644);
75MODULE_PARM_DESC(pause, "Flow Control Pause Time");
76
77#define TC_DEFAULT 64
78static int tc = TC_DEFAULT;
79module_param(tc, int, 0644);
80MODULE_PARM_DESC(tc, "DMA threshold control value");
81
82#define DEFAULT_BUFSIZE 1536
83static int buf_sz = DEFAULT_BUFSIZE;
84module_param(buf_sz, int, 0644);
85MODULE_PARM_DESC(buf_sz, "DMA buffer size");
86
87#define STMMAC_RX_COPYBREAK 256
88
89static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
90 NETIF_MSG_LINK | NETIF_MSG_IFUP |
91 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
92
93#define STMMAC_DEFAULT_LPI_TIMER 1000
94static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
95module_param(eee_timer, int, 0644);
96MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
97#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
98
99/* By default the driver will use the ring mode to manage tx and rx descriptors,
100 * but allow user to force to use the chain instead of the ring
101 */
102static unsigned int chain_mode;
103module_param(chain_mode, int, 0444);
104MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
105
106static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
107
108#ifdef CONFIG_DEBUG_FS
109static const struct net_device_ops stmmac_netdev_ops;
110static void stmmac_init_fs(struct net_device *dev);
111static void stmmac_exit_fs(struct net_device *dev);
112#endif
113
114#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
115
116/**
117 * stmmac_verify_args - verify the driver parameters.
118 * Description: it checks the driver parameters and set a default in case of
119 * errors.
120 */
121static void stmmac_verify_args(void)
122{
123 if (unlikely(watchdog < 0))
124 watchdog = TX_TIMEO;
125 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
126 buf_sz = DEFAULT_BUFSIZE;
127 if (unlikely(flow_ctrl > 1))
128 flow_ctrl = FLOW_AUTO;
129 else if (likely(flow_ctrl < 0))
130 flow_ctrl = FLOW_OFF;
131 if (unlikely((pause < 0) || (pause > 0xffff)))
132 pause = PAUSE_TIME;
133 if (eee_timer < 0)
134 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
135}
136
137/**
138 * stmmac_disable_all_queues - Disable all queues
139 * @priv: driver private structure
140 */
141static void stmmac_disable_all_queues(struct stmmac_priv *priv)
142{
143 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
144 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
145 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
146 u32 queue;
147
148 for (queue = 0; queue < maxq; queue++) {
149 struct stmmac_channel *ch = &priv->channel[queue];
150
151 if (queue < rx_queues_cnt)
152 napi_disable(&ch->rx_napi);
153 if (queue < tx_queues_cnt)
154 napi_disable(&ch->tx_napi);
155 }
156}
157
158/**
159 * stmmac_enable_all_queues - Enable all queues
160 * @priv: driver private structure
161 */
162static void stmmac_enable_all_queues(struct stmmac_priv *priv)
163{
164 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
165 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
166 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
167 u32 queue;
168
169 for (queue = 0; queue < maxq; queue++) {
170 struct stmmac_channel *ch = &priv->channel[queue];
171
172 if (queue < rx_queues_cnt)
173 napi_enable(&ch->rx_napi);
174 if (queue < tx_queues_cnt)
175 napi_enable(&ch->tx_napi);
176 }
177}
178
179/**
180 * stmmac_stop_all_queues - Stop all queues
181 * @priv: driver private structure
182 */
183static void stmmac_stop_all_queues(struct stmmac_priv *priv)
184{
185 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
186 u32 queue;
187
188 for (queue = 0; queue < tx_queues_cnt; queue++)
189 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
190}
191
192/**
193 * stmmac_start_all_queues - Start all queues
194 * @priv: driver private structure
195 */
196static void stmmac_start_all_queues(struct stmmac_priv *priv)
197{
198 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
199 u32 queue;
200
201 for (queue = 0; queue < tx_queues_cnt; queue++)
202 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
203}
204
205static void stmmac_service_event_schedule(struct stmmac_priv *priv)
206{
207 if (!test_bit(STMMAC_DOWN, &priv->state) &&
208 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
209 queue_work(priv->wq, &priv->service_task);
210}
211
212static void stmmac_global_err(struct stmmac_priv *priv)
213{
214 netif_carrier_off(priv->dev);
215 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
216 stmmac_service_event_schedule(priv);
217}
218
219/**
220 * stmmac_clk_csr_set - dynamically set the MDC clock
221 * @priv: driver private structure
222 * Description: this is to dynamically set the MDC clock according to the csr
223 * clock input.
224 * Note:
225 * If a specific clk_csr value is passed from the platform
226 * this means that the CSR Clock Range selection cannot be
227 * changed at run-time and it is fixed (as reported in the driver
228 * documentation). Viceversa the driver will try to set the MDC
229 * clock dynamically according to the actual clock input.
230 */
231static void stmmac_clk_csr_set(struct stmmac_priv *priv)
232{
233 u32 clk_rate;
234
235 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
236
237 /* Platform provided default clk_csr would be assumed valid
238 * for all other cases except for the below mentioned ones.
239 * For values higher than the IEEE 802.3 specified frequency
240 * we can not estimate the proper divider as it is not known
241 * the frequency of clk_csr_i. So we do not change the default
242 * divider.
243 */
244 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
245 if (clk_rate < CSR_F_35M)
246 priv->clk_csr = STMMAC_CSR_20_35M;
247 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
248 priv->clk_csr = STMMAC_CSR_35_60M;
249 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
250 priv->clk_csr = STMMAC_CSR_60_100M;
251 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
252 priv->clk_csr = STMMAC_CSR_100_150M;
253 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
254 priv->clk_csr = STMMAC_CSR_150_250M;
255 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
256 priv->clk_csr = STMMAC_CSR_250_300M;
257 }
258
259 if (priv->plat->has_sun8i) {
260 if (clk_rate > 160000000)
261 priv->clk_csr = 0x03;
262 else if (clk_rate > 80000000)
263 priv->clk_csr = 0x02;
264 else if (clk_rate > 40000000)
265 priv->clk_csr = 0x01;
266 else
267 priv->clk_csr = 0;
268 }
269
270 if (priv->plat->has_xgmac) {
271 if (clk_rate > 400000000)
272 priv->clk_csr = 0x5;
273 else if (clk_rate > 350000000)
274 priv->clk_csr = 0x4;
275 else if (clk_rate > 300000000)
276 priv->clk_csr = 0x3;
277 else if (clk_rate > 250000000)
278 priv->clk_csr = 0x2;
279 else if (clk_rate > 150000000)
280 priv->clk_csr = 0x1;
281 else
282 priv->clk_csr = 0x0;
283 }
284}
285
286static void print_pkt(unsigned char *buf, int len)
287{
288 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
289 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
290}
291
292static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
293{
294 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
295 u32 avail;
296
297 if (tx_q->dirty_tx > tx_q->cur_tx)
298 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
299 else
300 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
301
302 return avail;
303}
304
305/**
306 * stmmac_rx_dirty - Get RX queue dirty
307 * @priv: driver private structure
308 * @queue: RX queue index
309 */
310static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
311{
312 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
313 u32 dirty;
314
315 if (rx_q->dirty_rx <= rx_q->cur_rx)
316 dirty = rx_q->cur_rx - rx_q->dirty_rx;
317 else
318 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
319
320 return dirty;
321}
322
323/**
324 * stmmac_enable_eee_mode - check and enter in LPI mode
325 * @priv: driver private structure
326 * Description: this function is to verify and enter in LPI mode in case of
327 * EEE.
328 */
329static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
330{
331 u32 tx_cnt = priv->plat->tx_queues_to_use;
332 u32 queue;
333
334 /* check if all TX queues have the work finished */
335 for (queue = 0; queue < tx_cnt; queue++) {
336 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
337
338 if (tx_q->dirty_tx != tx_q->cur_tx)
339 return; /* still unfinished work */
340 }
341
342 /* Check and enter in LPI mode */
343 if (!priv->tx_path_in_lpi_mode)
344 stmmac_set_eee_mode(priv, priv->hw,
345 priv->plat->en_tx_lpi_clockgating);
346}
347
348/**
349 * stmmac_disable_eee_mode - disable and exit from LPI mode
350 * @priv: driver private structure
351 * Description: this function is to exit and disable EEE in case of
352 * LPI state is true. This is called by the xmit.
353 */
354void stmmac_disable_eee_mode(struct stmmac_priv *priv)
355{
356 stmmac_reset_eee_mode(priv, priv->hw);
357 del_timer_sync(&priv->eee_ctrl_timer);
358 priv->tx_path_in_lpi_mode = false;
359}
360
361/**
362 * stmmac_eee_ctrl_timer - EEE TX SW timer.
363 * @arg : data hook
364 * Description:
365 * if there is no data transfer and if we are not in LPI state,
366 * then MAC Transmitter can be moved to LPI state.
367 */
368static void stmmac_eee_ctrl_timer(struct timer_list *t)
369{
370 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
371
372 stmmac_enable_eee_mode(priv);
373 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
374}
375
376/**
377 * stmmac_eee_init - init EEE
378 * @priv: driver private structure
379 * Description:
380 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
381 * can also manage EEE, this function enable the LPI state and start related
382 * timer.
383 */
384bool stmmac_eee_init(struct stmmac_priv *priv)
385{
386 int eee_tw_timer = priv->eee_tw_timer;
387
388 /* Using PCS we cannot dial with the phy registers at this stage
389 * so we do not support extra feature like EEE.
390 */
391 if (priv->hw->pcs == STMMAC_PCS_TBI ||
392 priv->hw->pcs == STMMAC_PCS_RTBI)
393 return false;
394
395 /* Check if MAC core supports the EEE feature. */
396 if (!priv->dma_cap.eee)
397 return false;
398
399 mutex_lock(&priv->lock);
400
401 /* Check if it needs to be deactivated */
402 if (!priv->eee_active) {
403 if (priv->eee_enabled) {
404 netdev_dbg(priv->dev, "disable EEE\n");
405 del_timer_sync(&priv->eee_ctrl_timer);
406 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
407 }
408 mutex_unlock(&priv->lock);
409 return false;
410 }
411
412 if (priv->eee_active && !priv->eee_enabled) {
413 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
414 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
415 eee_tw_timer);
416 }
417
418 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
419
420 mutex_unlock(&priv->lock);
421 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
422 return true;
423}
424
425/* stmmac_get_tx_hwtstamp - get HW TX timestamps
426 * @priv: driver private structure
427 * @p : descriptor pointer
428 * @skb : the socket buffer
429 * Description :
430 * This function will read timestamp from the descriptor & pass it to stack.
431 * and also perform some sanity checks.
432 */
433static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
434 struct dma_desc *p, struct sk_buff *skb)
435{
436 struct skb_shared_hwtstamps shhwtstamp;
437 bool found = false;
438 u64 ns = 0;
439
440 if (!priv->hwts_tx_en)
441 return;
442
443 /* exit if skb doesn't support hw tstamp */
444 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
445 return;
446
447 /* check tx tstamp status */
448 if (stmmac_get_tx_timestamp_status(priv, p)) {
449 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
450 found = true;
451 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
452 found = true;
453 }
454
455 if (found) {
456 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
457 shhwtstamp.hwtstamp = ns_to_ktime(ns);
458
459 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
460 /* pass tstamp to stack */
461 skb_tstamp_tx(skb, &shhwtstamp);
462 }
463}
464
465/* stmmac_get_rx_hwtstamp - get HW RX timestamps
466 * @priv: driver private structure
467 * @p : descriptor pointer
468 * @np : next descriptor pointer
469 * @skb : the socket buffer
470 * Description :
471 * This function will read received packet's timestamp from the descriptor
472 * and pass it to stack. It also perform some sanity checks.
473 */
474static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
475 struct dma_desc *np, struct sk_buff *skb)
476{
477 struct skb_shared_hwtstamps *shhwtstamp = NULL;
478 struct dma_desc *desc = p;
479 u64 ns = 0;
480
481 if (!priv->hwts_rx_en)
482 return;
483 /* For GMAC4, the valid timestamp is from CTX next desc. */
484 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
485 desc = np;
486
487 /* Check if timestamp is available */
488 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
489 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
490 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
491 shhwtstamp = skb_hwtstamps(skb);
492 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
493 shhwtstamp->hwtstamp = ns_to_ktime(ns);
494 } else {
495 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
496 }
497}
498
499/**
500 * stmmac_hwtstamp_set - control hardware timestamping.
501 * @dev: device pointer.
502 * @ifr: An IOCTL specific structure, that can contain a pointer to
503 * a proprietary structure used to pass information to the driver.
504 * Description:
505 * This function configures the MAC to enable/disable both outgoing(TX)
506 * and incoming(RX) packets time stamping based on user input.
507 * Return Value:
508 * 0 on success and an appropriate -ve integer on failure.
509 */
510static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
511{
512 struct stmmac_priv *priv = netdev_priv(dev);
513 struct hwtstamp_config config;
514 struct timespec64 now;
515 u64 temp = 0;
516 u32 ptp_v2 = 0;
517 u32 tstamp_all = 0;
518 u32 ptp_over_ipv4_udp = 0;
519 u32 ptp_over_ipv6_udp = 0;
520 u32 ptp_over_ethernet = 0;
521 u32 snap_type_sel = 0;
522 u32 ts_master_en = 0;
523 u32 ts_event_en = 0;
524 u32 sec_inc = 0;
525 u32 value = 0;
526 bool xmac;
527
528 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
529
530 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
531 netdev_alert(priv->dev, "No support for HW time stamping\n");
532 priv->hwts_tx_en = 0;
533 priv->hwts_rx_en = 0;
534
535 return -EOPNOTSUPP;
536 }
537
538 if (copy_from_user(&config, ifr->ifr_data,
539 sizeof(config)))
540 return -EFAULT;
541
542 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
543 __func__, config.flags, config.tx_type, config.rx_filter);
544
545 /* reserved for future extensions */
546 if (config.flags)
547 return -EINVAL;
548
549 if (config.tx_type != HWTSTAMP_TX_OFF &&
550 config.tx_type != HWTSTAMP_TX_ON)
551 return -ERANGE;
552
553 if (priv->adv_ts) {
554 switch (config.rx_filter) {
555 case HWTSTAMP_FILTER_NONE:
556 /* time stamp no incoming packet at all */
557 config.rx_filter = HWTSTAMP_FILTER_NONE;
558 break;
559
560 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
561 /* PTP v1, UDP, any kind of event packet */
562 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
563 /* 'xmac' hardware can support Sync, Pdelay_Req and
564 * Pdelay_resp by setting bit14 and bits17/16 to 01
565 * This leaves Delay_Req timestamps out.
566 * Enable all events *and* general purpose message
567 * timestamping
568 */
569 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
570 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
571 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
572 break;
573
574 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
575 /* PTP v1, UDP, Sync packet */
576 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
577 /* take time stamp for SYNC messages only */
578 ts_event_en = PTP_TCR_TSEVNTENA;
579
580 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
581 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
582 break;
583
584 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
585 /* PTP v1, UDP, Delay_req packet */
586 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
587 /* take time stamp for Delay_Req messages only */
588 ts_master_en = PTP_TCR_TSMSTRENA;
589 ts_event_en = PTP_TCR_TSEVNTENA;
590
591 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
592 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
593 break;
594
595 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
596 /* PTP v2, UDP, any kind of event packet */
597 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
598 ptp_v2 = PTP_TCR_TSVER2ENA;
599 /* take time stamp for all event messages */
600 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
601
602 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
603 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
604 break;
605
606 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
607 /* PTP v2, UDP, Sync packet */
608 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
609 ptp_v2 = PTP_TCR_TSVER2ENA;
610 /* take time stamp for SYNC messages only */
611 ts_event_en = PTP_TCR_TSEVNTENA;
612
613 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
614 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
615 break;
616
617 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
618 /* PTP v2, UDP, Delay_req packet */
619 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
620 ptp_v2 = PTP_TCR_TSVER2ENA;
621 /* take time stamp for Delay_Req messages only */
622 ts_master_en = PTP_TCR_TSMSTRENA;
623 ts_event_en = PTP_TCR_TSEVNTENA;
624
625 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
626 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
627 break;
628
629 case HWTSTAMP_FILTER_PTP_V2_EVENT:
630 /* PTP v2/802.AS1 any layer, any kind of event packet */
631 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
632 ptp_v2 = PTP_TCR_TSVER2ENA;
633 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
634 if (priv->synopsys_id != DWMAC_CORE_5_10)
635 ts_event_en = PTP_TCR_TSEVNTENA;
636 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
637 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
638 ptp_over_ethernet = PTP_TCR_TSIPENA;
639 break;
640
641 case HWTSTAMP_FILTER_PTP_V2_SYNC:
642 /* PTP v2/802.AS1, any layer, Sync packet */
643 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
644 ptp_v2 = PTP_TCR_TSVER2ENA;
645 /* take time stamp for SYNC messages only */
646 ts_event_en = PTP_TCR_TSEVNTENA;
647
648 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
649 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
650 ptp_over_ethernet = PTP_TCR_TSIPENA;
651 break;
652
653 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
654 /* PTP v2/802.AS1, any layer, Delay_req packet */
655 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
656 ptp_v2 = PTP_TCR_TSVER2ENA;
657 /* take time stamp for Delay_Req messages only */
658 ts_master_en = PTP_TCR_TSMSTRENA;
659 ts_event_en = PTP_TCR_TSEVNTENA;
660
661 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
662 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
663 ptp_over_ethernet = PTP_TCR_TSIPENA;
664 break;
665
666 case HWTSTAMP_FILTER_NTP_ALL:
667 case HWTSTAMP_FILTER_ALL:
668 /* time stamp any incoming packet */
669 config.rx_filter = HWTSTAMP_FILTER_ALL;
670 tstamp_all = PTP_TCR_TSENALL;
671 break;
672
673 default:
674 return -ERANGE;
675 }
676 } else {
677 switch (config.rx_filter) {
678 case HWTSTAMP_FILTER_NONE:
679 config.rx_filter = HWTSTAMP_FILTER_NONE;
680 break;
681 default:
682 /* PTP v1, UDP, any kind of event packet */
683 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
684 break;
685 }
686 }
687 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
688 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
689
690 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
691 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
692 else {
693 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
694 tstamp_all | ptp_v2 | ptp_over_ethernet |
695 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
696 ts_master_en | snap_type_sel);
697 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
698
699 /* program Sub Second Increment reg */
700 stmmac_config_sub_second_increment(priv,
701 priv->ptpaddr, priv->plat->clk_ptp_rate,
702 xmac, &sec_inc);
703 temp = div_u64(1000000000ULL, sec_inc);
704
705 /* Store sub second increment and flags for later use */
706 priv->sub_second_inc = sec_inc;
707 priv->systime_flags = value;
708
709 /* calculate default added value:
710 * formula is :
711 * addend = (2^32)/freq_div_ratio;
712 * where, freq_div_ratio = 1e9ns/sec_inc
713 */
714 temp = (u64)(temp << 32);
715 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
716 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
717
718 /* initialize system time */
719 ktime_get_real_ts64(&now);
720
721 /* lower 32 bits of tv_sec are safe until y2106 */
722 stmmac_init_systime(priv, priv->ptpaddr,
723 (u32)now.tv_sec, now.tv_nsec);
724 }
725
726 memcpy(&priv->tstamp_config, &config, sizeof(config));
727
728 return copy_to_user(ifr->ifr_data, &config,
729 sizeof(config)) ? -EFAULT : 0;
730}
731
732/**
733 * stmmac_hwtstamp_get - read hardware timestamping.
734 * @dev: device pointer.
735 * @ifr: An IOCTL specific structure, that can contain a pointer to
736 * a proprietary structure used to pass information to the driver.
737 * Description:
738 * This function obtain the current hardware timestamping settings
739 as requested.
740 */
741static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
742{
743 struct stmmac_priv *priv = netdev_priv(dev);
744 struct hwtstamp_config *config = &priv->tstamp_config;
745
746 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
747 return -EOPNOTSUPP;
748
749 return copy_to_user(ifr->ifr_data, config,
750 sizeof(*config)) ? -EFAULT : 0;
751}
752
753/**
754 * stmmac_init_ptp - init PTP
755 * @priv: driver private structure
756 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
757 * This is done by looking at the HW cap. register.
758 * This function also registers the ptp driver.
759 */
760static int stmmac_init_ptp(struct stmmac_priv *priv)
761{
762 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
763
764 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
765 return -EOPNOTSUPP;
766
767 priv->adv_ts = 0;
768 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
769 if (xmac && priv->dma_cap.atime_stamp)
770 priv->adv_ts = 1;
771 /* Dwmac 3.x core with extend_desc can support adv_ts */
772 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
773 priv->adv_ts = 1;
774
775 if (priv->dma_cap.time_stamp)
776 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
777
778 if (priv->adv_ts)
779 netdev_info(priv->dev,
780 "IEEE 1588-2008 Advanced Timestamp supported\n");
781
782 priv->hwts_tx_en = 0;
783 priv->hwts_rx_en = 0;
784
785 stmmac_ptp_register(priv);
786
787 return 0;
788}
789
790static void stmmac_release_ptp(struct stmmac_priv *priv)
791{
792 if (priv->plat->clk_ptp_ref)
793 clk_disable_unprepare(priv->plat->clk_ptp_ref);
794 stmmac_ptp_unregister(priv);
795}
796
797/**
798 * stmmac_mac_flow_ctrl - Configure flow control in all queues
799 * @priv: driver private structure
800 * Description: It is used for configuring the flow control in all queues
801 */
802static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
803{
804 u32 tx_cnt = priv->plat->tx_queues_to_use;
805
806 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
807 priv->pause, tx_cnt);
808}
809
810static void stmmac_validate(struct phylink_config *config,
811 unsigned long *supported,
812 struct phylink_link_state *state)
813{
814 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
815 __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
816 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
817 int tx_cnt = priv->plat->tx_queues_to_use;
818 int max_speed = priv->plat->max_speed;
819
820 phylink_set(mac_supported, 10baseT_Half);
821 phylink_set(mac_supported, 10baseT_Full);
822 phylink_set(mac_supported, 100baseT_Half);
823 phylink_set(mac_supported, 100baseT_Full);
824 phylink_set(mac_supported, 1000baseT_Half);
825 phylink_set(mac_supported, 1000baseT_Full);
826 phylink_set(mac_supported, 1000baseKX_Full);
827
828 phylink_set(mac_supported, Autoneg);
829 phylink_set(mac_supported, Pause);
830 phylink_set(mac_supported, Asym_Pause);
831 phylink_set_port_modes(mac_supported);
832
833 /* Cut down 1G if asked to */
834 if ((max_speed > 0) && (max_speed < 1000)) {
835 phylink_set(mask, 1000baseT_Full);
836 phylink_set(mask, 1000baseX_Full);
837 } else if (priv->plat->has_xgmac) {
838 if (!max_speed || (max_speed >= 2500)) {
839 phylink_set(mac_supported, 2500baseT_Full);
840 phylink_set(mac_supported, 2500baseX_Full);
841 }
842 if (!max_speed || (max_speed >= 5000)) {
843 phylink_set(mac_supported, 5000baseT_Full);
844 }
845 if (!max_speed || (max_speed >= 10000)) {
846 phylink_set(mac_supported, 10000baseSR_Full);
847 phylink_set(mac_supported, 10000baseLR_Full);
848 phylink_set(mac_supported, 10000baseER_Full);
849 phylink_set(mac_supported, 10000baseLRM_Full);
850 phylink_set(mac_supported, 10000baseT_Full);
851 phylink_set(mac_supported, 10000baseKX4_Full);
852 phylink_set(mac_supported, 10000baseKR_Full);
853 }
854 if (!max_speed || (max_speed >= 25000)) {
855 phylink_set(mac_supported, 25000baseCR_Full);
856 phylink_set(mac_supported, 25000baseKR_Full);
857 phylink_set(mac_supported, 25000baseSR_Full);
858 }
859 if (!max_speed || (max_speed >= 40000)) {
860 phylink_set(mac_supported, 40000baseKR4_Full);
861 phylink_set(mac_supported, 40000baseCR4_Full);
862 phylink_set(mac_supported, 40000baseSR4_Full);
863 phylink_set(mac_supported, 40000baseLR4_Full);
864 }
865 if (!max_speed || (max_speed >= 50000)) {
866 phylink_set(mac_supported, 50000baseCR2_Full);
867 phylink_set(mac_supported, 50000baseKR2_Full);
868 phylink_set(mac_supported, 50000baseSR2_Full);
869 phylink_set(mac_supported, 50000baseKR_Full);
870 phylink_set(mac_supported, 50000baseSR_Full);
871 phylink_set(mac_supported, 50000baseCR_Full);
872 phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
873 phylink_set(mac_supported, 50000baseDR_Full);
874 }
875 if (!max_speed || (max_speed >= 100000)) {
876 phylink_set(mac_supported, 100000baseKR4_Full);
877 phylink_set(mac_supported, 100000baseSR4_Full);
878 phylink_set(mac_supported, 100000baseCR4_Full);
879 phylink_set(mac_supported, 100000baseLR4_ER4_Full);
880 phylink_set(mac_supported, 100000baseKR2_Full);
881 phylink_set(mac_supported, 100000baseSR2_Full);
882 phylink_set(mac_supported, 100000baseCR2_Full);
883 phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
884 phylink_set(mac_supported, 100000baseDR2_Full);
885 }
886 }
887
888 /* Half-Duplex can only work with single queue */
889 if (tx_cnt > 1) {
890 phylink_set(mask, 10baseT_Half);
891 phylink_set(mask, 100baseT_Half);
892 phylink_set(mask, 1000baseT_Half);
893 }
894
895 linkmode_and(supported, supported, mac_supported);
896 linkmode_andnot(supported, supported, mask);
897
898 linkmode_and(state->advertising, state->advertising, mac_supported);
899 linkmode_andnot(state->advertising, state->advertising, mask);
900
901 /* If PCS is supported, check which modes it supports. */
902 stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
903}
904
905static void stmmac_mac_pcs_get_state(struct phylink_config *config,
906 struct phylink_link_state *state)
907{
908 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
909
910 state->link = 0;
911 stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
912}
913
914static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
915 const struct phylink_link_state *state)
916{
917 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
918
919 stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
920}
921
922static void stmmac_mac_an_restart(struct phylink_config *config)
923{
924 /* Not Supported */
925}
926
927static void stmmac_mac_link_down(struct phylink_config *config,
928 unsigned int mode, phy_interface_t interface)
929{
930 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
931
932 stmmac_mac_set(priv, priv->ioaddr, false);
933 priv->eee_active = false;
934 priv->tx_lpi_enabled = false;
935 stmmac_eee_init(priv);
936 stmmac_set_eee_pls(priv, priv->hw, false);
937}
938
939static void stmmac_mac_link_up(struct phylink_config *config,
940 struct phy_device *phy,
941 unsigned int mode, phy_interface_t interface,
942 int speed, int duplex,
943 bool tx_pause, bool rx_pause)
944{
945 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
946 u32 ctrl;
947
948 stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
949
950 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
951 ctrl &= ~priv->hw->link.speed_mask;
952
953 if (interface == PHY_INTERFACE_MODE_USXGMII) {
954 switch (speed) {
955 case SPEED_10000:
956 ctrl |= priv->hw->link.xgmii.speed10000;
957 break;
958 case SPEED_5000:
959 ctrl |= priv->hw->link.xgmii.speed5000;
960 break;
961 case SPEED_2500:
962 ctrl |= priv->hw->link.xgmii.speed2500;
963 break;
964 default:
965 return;
966 }
967 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
968 switch (speed) {
969 case SPEED_100000:
970 ctrl |= priv->hw->link.xlgmii.speed100000;
971 break;
972 case SPEED_50000:
973 ctrl |= priv->hw->link.xlgmii.speed50000;
974 break;
975 case SPEED_40000:
976 ctrl |= priv->hw->link.xlgmii.speed40000;
977 break;
978 case SPEED_25000:
979 ctrl |= priv->hw->link.xlgmii.speed25000;
980 break;
981 case SPEED_10000:
982 ctrl |= priv->hw->link.xgmii.speed10000;
983 break;
984 case SPEED_2500:
985 ctrl |= priv->hw->link.speed2500;
986 break;
987 case SPEED_1000:
988 ctrl |= priv->hw->link.speed1000;
989 break;
990 default:
991 return;
992 }
993 } else {
994 switch (speed) {
995 case SPEED_2500:
996 ctrl |= priv->hw->link.speed2500;
997 break;
998 case SPEED_1000:
999 ctrl |= priv->hw->link.speed1000;
1000 break;
1001 case SPEED_100:
1002 ctrl |= priv->hw->link.speed100;
1003 break;
1004 case SPEED_10:
1005 ctrl |= priv->hw->link.speed10;
1006 break;
1007 default:
1008 return;
1009 }
1010 }
1011
1012 priv->speed = speed;
1013
1014 if (priv->plat->fix_mac_speed)
1015 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1016
1017 if (!duplex)
1018 ctrl &= ~priv->hw->link.duplex;
1019 else
1020 ctrl |= priv->hw->link.duplex;
1021
1022 /* Flow Control operation */
1023 if (tx_pause && rx_pause)
1024 stmmac_mac_flow_ctrl(priv, duplex);
1025
1026 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1027
1028 stmmac_mac_set(priv, priv->ioaddr, true);
1029 if (phy && priv->dma_cap.eee) {
1030 priv->eee_active = phy_init_eee(phy, 1) >= 0;
1031 priv->eee_enabled = stmmac_eee_init(priv);
1032 priv->tx_lpi_enabled = priv->eee_enabled;
1033 stmmac_set_eee_pls(priv, priv->hw, true);
1034 }
1035}
1036
1037static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1038 .validate = stmmac_validate,
1039 .mac_pcs_get_state = stmmac_mac_pcs_get_state,
1040 .mac_config = stmmac_mac_config,
1041 .mac_an_restart = stmmac_mac_an_restart,
1042 .mac_link_down = stmmac_mac_link_down,
1043 .mac_link_up = stmmac_mac_link_up,
1044};
1045
1046/**
1047 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1048 * @priv: driver private structure
1049 * Description: this is to verify if the HW supports the PCS.
1050 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1051 * configured for the TBI, RTBI, or SGMII PHY interface.
1052 */
1053static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1054{
1055 int interface = priv->plat->interface;
1056
1057 if (priv->dma_cap.pcs) {
1058 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1059 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1060 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1061 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1062 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1063 priv->hw->pcs = STMMAC_PCS_RGMII;
1064 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1065 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1066 priv->hw->pcs = STMMAC_PCS_SGMII;
1067 }
1068 }
1069}
1070
1071/**
1072 * stmmac_init_phy - PHY initialization
1073 * @dev: net device structure
1074 * Description: it initializes the driver's PHY state, and attaches the PHY
1075 * to the mac driver.
1076 * Return value:
1077 * 0 on success
1078 */
1079static int stmmac_init_phy(struct net_device *dev)
1080{
1081 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1082 struct stmmac_priv *priv = netdev_priv(dev);
1083 struct device_node *node;
1084 int ret;
1085
1086 node = priv->plat->phylink_node;
1087
1088 if (node)
1089 ret = phylink_of_phy_connect(priv->phylink, node, 0);
1090
1091 /* Some DT bindings do not set-up the PHY handle. Let's try to
1092 * manually parse it
1093 */
1094 if (!node || ret) {
1095 int addr = priv->plat->phy_addr;
1096 struct phy_device *phydev;
1097
1098 phydev = mdiobus_get_phy(priv->mii, addr);
1099 if (!phydev) {
1100 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1101 return -ENODEV;
1102 }
1103
1104 ret = phylink_connect_phy(priv->phylink, phydev);
1105 }
1106
1107 phylink_ethtool_get_wol(priv->phylink, &wol);
1108 device_set_wakeup_capable(priv->device, !!wol.supported);
1109
1110 return ret;
1111}
1112
1113static int stmmac_phy_setup(struct stmmac_priv *priv)
1114{
1115 struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1116 int mode = priv->plat->phy_interface;
1117 struct phylink *phylink;
1118
1119 priv->phylink_config.dev = &priv->dev->dev;
1120 priv->phylink_config.type = PHYLINK_NETDEV;
1121 priv->phylink_config.pcs_poll = true;
1122
1123 if (!fwnode)
1124 fwnode = dev_fwnode(priv->device);
1125
1126 phylink = phylink_create(&priv->phylink_config, fwnode,
1127 mode, &stmmac_phylink_mac_ops);
1128 if (IS_ERR(phylink))
1129 return PTR_ERR(phylink);
1130
1131 priv->phylink = phylink;
1132 return 0;
1133}
1134
1135static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1136{
1137 u32 rx_cnt = priv->plat->rx_queues_to_use;
1138 void *head_rx;
1139 u32 queue;
1140
1141 /* Display RX rings */
1142 for (queue = 0; queue < rx_cnt; queue++) {
1143 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1144
1145 pr_info("\tRX Queue %u rings\n", queue);
1146
1147 if (priv->extend_desc)
1148 head_rx = (void *)rx_q->dma_erx;
1149 else
1150 head_rx = (void *)rx_q->dma_rx;
1151
1152 /* Display RX ring */
1153 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1154 }
1155}
1156
1157static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1158{
1159 u32 tx_cnt = priv->plat->tx_queues_to_use;
1160 void *head_tx;
1161 u32 queue;
1162
1163 /* Display TX rings */
1164 for (queue = 0; queue < tx_cnt; queue++) {
1165 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1166
1167 pr_info("\tTX Queue %d rings\n", queue);
1168
1169 if (priv->extend_desc)
1170 head_tx = (void *)tx_q->dma_etx;
1171 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1172 head_tx = (void *)tx_q->dma_entx;
1173 else
1174 head_tx = (void *)tx_q->dma_tx;
1175
1176 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1177 }
1178}
1179
1180static void stmmac_display_rings(struct stmmac_priv *priv)
1181{
1182 /* Display RX ring */
1183 stmmac_display_rx_rings(priv);
1184
1185 /* Display TX ring */
1186 stmmac_display_tx_rings(priv);
1187}
1188
1189static int stmmac_set_bfsize(int mtu, int bufsize)
1190{
1191 int ret = bufsize;
1192
1193 if (mtu >= BUF_SIZE_8KiB)
1194 ret = BUF_SIZE_16KiB;
1195 else if (mtu >= BUF_SIZE_4KiB)
1196 ret = BUF_SIZE_8KiB;
1197 else if (mtu >= BUF_SIZE_2KiB)
1198 ret = BUF_SIZE_4KiB;
1199 else if (mtu > DEFAULT_BUFSIZE)
1200 ret = BUF_SIZE_2KiB;
1201 else
1202 ret = DEFAULT_BUFSIZE;
1203
1204 return ret;
1205}
1206
1207/**
1208 * stmmac_clear_rx_descriptors - clear RX descriptors
1209 * @priv: driver private structure
1210 * @queue: RX queue index
1211 * Description: this function is called to clear the RX descriptors
1212 * in case of both basic and extended descriptors are used.
1213 */
1214static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1215{
1216 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1217 int i;
1218
1219 /* Clear the RX descriptors */
1220 for (i = 0; i < DMA_RX_SIZE; i++)
1221 if (priv->extend_desc)
1222 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1223 priv->use_riwt, priv->mode,
1224 (i == DMA_RX_SIZE - 1),
1225 priv->dma_buf_sz);
1226 else
1227 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1228 priv->use_riwt, priv->mode,
1229 (i == DMA_RX_SIZE - 1),
1230 priv->dma_buf_sz);
1231}
1232
1233/**
1234 * stmmac_clear_tx_descriptors - clear tx descriptors
1235 * @priv: driver private structure
1236 * @queue: TX queue index.
1237 * Description: this function is called to clear the TX descriptors
1238 * in case of both basic and extended descriptors are used.
1239 */
1240static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1241{
1242 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1243 int i;
1244
1245 /* Clear the TX descriptors */
1246 for (i = 0; i < DMA_TX_SIZE; i++) {
1247 int last = (i == (DMA_TX_SIZE - 1));
1248 struct dma_desc *p;
1249
1250 if (priv->extend_desc)
1251 p = &tx_q->dma_etx[i].basic;
1252 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1253 p = &tx_q->dma_entx[i].basic;
1254 else
1255 p = &tx_q->dma_tx[i];
1256
1257 stmmac_init_tx_desc(priv, p, priv->mode, last);
1258 }
1259}
1260
1261/**
1262 * stmmac_clear_descriptors - clear descriptors
1263 * @priv: driver private structure
1264 * Description: this function is called to clear the TX and RX descriptors
1265 * in case of both basic and extended descriptors are used.
1266 */
1267static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1268{
1269 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1270 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1271 u32 queue;
1272
1273 /* Clear the RX descriptors */
1274 for (queue = 0; queue < rx_queue_cnt; queue++)
1275 stmmac_clear_rx_descriptors(priv, queue);
1276
1277 /* Clear the TX descriptors */
1278 for (queue = 0; queue < tx_queue_cnt; queue++)
1279 stmmac_clear_tx_descriptors(priv, queue);
1280}
1281
1282/**
1283 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1284 * @priv: driver private structure
1285 * @p: descriptor pointer
1286 * @i: descriptor index
1287 * @flags: gfp flag
1288 * @queue: RX queue index
1289 * Description: this function is called to allocate a receive buffer, perform
1290 * the DMA mapping and init the descriptor.
1291 */
1292static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1293 int i, gfp_t flags, u32 queue)
1294{
1295 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1296 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1297
1298 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1299 if (!buf->page)
1300 return -ENOMEM;
1301
1302 if (priv->sph) {
1303 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1304 if (!buf->sec_page)
1305 return -ENOMEM;
1306
1307 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1308 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
1309 } else {
1310 buf->sec_page = NULL;
1311 }
1312
1313 buf->addr = page_pool_get_dma_addr(buf->page);
1314 stmmac_set_desc_addr(priv, p, buf->addr);
1315 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1316 stmmac_init_desc3(priv, p);
1317
1318 return 0;
1319}
1320
1321/**
1322 * stmmac_free_rx_buffer - free RX dma buffers
1323 * @priv: private structure
1324 * @queue: RX queue index
1325 * @i: buffer index.
1326 */
1327static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1328{
1329 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1330 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1331
1332 if (buf->page)
1333 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1334 buf->page = NULL;
1335
1336 if (buf->sec_page)
1337 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1338 buf->sec_page = NULL;
1339}
1340
1341/**
1342 * stmmac_free_tx_buffer - free RX dma buffers
1343 * @priv: private structure
1344 * @queue: RX queue index
1345 * @i: buffer index.
1346 */
1347static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1348{
1349 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1350
1351 if (tx_q->tx_skbuff_dma[i].buf) {
1352 if (tx_q->tx_skbuff_dma[i].map_as_page)
1353 dma_unmap_page(priv->device,
1354 tx_q->tx_skbuff_dma[i].buf,
1355 tx_q->tx_skbuff_dma[i].len,
1356 DMA_TO_DEVICE);
1357 else
1358 dma_unmap_single(priv->device,
1359 tx_q->tx_skbuff_dma[i].buf,
1360 tx_q->tx_skbuff_dma[i].len,
1361 DMA_TO_DEVICE);
1362 }
1363
1364 if (tx_q->tx_skbuff[i]) {
1365 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1366 tx_q->tx_skbuff[i] = NULL;
1367 tx_q->tx_skbuff_dma[i].buf = 0;
1368 tx_q->tx_skbuff_dma[i].map_as_page = false;
1369 }
1370}
1371
1372/**
1373 * init_dma_rx_desc_rings - init the RX descriptor rings
1374 * @dev: net device structure
1375 * @flags: gfp flag.
1376 * Description: this function initializes the DMA RX descriptors
1377 * and allocates the socket buffers. It supports the chained and ring
1378 * modes.
1379 */
1380static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1381{
1382 struct stmmac_priv *priv = netdev_priv(dev);
1383 u32 rx_count = priv->plat->rx_queues_to_use;
1384 int ret = -ENOMEM;
1385 int queue;
1386 int i;
1387
1388 /* RX INITIALIZATION */
1389 netif_dbg(priv, probe, priv->dev,
1390 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1391
1392 for (queue = 0; queue < rx_count; queue++) {
1393 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1394
1395 netif_dbg(priv, probe, priv->dev,
1396 "(%s) dma_rx_phy=0x%08x\n", __func__,
1397 (u32)rx_q->dma_rx_phy);
1398
1399 stmmac_clear_rx_descriptors(priv, queue);
1400
1401 for (i = 0; i < DMA_RX_SIZE; i++) {
1402 struct dma_desc *p;
1403
1404 if (priv->extend_desc)
1405 p = &((rx_q->dma_erx + i)->basic);
1406 else
1407 p = rx_q->dma_rx + i;
1408
1409 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1410 queue);
1411 if (ret)
1412 goto err_init_rx_buffers;
1413 }
1414
1415 rx_q->cur_rx = 0;
1416 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1417
1418 /* Setup the chained descriptor addresses */
1419 if (priv->mode == STMMAC_CHAIN_MODE) {
1420 if (priv->extend_desc)
1421 stmmac_mode_init(priv, rx_q->dma_erx,
1422 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1423 else
1424 stmmac_mode_init(priv, rx_q->dma_rx,
1425 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1426 }
1427 }
1428
1429 return 0;
1430
1431err_init_rx_buffers:
1432 while (queue >= 0) {
1433 while (--i >= 0)
1434 stmmac_free_rx_buffer(priv, queue, i);
1435
1436 if (queue == 0)
1437 break;
1438
1439 i = DMA_RX_SIZE;
1440 queue--;
1441 }
1442
1443 return ret;
1444}
1445
1446/**
1447 * init_dma_tx_desc_rings - init the TX descriptor rings
1448 * @dev: net device structure.
1449 * Description: this function initializes the DMA TX descriptors
1450 * and allocates the socket buffers. It supports the chained and ring
1451 * modes.
1452 */
1453static int init_dma_tx_desc_rings(struct net_device *dev)
1454{
1455 struct stmmac_priv *priv = netdev_priv(dev);
1456 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1457 u32 queue;
1458 int i;
1459
1460 for (queue = 0; queue < tx_queue_cnt; queue++) {
1461 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1462
1463 netif_dbg(priv, probe, priv->dev,
1464 "(%s) dma_tx_phy=0x%08x\n", __func__,
1465 (u32)tx_q->dma_tx_phy);
1466
1467 /* Setup the chained descriptor addresses */
1468 if (priv->mode == STMMAC_CHAIN_MODE) {
1469 if (priv->extend_desc)
1470 stmmac_mode_init(priv, tx_q->dma_etx,
1471 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1472 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1473 stmmac_mode_init(priv, tx_q->dma_tx,
1474 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1475 }
1476
1477 for (i = 0; i < DMA_TX_SIZE; i++) {
1478 struct dma_desc *p;
1479 if (priv->extend_desc)
1480 p = &((tx_q->dma_etx + i)->basic);
1481 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1482 p = &((tx_q->dma_entx + i)->basic);
1483 else
1484 p = tx_q->dma_tx + i;
1485
1486 stmmac_clear_desc(priv, p);
1487
1488 tx_q->tx_skbuff_dma[i].buf = 0;
1489 tx_q->tx_skbuff_dma[i].map_as_page = false;
1490 tx_q->tx_skbuff_dma[i].len = 0;
1491 tx_q->tx_skbuff_dma[i].last_segment = false;
1492 tx_q->tx_skbuff[i] = NULL;
1493 }
1494
1495 tx_q->dirty_tx = 0;
1496 tx_q->cur_tx = 0;
1497 tx_q->mss = 0;
1498
1499 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1500 }
1501
1502 return 0;
1503}
1504
1505/**
1506 * init_dma_desc_rings - init the RX/TX descriptor rings
1507 * @dev: net device structure
1508 * @flags: gfp flag.
1509 * Description: this function initializes the DMA RX/TX descriptors
1510 * and allocates the socket buffers. It supports the chained and ring
1511 * modes.
1512 */
1513static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1514{
1515 struct stmmac_priv *priv = netdev_priv(dev);
1516 int ret;
1517
1518 ret = init_dma_rx_desc_rings(dev, flags);
1519 if (ret)
1520 return ret;
1521
1522 ret = init_dma_tx_desc_rings(dev);
1523
1524 stmmac_clear_descriptors(priv);
1525
1526 if (netif_msg_hw(priv))
1527 stmmac_display_rings(priv);
1528
1529 return ret;
1530}
1531
1532/**
1533 * dma_free_rx_skbufs - free RX dma buffers
1534 * @priv: private structure
1535 * @queue: RX queue index
1536 */
1537static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1538{
1539 int i;
1540
1541 for (i = 0; i < DMA_RX_SIZE; i++)
1542 stmmac_free_rx_buffer(priv, queue, i);
1543}
1544
1545/**
1546 * dma_free_tx_skbufs - free TX dma buffers
1547 * @priv: private structure
1548 * @queue: TX queue index
1549 */
1550static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1551{
1552 int i;
1553
1554 for (i = 0; i < DMA_TX_SIZE; i++)
1555 stmmac_free_tx_buffer(priv, queue, i);
1556}
1557
1558/**
1559 * free_dma_rx_desc_resources - free RX dma desc resources
1560 * @priv: private structure
1561 */
1562static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1563{
1564 u32 rx_count = priv->plat->rx_queues_to_use;
1565 u32 queue;
1566
1567 /* Free RX queue resources */
1568 for (queue = 0; queue < rx_count; queue++) {
1569 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1570
1571 /* Release the DMA RX socket buffers */
1572 dma_free_rx_skbufs(priv, queue);
1573
1574 /* Free DMA regions of consistent memory previously allocated */
1575 if (!priv->extend_desc)
1576 dma_free_coherent(priv->device,
1577 DMA_RX_SIZE * sizeof(struct dma_desc),
1578 rx_q->dma_rx, rx_q->dma_rx_phy);
1579 else
1580 dma_free_coherent(priv->device, DMA_RX_SIZE *
1581 sizeof(struct dma_extended_desc),
1582 rx_q->dma_erx, rx_q->dma_rx_phy);
1583
1584 kfree(rx_q->buf_pool);
1585 if (rx_q->page_pool)
1586 page_pool_destroy(rx_q->page_pool);
1587 }
1588}
1589
1590/**
1591 * free_dma_tx_desc_resources - free TX dma desc resources
1592 * @priv: private structure
1593 */
1594static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1595{
1596 u32 tx_count = priv->plat->tx_queues_to_use;
1597 u32 queue;
1598
1599 /* Free TX queue resources */
1600 for (queue = 0; queue < tx_count; queue++) {
1601 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1602 size_t size;
1603 void *addr;
1604
1605 /* Release the DMA TX socket buffers */
1606 dma_free_tx_skbufs(priv, queue);
1607
1608 if (priv->extend_desc) {
1609 size = sizeof(struct dma_extended_desc);
1610 addr = tx_q->dma_etx;
1611 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1612 size = sizeof(struct dma_edesc);
1613 addr = tx_q->dma_entx;
1614 } else {
1615 size = sizeof(struct dma_desc);
1616 addr = tx_q->dma_tx;
1617 }
1618
1619 size *= DMA_TX_SIZE;
1620
1621 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1622
1623 kfree(tx_q->tx_skbuff_dma);
1624 kfree(tx_q->tx_skbuff);
1625 }
1626}
1627
1628/**
1629 * alloc_dma_rx_desc_resources - alloc RX resources.
1630 * @priv: private structure
1631 * Description: according to which descriptor can be used (extend or basic)
1632 * this function allocates the resources for TX and RX paths. In case of
1633 * reception, for example, it pre-allocated the RX socket buffer in order to
1634 * allow zero-copy mechanism.
1635 */
1636static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1637{
1638 u32 rx_count = priv->plat->rx_queues_to_use;
1639 int ret = -ENOMEM;
1640 u32 queue;
1641
1642 /* RX queues buffers and DMA */
1643 for (queue = 0; queue < rx_count; queue++) {
1644 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1645 struct page_pool_params pp_params = { 0 };
1646 unsigned int num_pages;
1647
1648 rx_q->queue_index = queue;
1649 rx_q->priv_data = priv;
1650
1651 pp_params.flags = PP_FLAG_DMA_MAP;
1652 pp_params.pool_size = DMA_RX_SIZE;
1653 num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1654 pp_params.order = ilog2(num_pages);
1655 pp_params.nid = dev_to_node(priv->device);
1656 pp_params.dev = priv->device;
1657 pp_params.dma_dir = DMA_FROM_DEVICE;
1658
1659 rx_q->page_pool = page_pool_create(&pp_params);
1660 if (IS_ERR(rx_q->page_pool)) {
1661 ret = PTR_ERR(rx_q->page_pool);
1662 rx_q->page_pool = NULL;
1663 goto err_dma;
1664 }
1665
1666 rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
1667 GFP_KERNEL);
1668 if (!rx_q->buf_pool)
1669 goto err_dma;
1670
1671 if (priv->extend_desc) {
1672 rx_q->dma_erx = dma_alloc_coherent(priv->device,
1673 DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1674 &rx_q->dma_rx_phy,
1675 GFP_KERNEL);
1676 if (!rx_q->dma_erx)
1677 goto err_dma;
1678
1679 } else {
1680 rx_q->dma_rx = dma_alloc_coherent(priv->device,
1681 DMA_RX_SIZE * sizeof(struct dma_desc),
1682 &rx_q->dma_rx_phy,
1683 GFP_KERNEL);
1684 if (!rx_q->dma_rx)
1685 goto err_dma;
1686 }
1687 }
1688
1689 return 0;
1690
1691err_dma:
1692 free_dma_rx_desc_resources(priv);
1693
1694 return ret;
1695}
1696
1697/**
1698 * alloc_dma_tx_desc_resources - alloc TX resources.
1699 * @priv: private structure
1700 * Description: according to which descriptor can be used (extend or basic)
1701 * this function allocates the resources for TX and RX paths. In case of
1702 * reception, for example, it pre-allocated the RX socket buffer in order to
1703 * allow zero-copy mechanism.
1704 */
1705static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1706{
1707 u32 tx_count = priv->plat->tx_queues_to_use;
1708 int ret = -ENOMEM;
1709 u32 queue;
1710
1711 /* TX queues buffers and DMA */
1712 for (queue = 0; queue < tx_count; queue++) {
1713 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1714 size_t size;
1715 void *addr;
1716
1717 tx_q->queue_index = queue;
1718 tx_q->priv_data = priv;
1719
1720 tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
1721 sizeof(*tx_q->tx_skbuff_dma),
1722 GFP_KERNEL);
1723 if (!tx_q->tx_skbuff_dma)
1724 goto err_dma;
1725
1726 tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
1727 sizeof(struct sk_buff *),
1728 GFP_KERNEL);
1729 if (!tx_q->tx_skbuff)
1730 goto err_dma;
1731
1732 if (priv->extend_desc)
1733 size = sizeof(struct dma_extended_desc);
1734 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1735 size = sizeof(struct dma_edesc);
1736 else
1737 size = sizeof(struct dma_desc);
1738
1739 size *= DMA_TX_SIZE;
1740
1741 addr = dma_alloc_coherent(priv->device, size,
1742 &tx_q->dma_tx_phy, GFP_KERNEL);
1743 if (!addr)
1744 goto err_dma;
1745
1746 if (priv->extend_desc)
1747 tx_q->dma_etx = addr;
1748 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1749 tx_q->dma_entx = addr;
1750 else
1751 tx_q->dma_tx = addr;
1752 }
1753
1754 return 0;
1755
1756err_dma:
1757 free_dma_tx_desc_resources(priv);
1758 return ret;
1759}
1760
1761/**
1762 * alloc_dma_desc_resources - alloc TX/RX resources.
1763 * @priv: private structure
1764 * Description: according to which descriptor can be used (extend or basic)
1765 * this function allocates the resources for TX and RX paths. In case of
1766 * reception, for example, it pre-allocated the RX socket buffer in order to
1767 * allow zero-copy mechanism.
1768 */
1769static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1770{
1771 /* RX Allocation */
1772 int ret = alloc_dma_rx_desc_resources(priv);
1773
1774 if (ret)
1775 return ret;
1776
1777 ret = alloc_dma_tx_desc_resources(priv);
1778
1779 return ret;
1780}
1781
1782/**
1783 * free_dma_desc_resources - free dma desc resources
1784 * @priv: private structure
1785 */
1786static void free_dma_desc_resources(struct stmmac_priv *priv)
1787{
1788 /* Release the DMA RX socket buffers */
1789 free_dma_rx_desc_resources(priv);
1790
1791 /* Release the DMA TX socket buffers */
1792 free_dma_tx_desc_resources(priv);
1793}
1794
1795/**
1796 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
1797 * @priv: driver private structure
1798 * Description: It is used for enabling the rx queues in the MAC
1799 */
1800static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1801{
1802 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1803 int queue;
1804 u8 mode;
1805
1806 for (queue = 0; queue < rx_queues_count; queue++) {
1807 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1808 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1809 }
1810}
1811
1812/**
1813 * stmmac_start_rx_dma - start RX DMA channel
1814 * @priv: driver private structure
1815 * @chan: RX channel index
1816 * Description:
1817 * This starts a RX DMA channel
1818 */
1819static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1820{
1821 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1822 stmmac_start_rx(priv, priv->ioaddr, chan);
1823}
1824
1825/**
1826 * stmmac_start_tx_dma - start TX DMA channel
1827 * @priv: driver private structure
1828 * @chan: TX channel index
1829 * Description:
1830 * This starts a TX DMA channel
1831 */
1832static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1833{
1834 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1835 stmmac_start_tx(priv, priv->ioaddr, chan);
1836}
1837
1838/**
1839 * stmmac_stop_rx_dma - stop RX DMA channel
1840 * @priv: driver private structure
1841 * @chan: RX channel index
1842 * Description:
1843 * This stops a RX DMA channel
1844 */
1845static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1846{
1847 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1848 stmmac_stop_rx(priv, priv->ioaddr, chan);
1849}
1850
1851/**
1852 * stmmac_stop_tx_dma - stop TX DMA channel
1853 * @priv: driver private structure
1854 * @chan: TX channel index
1855 * Description:
1856 * This stops a TX DMA channel
1857 */
1858static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1859{
1860 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1861 stmmac_stop_tx(priv, priv->ioaddr, chan);
1862}
1863
1864/**
1865 * stmmac_start_all_dma - start all RX and TX DMA channels
1866 * @priv: driver private structure
1867 * Description:
1868 * This starts all the RX and TX DMA channels
1869 */
1870static void stmmac_start_all_dma(struct stmmac_priv *priv)
1871{
1872 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1873 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1874 u32 chan = 0;
1875
1876 for (chan = 0; chan < rx_channels_count; chan++)
1877 stmmac_start_rx_dma(priv, chan);
1878
1879 for (chan = 0; chan < tx_channels_count; chan++)
1880 stmmac_start_tx_dma(priv, chan);
1881}
1882
1883/**
1884 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1885 * @priv: driver private structure
1886 * Description:
1887 * This stops the RX and TX DMA channels
1888 */
1889static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1890{
1891 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1892 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1893 u32 chan = 0;
1894
1895 for (chan = 0; chan < rx_channels_count; chan++)
1896 stmmac_stop_rx_dma(priv, chan);
1897
1898 for (chan = 0; chan < tx_channels_count; chan++)
1899 stmmac_stop_tx_dma(priv, chan);
1900}
1901
1902/**
1903 * stmmac_dma_operation_mode - HW DMA operation mode
1904 * @priv: driver private structure
1905 * Description: it is used for configuring the DMA operation mode register in
1906 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1907 */
1908static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1909{
1910 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1911 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1912 int rxfifosz = priv->plat->rx_fifo_size;
1913 int txfifosz = priv->plat->tx_fifo_size;
1914 u32 txmode = 0;
1915 u32 rxmode = 0;
1916 u32 chan = 0;
1917 u8 qmode = 0;
1918
1919 if (rxfifosz == 0)
1920 rxfifosz = priv->dma_cap.rx_fifo_size;
1921 if (txfifosz == 0)
1922 txfifosz = priv->dma_cap.tx_fifo_size;
1923
1924 /* Adjust for real per queue fifo size */
1925 rxfifosz /= rx_channels_count;
1926 txfifosz /= tx_channels_count;
1927
1928 if (priv->plat->force_thresh_dma_mode) {
1929 txmode = tc;
1930 rxmode = tc;
1931 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1932 /*
1933 * In case of GMAC, SF mode can be enabled
1934 * to perform the TX COE in HW. This depends on:
1935 * 1) TX COE if actually supported
1936 * 2) There is no bugged Jumbo frame support
1937 * that needs to not insert csum in the TDES.
1938 */
1939 txmode = SF_DMA_MODE;
1940 rxmode = SF_DMA_MODE;
1941 priv->xstats.threshold = SF_DMA_MODE;
1942 } else {
1943 txmode = tc;
1944 rxmode = SF_DMA_MODE;
1945 }
1946
1947 /* configure all channels */
1948 for (chan = 0; chan < rx_channels_count; chan++) {
1949 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1950
1951 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1952 rxfifosz, qmode);
1953 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1954 chan);
1955 }
1956
1957 for (chan = 0; chan < tx_channels_count; chan++) {
1958 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1959
1960 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1961 txfifosz, qmode);
1962 }
1963}
1964
1965/**
1966 * stmmac_tx_clean - to manage the transmission completion
1967 * @priv: driver private structure
1968 * @queue: TX queue index
1969 * Description: it reclaims the transmit resources after transmission completes.
1970 */
1971static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1972{
1973 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1974 unsigned int bytes_compl = 0, pkts_compl = 0;
1975 unsigned int entry, count = 0;
1976
1977 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1978
1979 priv->xstats.tx_clean++;
1980
1981 entry = tx_q->dirty_tx;
1982 while ((entry != tx_q->cur_tx) && (count < budget)) {
1983 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1984 struct dma_desc *p;
1985 int status;
1986
1987 if (priv->extend_desc)
1988 p = (struct dma_desc *)(tx_q->dma_etx + entry);
1989 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1990 p = &tx_q->dma_entx[entry].basic;
1991 else
1992 p = tx_q->dma_tx + entry;
1993
1994 status = stmmac_tx_status(priv, &priv->dev->stats,
1995 &priv->xstats, p, priv->ioaddr);
1996 /* Check if the descriptor is owned by the DMA */
1997 if (unlikely(status & tx_dma_own))
1998 break;
1999
2000 count++;
2001
2002 /* Make sure descriptor fields are read after reading
2003 * the own bit.
2004 */
2005 dma_rmb();
2006
2007 /* Just consider the last segment and ...*/
2008 if (likely(!(status & tx_not_ls))) {
2009 /* ... verify the status error condition */
2010 if (unlikely(status & tx_err)) {
2011 priv->dev->stats.tx_errors++;
2012 } else {
2013 priv->dev->stats.tx_packets++;
2014 priv->xstats.tx_pkt_n++;
2015 }
2016 stmmac_get_tx_hwtstamp(priv, p, skb);
2017 }
2018
2019 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
2020 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2021 dma_unmap_page(priv->device,
2022 tx_q->tx_skbuff_dma[entry].buf,
2023 tx_q->tx_skbuff_dma[entry].len,
2024 DMA_TO_DEVICE);
2025 else
2026 dma_unmap_single(priv->device,
2027 tx_q->tx_skbuff_dma[entry].buf,
2028 tx_q->tx_skbuff_dma[entry].len,
2029 DMA_TO_DEVICE);
2030 tx_q->tx_skbuff_dma[entry].buf = 0;
2031 tx_q->tx_skbuff_dma[entry].len = 0;
2032 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2033 }
2034
2035 stmmac_clean_desc3(priv, tx_q, p);
2036
2037 tx_q->tx_skbuff_dma[entry].last_segment = false;
2038 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2039
2040 if (likely(skb != NULL)) {
2041 pkts_compl++;
2042 bytes_compl += skb->len;
2043 dev_consume_skb_any(skb);
2044 tx_q->tx_skbuff[entry] = NULL;
2045 }
2046
2047 stmmac_release_tx_desc(priv, p, priv->mode);
2048
2049 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2050 }
2051 tx_q->dirty_tx = entry;
2052
2053 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2054 pkts_compl, bytes_compl);
2055
2056 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2057 queue))) &&
2058 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
2059
2060 netif_dbg(priv, tx_done, priv->dev,
2061 "%s: restart transmit\n", __func__);
2062 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2063 }
2064
2065 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
2066 stmmac_enable_eee_mode(priv);
2067 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2068 }
2069
2070 /* We still have pending packets, let's call for a new scheduling */
2071 if (tx_q->dirty_tx != tx_q->cur_tx)
2072 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2073
2074 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2075
2076 return count;
2077}
2078
2079/**
2080 * stmmac_tx_err - to manage the tx error
2081 * @priv: driver private structure
2082 * @chan: channel index
2083 * Description: it cleans the descriptors and restarts the transmission
2084 * in case of transmission errors.
2085 */
2086static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2087{
2088 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2089
2090 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2091
2092 stmmac_stop_tx_dma(priv, chan);
2093 dma_free_tx_skbufs(priv, chan);
2094 stmmac_clear_tx_descriptors(priv, chan);
2095 tx_q->dirty_tx = 0;
2096 tx_q->cur_tx = 0;
2097 tx_q->mss = 0;
2098 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2099 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2100 tx_q->dma_tx_phy, chan);
2101 stmmac_start_tx_dma(priv, chan);
2102
2103 priv->dev->stats.tx_errors++;
2104 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2105}
2106
2107/**
2108 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2109 * @priv: driver private structure
2110 * @txmode: TX operating mode
2111 * @rxmode: RX operating mode
2112 * @chan: channel index
2113 * Description: it is used for configuring of the DMA operation mode in
2114 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2115 * mode.
2116 */
2117static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2118 u32 rxmode, u32 chan)
2119{
2120 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2121 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2122 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2123 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2124 int rxfifosz = priv->plat->rx_fifo_size;
2125 int txfifosz = priv->plat->tx_fifo_size;
2126
2127 if (rxfifosz == 0)
2128 rxfifosz = priv->dma_cap.rx_fifo_size;
2129 if (txfifosz == 0)
2130 txfifosz = priv->dma_cap.tx_fifo_size;
2131
2132 /* Adjust for real per queue fifo size */
2133 rxfifosz /= rx_channels_count;
2134 txfifosz /= tx_channels_count;
2135
2136 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2137 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2138}
2139
2140static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2141{
2142 int ret;
2143
2144 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2145 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2146 if (ret && (ret != -EINVAL)) {
2147 stmmac_global_err(priv);
2148 return true;
2149 }
2150
2151 return false;
2152}
2153
2154static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2155{
2156 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2157 &priv->xstats, chan);
2158 struct stmmac_channel *ch = &priv->channel[chan];
2159 unsigned long flags;
2160
2161 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2162 if (napi_schedule_prep(&ch->rx_napi)) {
2163 spin_lock_irqsave(&ch->lock, flags);
2164 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2165 spin_unlock_irqrestore(&ch->lock, flags);
2166 __napi_schedule_irqoff(&ch->rx_napi);
2167 }
2168 }
2169
2170 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2171 if (napi_schedule_prep(&ch->tx_napi)) {
2172 spin_lock_irqsave(&ch->lock, flags);
2173 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2174 spin_unlock_irqrestore(&ch->lock, flags);
2175 __napi_schedule_irqoff(&ch->tx_napi);
2176 }
2177 }
2178
2179 return status;
2180}
2181
2182/**
2183 * stmmac_dma_interrupt - DMA ISR
2184 * @priv: driver private structure
2185 * Description: this is the DMA ISR. It is called by the main ISR.
2186 * It calls the dwmac dma routine and schedule poll method in case of some
2187 * work can be done.
2188 */
2189static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2190{
2191 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2192 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2193 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2194 tx_channel_count : rx_channel_count;
2195 u32 chan;
2196 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2197
2198 /* Make sure we never check beyond our status buffer. */
2199 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2200 channels_to_check = ARRAY_SIZE(status);
2201
2202 for (chan = 0; chan < channels_to_check; chan++)
2203 status[chan] = stmmac_napi_check(priv, chan);
2204
2205 for (chan = 0; chan < tx_channel_count; chan++) {
2206 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2207 /* Try to bump up the dma threshold on this failure */
2208 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2209 (tc <= 256)) {
2210 tc += 64;
2211 if (priv->plat->force_thresh_dma_mode)
2212 stmmac_set_dma_operation_mode(priv,
2213 tc,
2214 tc,
2215 chan);
2216 else
2217 stmmac_set_dma_operation_mode(priv,
2218 tc,
2219 SF_DMA_MODE,
2220 chan);
2221 priv->xstats.threshold = tc;
2222 }
2223 } else if (unlikely(status[chan] == tx_hard_error)) {
2224 stmmac_tx_err(priv, chan);
2225 }
2226 }
2227}
2228
2229/**
2230 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2231 * @priv: driver private structure
2232 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2233 */
2234static void stmmac_mmc_setup(struct stmmac_priv *priv)
2235{
2236 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2237 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2238
2239 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2240
2241 if (priv->dma_cap.rmon) {
2242 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2243 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2244 } else
2245 netdev_info(priv->dev, "No MAC Management Counters available\n");
2246}
2247
2248/**
2249 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2250 * @priv: driver private structure
2251 * Description:
2252 * new GMAC chip generations have a new register to indicate the
2253 * presence of the optional feature/functions.
2254 * This can be also used to override the value passed through the
2255 * platform and necessary for old MAC10/100 and GMAC chips.
2256 */
2257static int stmmac_get_hw_features(struct stmmac_priv *priv)
2258{
2259 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2260}
2261
2262/**
2263 * stmmac_check_ether_addr - check if the MAC addr is valid
2264 * @priv: driver private structure
2265 * Description:
2266 * it is to verify if the MAC address is valid, in case of failures it
2267 * generates a random MAC address
2268 */
2269static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2270{
2271 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2272 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2273 if (!is_valid_ether_addr(priv->dev->dev_addr))
2274 eth_hw_addr_random(priv->dev);
2275 dev_info(priv->device, "device MAC address %pM\n",
2276 priv->dev->dev_addr);
2277 }
2278}
2279
2280/**
2281 * stmmac_init_dma_engine - DMA init.
2282 * @priv: driver private structure
2283 * Description:
2284 * It inits the DMA invoking the specific MAC/GMAC callback.
2285 * Some DMA parameters can be passed from the platform;
2286 * in case of these are not passed a default is kept for the MAC or GMAC.
2287 */
2288static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2289{
2290 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2291 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2292 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2293 struct stmmac_rx_queue *rx_q;
2294 struct stmmac_tx_queue *tx_q;
2295 u32 chan = 0;
2296 int atds = 0;
2297 int ret = 0;
2298
2299 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2300 dev_err(priv->device, "Invalid DMA configuration\n");
2301 return -EINVAL;
2302 }
2303
2304 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2305 atds = 1;
2306
2307 ret = stmmac_reset(priv, priv->ioaddr);
2308 if (ret) {
2309 dev_err(priv->device, "Failed to reset the dma\n");
2310 return ret;
2311 }
2312
2313 /* DMA Configuration */
2314 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2315
2316 if (priv->plat->axi)
2317 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2318
2319 /* DMA CSR Channel configuration */
2320 for (chan = 0; chan < dma_csr_ch; chan++)
2321 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2322
2323 /* DMA RX Channel Configuration */
2324 for (chan = 0; chan < rx_channels_count; chan++) {
2325 rx_q = &priv->rx_queue[chan];
2326
2327 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2328 rx_q->dma_rx_phy, chan);
2329
2330 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2331 (DMA_RX_SIZE * sizeof(struct dma_desc));
2332 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2333 rx_q->rx_tail_addr, chan);
2334 }
2335
2336 /* DMA TX Channel Configuration */
2337 for (chan = 0; chan < tx_channels_count; chan++) {
2338 tx_q = &priv->tx_queue[chan];
2339
2340 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2341 tx_q->dma_tx_phy, chan);
2342
2343 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2344 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2345 tx_q->tx_tail_addr, chan);
2346 }
2347
2348 return ret;
2349}
2350
2351static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2352{
2353 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2354
2355 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2356}
2357
2358/**
2359 * stmmac_tx_timer - mitigation sw timer for tx.
2360 * @data: data pointer
2361 * Description:
2362 * This is the timer handler to directly invoke the stmmac_tx_clean.
2363 */
2364static void stmmac_tx_timer(struct timer_list *t)
2365{
2366 struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2367 struct stmmac_priv *priv = tx_q->priv_data;
2368 struct stmmac_channel *ch;
2369
2370 ch = &priv->channel[tx_q->queue_index];
2371
2372 if (likely(napi_schedule_prep(&ch->tx_napi))) {
2373 unsigned long flags;
2374
2375 spin_lock_irqsave(&ch->lock, flags);
2376 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2377 spin_unlock_irqrestore(&ch->lock, flags);
2378 __napi_schedule(&ch->tx_napi);
2379 }
2380}
2381
2382/**
2383 * stmmac_init_coalesce - init mitigation options.
2384 * @priv: driver private structure
2385 * Description:
2386 * This inits the coalesce parameters: i.e. timer rate,
2387 * timer handler and default threshold used for enabling the
2388 * interrupt on completion bit.
2389 */
2390static void stmmac_init_coalesce(struct stmmac_priv *priv)
2391{
2392 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2393 u32 chan;
2394
2395 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2396 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2397 priv->rx_coal_frames = STMMAC_RX_FRAMES;
2398
2399 for (chan = 0; chan < tx_channel_count; chan++) {
2400 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2401
2402 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2403 }
2404}
2405
2406static void stmmac_set_rings_length(struct stmmac_priv *priv)
2407{
2408 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2409 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2410 u32 chan;
2411
2412 /* set TX ring length */
2413 for (chan = 0; chan < tx_channels_count; chan++)
2414 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2415 (DMA_TX_SIZE - 1), chan);
2416
2417 /* set RX ring length */
2418 for (chan = 0; chan < rx_channels_count; chan++)
2419 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2420 (DMA_RX_SIZE - 1), chan);
2421}
2422
2423/**
2424 * stmmac_set_tx_queue_weight - Set TX queue weight
2425 * @priv: driver private structure
2426 * Description: It is used for setting TX queues weight
2427 */
2428static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2429{
2430 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2431 u32 weight;
2432 u32 queue;
2433
2434 for (queue = 0; queue < tx_queues_count; queue++) {
2435 weight = priv->plat->tx_queues_cfg[queue].weight;
2436 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2437 }
2438}
2439
2440/**
2441 * stmmac_configure_cbs - Configure CBS in TX queue
2442 * @priv: driver private structure
2443 * Description: It is used for configuring CBS in AVB TX queues
2444 */
2445static void stmmac_configure_cbs(struct stmmac_priv *priv)
2446{
2447 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2448 u32 mode_to_use;
2449 u32 queue;
2450
2451 /* queue 0 is reserved for legacy traffic */
2452 for (queue = 1; queue < tx_queues_count; queue++) {
2453 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2454 if (mode_to_use == MTL_QUEUE_DCB)
2455 continue;
2456
2457 stmmac_config_cbs(priv, priv->hw,
2458 priv->plat->tx_queues_cfg[queue].send_slope,
2459 priv->plat->tx_queues_cfg[queue].idle_slope,
2460 priv->plat->tx_queues_cfg[queue].high_credit,
2461 priv->plat->tx_queues_cfg[queue].low_credit,
2462 queue);
2463 }
2464}
2465
2466/**
2467 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2468 * @priv: driver private structure
2469 * Description: It is used for mapping RX queues to RX dma channels
2470 */
2471static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2472{
2473 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2474 u32 queue;
2475 u32 chan;
2476
2477 for (queue = 0; queue < rx_queues_count; queue++) {
2478 chan = priv->plat->rx_queues_cfg[queue].chan;
2479 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2480 }
2481}
2482
2483/**
2484 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2485 * @priv: driver private structure
2486 * Description: It is used for configuring the RX Queue Priority
2487 */
2488static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2489{
2490 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2491 u32 queue;
2492 u32 prio;
2493
2494 for (queue = 0; queue < rx_queues_count; queue++) {
2495 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2496 continue;
2497
2498 prio = priv->plat->rx_queues_cfg[queue].prio;
2499 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2500 }
2501}
2502
2503/**
2504 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2505 * @priv: driver private structure
2506 * Description: It is used for configuring the TX Queue Priority
2507 */
2508static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2509{
2510 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2511 u32 queue;
2512 u32 prio;
2513
2514 for (queue = 0; queue < tx_queues_count; queue++) {
2515 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2516 continue;
2517
2518 prio = priv->plat->tx_queues_cfg[queue].prio;
2519 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2520 }
2521}
2522
2523/**
2524 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2525 * @priv: driver private structure
2526 * Description: It is used for configuring the RX queue routing
2527 */
2528static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2529{
2530 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2531 u32 queue;
2532 u8 packet;
2533
2534 for (queue = 0; queue < rx_queues_count; queue++) {
2535 /* no specific packet type routing specified for the queue */
2536 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2537 continue;
2538
2539 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2540 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2541 }
2542}
2543
2544static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2545{
2546 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2547 priv->rss.enable = false;
2548 return;
2549 }
2550
2551 if (priv->dev->features & NETIF_F_RXHASH)
2552 priv->rss.enable = true;
2553 else
2554 priv->rss.enable = false;
2555
2556 stmmac_rss_configure(priv, priv->hw, &priv->rss,
2557 priv->plat->rx_queues_to_use);
2558}
2559
2560/**
2561 * stmmac_mtl_configuration - Configure MTL
2562 * @priv: driver private structure
2563 * Description: It is used for configurring MTL
2564 */
2565static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2566{
2567 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2568 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2569
2570 if (tx_queues_count > 1)
2571 stmmac_set_tx_queue_weight(priv);
2572
2573 /* Configure MTL RX algorithms */
2574 if (rx_queues_count > 1)
2575 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2576 priv->plat->rx_sched_algorithm);
2577
2578 /* Configure MTL TX algorithms */
2579 if (tx_queues_count > 1)
2580 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2581 priv->plat->tx_sched_algorithm);
2582
2583 /* Configure CBS in AVB TX queues */
2584 if (tx_queues_count > 1)
2585 stmmac_configure_cbs(priv);
2586
2587 /* Map RX MTL to DMA channels */
2588 stmmac_rx_queue_dma_chan_map(priv);
2589
2590 /* Enable MAC RX Queues */
2591 stmmac_mac_enable_rx_queues(priv);
2592
2593 /* Set RX priorities */
2594 if (rx_queues_count > 1)
2595 stmmac_mac_config_rx_queues_prio(priv);
2596
2597 /* Set TX priorities */
2598 if (tx_queues_count > 1)
2599 stmmac_mac_config_tx_queues_prio(priv);
2600
2601 /* Set RX routing */
2602 if (rx_queues_count > 1)
2603 stmmac_mac_config_rx_queues_routing(priv);
2604
2605 /* Receive Side Scaling */
2606 if (rx_queues_count > 1)
2607 stmmac_mac_config_rss(priv);
2608}
2609
2610static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2611{
2612 if (priv->dma_cap.asp) {
2613 netdev_info(priv->dev, "Enabling Safety Features\n");
2614 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2615 } else {
2616 netdev_info(priv->dev, "No Safety Features support found\n");
2617 }
2618}
2619
2620/**
2621 * stmmac_hw_setup - setup mac in a usable state.
2622 * @dev : pointer to the device structure.
2623 * Description:
2624 * this is the main function to setup the HW in a usable state because the
2625 * dma engine is reset, the core registers are configured (e.g. AXI,
2626 * Checksum features, timers). The DMA is ready to start receiving and
2627 * transmitting.
2628 * Return value:
2629 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2630 * file on failure.
2631 */
2632static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2633{
2634 struct stmmac_priv *priv = netdev_priv(dev);
2635 u32 rx_cnt = priv->plat->rx_queues_to_use;
2636 u32 tx_cnt = priv->plat->tx_queues_to_use;
2637 u32 chan;
2638 int ret;
2639
2640 /* DMA initialization and SW reset */
2641 ret = stmmac_init_dma_engine(priv);
2642 if (ret < 0) {
2643 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2644 __func__);
2645 return ret;
2646 }
2647
2648 /* Copy the MAC addr into the HW */
2649 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2650
2651 /* PS and related bits will be programmed according to the speed */
2652 if (priv->hw->pcs) {
2653 int speed = priv->plat->mac_port_sel_speed;
2654
2655 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2656 (speed == SPEED_1000)) {
2657 priv->hw->ps = speed;
2658 } else {
2659 dev_warn(priv->device, "invalid port speed\n");
2660 priv->hw->ps = 0;
2661 }
2662 }
2663
2664 /* Initialize the MAC Core */
2665 stmmac_core_init(priv, priv->hw, dev);
2666
2667 /* Initialize MTL*/
2668 stmmac_mtl_configuration(priv);
2669
2670 /* Initialize Safety Features */
2671 stmmac_safety_feat_configuration(priv);
2672
2673 ret = stmmac_rx_ipc(priv, priv->hw);
2674 if (!ret) {
2675 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2676 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2677 priv->hw->rx_csum = 0;
2678 }
2679
2680 /* Enable the MAC Rx/Tx */
2681 stmmac_mac_set(priv, priv->ioaddr, true);
2682
2683 /* Set the HW DMA mode and the COE */
2684 stmmac_dma_operation_mode(priv);
2685
2686 stmmac_mmc_setup(priv);
2687
2688 if (init_ptp) {
2689 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2690 if (ret < 0)
2691 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2692
2693 ret = stmmac_init_ptp(priv);
2694 if (ret == -EOPNOTSUPP)
2695 netdev_warn(priv->dev, "PTP not supported by HW\n");
2696 else if (ret)
2697 netdev_warn(priv->dev, "PTP init failed\n");
2698 }
2699
2700 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2701
2702 /* Convert the timer from msec to usec */
2703 if (!priv->tx_lpi_timer)
2704 priv->tx_lpi_timer = eee_timer * 1000;
2705
2706 if (priv->use_riwt) {
2707 if (!priv->rx_riwt)
2708 priv->rx_riwt = DEF_DMA_RIWT;
2709
2710 ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
2711 }
2712
2713 if (priv->hw->pcs)
2714 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2715
2716 /* set TX and RX rings length */
2717 stmmac_set_rings_length(priv);
2718
2719 /* Enable TSO */
2720 if (priv->tso) {
2721 for (chan = 0; chan < tx_cnt; chan++)
2722 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2723 }
2724
2725 /* Enable Split Header */
2726 if (priv->sph && priv->hw->rx_csum) {
2727 for (chan = 0; chan < rx_cnt; chan++)
2728 stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2729 }
2730
2731 /* VLAN Tag Insertion */
2732 if (priv->dma_cap.vlins)
2733 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2734
2735 /* TBS */
2736 for (chan = 0; chan < tx_cnt; chan++) {
2737 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2738 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2739
2740 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2741 }
2742
2743 /* Start the ball rolling... */
2744 stmmac_start_all_dma(priv);
2745
2746 return 0;
2747}
2748
2749static void stmmac_hw_teardown(struct net_device *dev)
2750{
2751 struct stmmac_priv *priv = netdev_priv(dev);
2752
2753 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2754}
2755
2756/**
2757 * stmmac_open - open entry point of the driver
2758 * @dev : pointer to the device structure.
2759 * Description:
2760 * This function is the open entry point of the driver.
2761 * Return value:
2762 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2763 * file on failure.
2764 */
2765static int stmmac_open(struct net_device *dev)
2766{
2767 struct stmmac_priv *priv = netdev_priv(dev);
2768 int bfsize = 0;
2769 u32 chan;
2770 int ret;
2771
2772 if (priv->hw->pcs != STMMAC_PCS_TBI &&
2773 priv->hw->pcs != STMMAC_PCS_RTBI &&
2774 priv->hw->xpcs == NULL) {
2775 ret = stmmac_init_phy(dev);
2776 if (ret) {
2777 netdev_err(priv->dev,
2778 "%s: Cannot attach to PHY (error: %d)\n",
2779 __func__, ret);
2780 return ret;
2781 }
2782 }
2783
2784 /* Extra statistics */
2785 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2786 priv->xstats.threshold = tc;
2787
2788 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
2789 if (bfsize < 0)
2790 bfsize = 0;
2791
2792 if (bfsize < BUF_SIZE_16KiB)
2793 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
2794
2795 priv->dma_buf_sz = bfsize;
2796 buf_sz = bfsize;
2797
2798 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2799
2800 /* Earlier check for TBS */
2801 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2802 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2803 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2804
2805 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2806 if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
2807 tx_q->tbs &= ~STMMAC_TBS_AVAIL;
2808 }
2809
2810 ret = alloc_dma_desc_resources(priv);
2811 if (ret < 0) {
2812 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2813 __func__);
2814 goto dma_desc_error;
2815 }
2816
2817 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2818 if (ret < 0) {
2819 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2820 __func__);
2821 goto init_error;
2822 }
2823
2824 ret = stmmac_hw_setup(dev, true);
2825 if (ret < 0) {
2826 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2827 goto init_error;
2828 }
2829
2830 stmmac_init_coalesce(priv);
2831
2832 phylink_start(priv->phylink);
2833 /* We may have called phylink_speed_down before */
2834 phylink_speed_up(priv->phylink);
2835
2836 /* Request the IRQ lines */
2837 ret = request_irq(dev->irq, stmmac_interrupt,
2838 IRQF_SHARED, dev->name, dev);
2839 if (unlikely(ret < 0)) {
2840 netdev_err(priv->dev,
2841 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2842 __func__, dev->irq, ret);
2843 goto irq_error;
2844 }
2845
2846 /* Request the Wake IRQ in case of another line is used for WoL */
2847 if (priv->wol_irq != dev->irq) {
2848 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2849 IRQF_SHARED, dev->name, dev);
2850 if (unlikely(ret < 0)) {
2851 netdev_err(priv->dev,
2852 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2853 __func__, priv->wol_irq, ret);
2854 goto wolirq_error;
2855 }
2856 }
2857
2858 /* Request the IRQ lines */
2859 if (priv->lpi_irq > 0) {
2860 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2861 dev->name, dev);
2862 if (unlikely(ret < 0)) {
2863 netdev_err(priv->dev,
2864 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2865 __func__, priv->lpi_irq, ret);
2866 goto lpiirq_error;
2867 }
2868 }
2869
2870 stmmac_enable_all_queues(priv);
2871 stmmac_start_all_queues(priv);
2872
2873 return 0;
2874
2875lpiirq_error:
2876 if (priv->wol_irq != dev->irq)
2877 free_irq(priv->wol_irq, dev);
2878wolirq_error:
2879 free_irq(dev->irq, dev);
2880irq_error:
2881 phylink_stop(priv->phylink);
2882
2883 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2884 del_timer_sync(&priv->tx_queue[chan].txtimer);
2885
2886 stmmac_hw_teardown(dev);
2887init_error:
2888 free_dma_desc_resources(priv);
2889dma_desc_error:
2890 phylink_disconnect_phy(priv->phylink);
2891 return ret;
2892}
2893
2894/**
2895 * stmmac_release - close entry point of the driver
2896 * @dev : device pointer.
2897 * Description:
2898 * This is the stop entry point of the driver.
2899 */
2900static int stmmac_release(struct net_device *dev)
2901{
2902 struct stmmac_priv *priv = netdev_priv(dev);
2903 u32 chan;
2904
2905 if (priv->eee_enabled)
2906 del_timer_sync(&priv->eee_ctrl_timer);
2907
2908 if (device_may_wakeup(priv->device))
2909 phylink_speed_down(priv->phylink, false);
2910 /* Stop and disconnect the PHY */
2911 phylink_stop(priv->phylink);
2912 phylink_disconnect_phy(priv->phylink);
2913
2914 stmmac_stop_all_queues(priv);
2915
2916 stmmac_disable_all_queues(priv);
2917
2918 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2919 del_timer_sync(&priv->tx_queue[chan].txtimer);
2920
2921 /* Free the IRQ lines */
2922 free_irq(dev->irq, dev);
2923 if (priv->wol_irq != dev->irq)
2924 free_irq(priv->wol_irq, dev);
2925 if (priv->lpi_irq > 0)
2926 free_irq(priv->lpi_irq, dev);
2927
2928 /* Stop TX/RX DMA and clear the descriptors */
2929 stmmac_stop_all_dma(priv);
2930
2931 /* Release and free the Rx/Tx resources */
2932 free_dma_desc_resources(priv);
2933
2934 /* Disable the MAC Rx/Tx */
2935 stmmac_mac_set(priv, priv->ioaddr, false);
2936
2937 netif_carrier_off(dev);
2938
2939 stmmac_release_ptp(priv);
2940
2941 return 0;
2942}
2943
2944static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
2945 struct stmmac_tx_queue *tx_q)
2946{
2947 u16 tag = 0x0, inner_tag = 0x0;
2948 u32 inner_type = 0x0;
2949 struct dma_desc *p;
2950
2951 if (!priv->dma_cap.vlins)
2952 return false;
2953 if (!skb_vlan_tag_present(skb))
2954 return false;
2955 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
2956 inner_tag = skb_vlan_tag_get(skb);
2957 inner_type = STMMAC_VLAN_INSERT;
2958 }
2959
2960 tag = skb_vlan_tag_get(skb);
2961
2962 if (tx_q->tbs & STMMAC_TBS_AVAIL)
2963 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
2964 else
2965 p = &tx_q->dma_tx[tx_q->cur_tx];
2966
2967 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
2968 return false;
2969
2970 stmmac_set_tx_owner(priv, p);
2971 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2972 return true;
2973}
2974
2975/**
2976 * stmmac_tso_allocator - close entry point of the driver
2977 * @priv: driver private structure
2978 * @des: buffer start address
2979 * @total_len: total length to fill in descriptors
2980 * @last_segmant: condition for the last descriptor
2981 * @queue: TX queue index
2982 * Description:
2983 * This function fills descriptor and request new descriptors according to
2984 * buffer length to fill
2985 */
2986static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
2987 int total_len, bool last_segment, u32 queue)
2988{
2989 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2990 struct dma_desc *desc;
2991 u32 buff_size;
2992 int tmp_len;
2993
2994 tmp_len = total_len;
2995
2996 while (tmp_len > 0) {
2997 dma_addr_t curr_addr;
2998
2999 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3000 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3001
3002 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3003 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3004 else
3005 desc = &tx_q->dma_tx[tx_q->cur_tx];
3006
3007 curr_addr = des + (total_len - tmp_len);
3008 if (priv->dma_cap.addr64 <= 32)
3009 desc->des0 = cpu_to_le32(curr_addr);
3010 else
3011 stmmac_set_desc_addr(priv, desc, curr_addr);
3012
3013 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3014 TSO_MAX_BUFF_SIZE : tmp_len;
3015
3016 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3017 0, 1,
3018 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3019 0, 0);
3020
3021 tmp_len -= TSO_MAX_BUFF_SIZE;
3022 }
3023}
3024
3025/**
3026 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3027 * @skb : the socket buffer
3028 * @dev : device pointer
3029 * Description: this is the transmit function that is called on TSO frames
3030 * (support available on GMAC4 and newer chips).
3031 * Diagram below show the ring programming in case of TSO frames:
3032 *
3033 * First Descriptor
3034 * --------
3035 * | DES0 |---> buffer1 = L2/L3/L4 header
3036 * | DES1 |---> TCP Payload (can continue on next descr...)
3037 * | DES2 |---> buffer 1 and 2 len
3038 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3039 * --------
3040 * |
3041 * ...
3042 * |
3043 * --------
3044 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
3045 * | DES1 | --|
3046 * | DES2 | --> buffer 1 and 2 len
3047 * | DES3 |
3048 * --------
3049 *
3050 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
3051 */
3052static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3053{
3054 struct dma_desc *desc, *first, *mss_desc = NULL;
3055 struct stmmac_priv *priv = netdev_priv(dev);
3056 int desc_size, tmp_pay_len = 0, first_tx;
3057 int nfrags = skb_shinfo(skb)->nr_frags;
3058 u32 queue = skb_get_queue_mapping(skb);
3059 unsigned int first_entry, tx_packets;
3060 struct stmmac_tx_queue *tx_q;
3061 bool has_vlan, set_ic;
3062 u8 proto_hdr_len, hdr;
3063 u32 pay_len, mss;
3064 dma_addr_t des;
3065 int i;
3066
3067 tx_q = &priv->tx_queue[queue];
3068 first_tx = tx_q->cur_tx;
3069
3070 /* Compute header lengths */
3071 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3072 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3073 hdr = sizeof(struct udphdr);
3074 } else {
3075 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3076 hdr = tcp_hdrlen(skb);
3077 }
3078
3079 /* Desc availability based on threshold should be enough safe */
3080 if (unlikely(stmmac_tx_avail(priv, queue) <
3081 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3082 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3083 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3084 queue));
3085 /* This is a hard error, log it. */
3086 netdev_err(priv->dev,
3087 "%s: Tx Ring full when queue awake\n",
3088 __func__);
3089 }
3090 return NETDEV_TX_BUSY;
3091 }
3092
3093 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3094
3095 mss = skb_shinfo(skb)->gso_size;
3096
3097 /* set new MSS value if needed */
3098 if (mss != tx_q->mss) {
3099 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3100 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3101 else
3102 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3103
3104 stmmac_set_mss(priv, mss_desc, mss);
3105 tx_q->mss = mss;
3106 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3107 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3108 }
3109
3110 if (netif_msg_tx_queued(priv)) {
3111 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3112 __func__, hdr, proto_hdr_len, pay_len, mss);
3113 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3114 skb->data_len);
3115 }
3116
3117 /* Check if VLAN can be inserted by HW */
3118 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3119
3120 first_entry = tx_q->cur_tx;
3121 WARN_ON(tx_q->tx_skbuff[first_entry]);
3122
3123 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3124 desc = &tx_q->dma_entx[first_entry].basic;
3125 else
3126 desc = &tx_q->dma_tx[first_entry];
3127 first = desc;
3128
3129 if (has_vlan)
3130 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3131
3132 /* first descriptor: fill Headers on Buf1 */
3133 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3134 DMA_TO_DEVICE);
3135 if (dma_mapping_error(priv->device, des))
3136 goto dma_map_err;
3137
3138 tx_q->tx_skbuff_dma[first_entry].buf = des;
3139 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
3140
3141 if (priv->dma_cap.addr64 <= 32) {
3142 first->des0 = cpu_to_le32(des);
3143
3144 /* Fill start of payload in buff2 of first descriptor */
3145 if (pay_len)
3146 first->des1 = cpu_to_le32(des + proto_hdr_len);
3147
3148 /* If needed take extra descriptors to fill the remaining payload */
3149 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3150 } else {
3151 stmmac_set_desc_addr(priv, first, des);
3152 tmp_pay_len = pay_len;
3153 des += proto_hdr_len;
3154 pay_len = 0;
3155 }
3156
3157 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3158
3159 /* Prepare fragments */
3160 for (i = 0; i < nfrags; i++) {
3161 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3162
3163 des = skb_frag_dma_map(priv->device, frag, 0,
3164 skb_frag_size(frag),
3165 DMA_TO_DEVICE);
3166 if (dma_mapping_error(priv->device, des))
3167 goto dma_map_err;
3168
3169 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3170 (i == nfrags - 1), queue);
3171
3172 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3173 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3174 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3175 }
3176
3177 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3178
3179 /* Only the last descriptor gets to point to the skb. */
3180 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3181
3182 /* Manage tx mitigation */
3183 tx_packets = (tx_q->cur_tx + 1) - first_tx;
3184 tx_q->tx_count_frames += tx_packets;
3185
3186 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3187 set_ic = true;
3188 else if (!priv->tx_coal_frames)
3189 set_ic = false;
3190 else if (tx_packets > priv->tx_coal_frames)
3191 set_ic = true;
3192 else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3193 set_ic = true;
3194 else
3195 set_ic = false;
3196
3197 if (set_ic) {
3198 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3199 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3200 else
3201 desc = &tx_q->dma_tx[tx_q->cur_tx];
3202
3203 tx_q->tx_count_frames = 0;
3204 stmmac_set_tx_ic(priv, desc);
3205 priv->xstats.tx_set_ic_bit++;
3206 }
3207
3208 /* We've used all descriptors we need for this skb, however,
3209 * advance cur_tx so that it references a fresh descriptor.
3210 * ndo_start_xmit will fill this descriptor the next time it's
3211 * called and stmmac_tx_clean may clean up to this descriptor.
3212 */
3213 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3214
3215 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3216 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3217 __func__);
3218 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3219 }
3220
3221 dev->stats.tx_bytes += skb->len;
3222 priv->xstats.tx_tso_frames++;
3223 priv->xstats.tx_tso_nfrags += nfrags;
3224
3225 if (priv->sarc_type)
3226 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3227
3228 skb_tx_timestamp(skb);
3229
3230 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3231 priv->hwts_tx_en)) {
3232 /* declare that device is doing timestamping */
3233 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3234 stmmac_enable_tx_timestamp(priv, first);
3235 }
3236
3237 /* Complete the first descriptor before granting the DMA */
3238 stmmac_prepare_tso_tx_desc(priv, first, 1,
3239 proto_hdr_len,
3240 pay_len,
3241 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3242 hdr / 4, (skb->len - proto_hdr_len));
3243
3244 /* If context desc is used to change MSS */
3245 if (mss_desc) {
3246 /* Make sure that first descriptor has been completely
3247 * written, including its own bit. This is because MSS is
3248 * actually before first descriptor, so we need to make
3249 * sure that MSS's own bit is the last thing written.
3250 */
3251 dma_wmb();
3252 stmmac_set_tx_owner(priv, mss_desc);
3253 }
3254
3255 /* The own bit must be the latest setting done when prepare the
3256 * descriptor and then barrier is needed to make sure that
3257 * all is coherent before granting the DMA engine.
3258 */
3259 wmb();
3260
3261 if (netif_msg_pktdata(priv)) {
3262 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3263 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3264 tx_q->cur_tx, first, nfrags);
3265 pr_info(">>> frame to be transmitted: ");
3266 print_pkt(skb->data, skb_headlen(skb));
3267 }
3268
3269 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3270
3271 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3272 desc_size = sizeof(struct dma_edesc);
3273 else
3274 desc_size = sizeof(struct dma_desc);
3275
3276 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3277 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3278 stmmac_tx_timer_arm(priv, queue);
3279
3280 return NETDEV_TX_OK;
3281
3282dma_map_err:
3283 dev_err(priv->device, "Tx dma map failed\n");
3284 dev_kfree_skb(skb);
3285 priv->dev->stats.tx_dropped++;
3286 return NETDEV_TX_OK;
3287}
3288
3289/**
3290 * stmmac_xmit - Tx entry point of the driver
3291 * @skb : the socket buffer
3292 * @dev : device pointer
3293 * Description : this is the tx entry point of the driver.
3294 * It programs the chain or the ring and supports oversized frames
3295 * and SG feature.
3296 */
3297static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3298{
3299 unsigned int first_entry, tx_packets, enh_desc;
3300 struct stmmac_priv *priv = netdev_priv(dev);
3301 unsigned int nopaged_len = skb_headlen(skb);
3302 int i, csum_insertion = 0, is_jumbo = 0;
3303 u32 queue = skb_get_queue_mapping(skb);
3304 int nfrags = skb_shinfo(skb)->nr_frags;
3305 int gso = skb_shinfo(skb)->gso_type;
3306 struct dma_edesc *tbs_desc = NULL;
3307 int entry, desc_size, first_tx;
3308 struct dma_desc *desc, *first;
3309 struct stmmac_tx_queue *tx_q;
3310 bool has_vlan, set_ic;
3311 dma_addr_t des;
3312
3313 tx_q = &priv->tx_queue[queue];
3314 first_tx = tx_q->cur_tx;
3315
3316 if (priv->tx_path_in_lpi_mode)
3317 stmmac_disable_eee_mode(priv);
3318
3319 /* Manage oversized TCP frames for GMAC4 device */
3320 if (skb_is_gso(skb) && priv->tso) {
3321 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3322 return stmmac_tso_xmit(skb, dev);
3323 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3324 return stmmac_tso_xmit(skb, dev);
3325 }
3326
3327 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3328 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3329 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3330 queue));
3331 /* This is a hard error, log it. */
3332 netdev_err(priv->dev,
3333 "%s: Tx Ring full when queue awake\n",
3334 __func__);
3335 }
3336 return NETDEV_TX_BUSY;
3337 }
3338
3339 /* Check if VLAN can be inserted by HW */
3340 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3341
3342 entry = tx_q->cur_tx;
3343 first_entry = entry;
3344 WARN_ON(tx_q->tx_skbuff[first_entry]);
3345
3346 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3347
3348 if (likely(priv->extend_desc))
3349 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3350 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3351 desc = &tx_q->dma_entx[entry].basic;
3352 else
3353 desc = tx_q->dma_tx + entry;
3354
3355 first = desc;
3356
3357 if (has_vlan)
3358 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3359
3360 enh_desc = priv->plat->enh_desc;
3361 /* To program the descriptors according to the size of the frame */
3362 if (enh_desc)
3363 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3364
3365 if (unlikely(is_jumbo)) {
3366 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3367 if (unlikely(entry < 0) && (entry != -EINVAL))
3368 goto dma_map_err;
3369 }
3370
3371 for (i = 0; i < nfrags; i++) {
3372 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3373 int len = skb_frag_size(frag);
3374 bool last_segment = (i == (nfrags - 1));
3375
3376 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3377 WARN_ON(tx_q->tx_skbuff[entry]);
3378
3379 if (likely(priv->extend_desc))
3380 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3381 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3382 desc = &tx_q->dma_entx[entry].basic;
3383 else
3384 desc = tx_q->dma_tx + entry;
3385
3386 des = skb_frag_dma_map(priv->device, frag, 0, len,
3387 DMA_TO_DEVICE);
3388 if (dma_mapping_error(priv->device, des))
3389 goto dma_map_err; /* should reuse desc w/o issues */
3390
3391 tx_q->tx_skbuff_dma[entry].buf = des;
3392
3393 stmmac_set_desc_addr(priv, desc, des);
3394
3395 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3396 tx_q->tx_skbuff_dma[entry].len = len;
3397 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3398
3399 /* Prepare the descriptor and set the own bit too */
3400 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3401 priv->mode, 1, last_segment, skb->len);
3402 }
3403
3404 /* Only the last descriptor gets to point to the skb. */
3405 tx_q->tx_skbuff[entry] = skb;
3406
3407 /* According to the coalesce parameter the IC bit for the latest
3408 * segment is reset and the timer re-started to clean the tx status.
3409 * This approach takes care about the fragments: desc is the first
3410 * element in case of no SG.
3411 */
3412 tx_packets = (entry + 1) - first_tx;
3413 tx_q->tx_count_frames += tx_packets;
3414
3415 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3416 set_ic = true;
3417 else if (!priv->tx_coal_frames)
3418 set_ic = false;
3419 else if (tx_packets > priv->tx_coal_frames)
3420 set_ic = true;
3421 else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3422 set_ic = true;
3423 else
3424 set_ic = false;
3425
3426 if (set_ic) {
3427 if (likely(priv->extend_desc))
3428 desc = &tx_q->dma_etx[entry].basic;
3429 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3430 desc = &tx_q->dma_entx[entry].basic;
3431 else
3432 desc = &tx_q->dma_tx[entry];
3433
3434 tx_q->tx_count_frames = 0;
3435 stmmac_set_tx_ic(priv, desc);
3436 priv->xstats.tx_set_ic_bit++;
3437 }
3438
3439 /* We've used all descriptors we need for this skb, however,
3440 * advance cur_tx so that it references a fresh descriptor.
3441 * ndo_start_xmit will fill this descriptor the next time it's
3442 * called and stmmac_tx_clean may clean up to this descriptor.
3443 */
3444 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3445 tx_q->cur_tx = entry;
3446
3447 if (netif_msg_pktdata(priv)) {
3448 netdev_dbg(priv->dev,
3449 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3450 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3451 entry, first, nfrags);
3452
3453 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3454 print_pkt(skb->data, skb->len);
3455 }
3456
3457 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3458 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3459 __func__);
3460 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3461 }
3462
3463 dev->stats.tx_bytes += skb->len;
3464
3465 if (priv->sarc_type)
3466 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3467
3468 skb_tx_timestamp(skb);
3469
3470 /* Ready to fill the first descriptor and set the OWN bit w/o any
3471 * problems because all the descriptors are actually ready to be
3472 * passed to the DMA engine.
3473 */
3474 if (likely(!is_jumbo)) {
3475 bool last_segment = (nfrags == 0);
3476
3477 des = dma_map_single(priv->device, skb->data,
3478 nopaged_len, DMA_TO_DEVICE);
3479 if (dma_mapping_error(priv->device, des))
3480 goto dma_map_err;
3481
3482 tx_q->tx_skbuff_dma[first_entry].buf = des;
3483
3484 stmmac_set_desc_addr(priv, first, des);
3485
3486 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3487 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3488
3489 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3490 priv->hwts_tx_en)) {
3491 /* declare that device is doing timestamping */
3492 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3493 stmmac_enable_tx_timestamp(priv, first);
3494 }
3495
3496 /* Prepare the first descriptor setting the OWN bit too */
3497 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3498 csum_insertion, priv->mode, 0, last_segment,
3499 skb->len);
3500 }
3501
3502 if (tx_q->tbs & STMMAC_TBS_EN) {
3503 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3504
3505 tbs_desc = &tx_q->dma_entx[first_entry];
3506 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3507 }
3508
3509 stmmac_set_tx_owner(priv, first);
3510
3511 /* The own bit must be the latest setting done when prepare the
3512 * descriptor and then barrier is needed to make sure that
3513 * all is coherent before granting the DMA engine.
3514 */
3515 wmb();
3516
3517 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3518
3519 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3520
3521 if (likely(priv->extend_desc))
3522 desc_size = sizeof(struct dma_extended_desc);
3523 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3524 desc_size = sizeof(struct dma_edesc);
3525 else
3526 desc_size = sizeof(struct dma_desc);
3527
3528 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3529 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3530 stmmac_tx_timer_arm(priv, queue);
3531
3532 return NETDEV_TX_OK;
3533
3534dma_map_err:
3535 netdev_err(priv->dev, "Tx DMA map failed\n");
3536 dev_kfree_skb(skb);
3537 priv->dev->stats.tx_dropped++;
3538 return NETDEV_TX_OK;
3539}
3540
3541static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3542{
3543 struct vlan_ethhdr *veth;
3544 __be16 vlan_proto;
3545 u16 vlanid;
3546
3547 veth = (struct vlan_ethhdr *)skb->data;
3548 vlan_proto = veth->h_vlan_proto;
3549
3550 if ((vlan_proto == htons(ETH_P_8021Q) &&
3551 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3552 (vlan_proto == htons(ETH_P_8021AD) &&
3553 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3554 /* pop the vlan tag */
3555 vlanid = ntohs(veth->h_vlan_TCI);
3556 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3557 skb_pull(skb, VLAN_HLEN);
3558 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3559 }
3560}
3561
3562/**
3563 * stmmac_rx_refill - refill used skb preallocated buffers
3564 * @priv: driver private structure
3565 * @queue: RX queue index
3566 * Description : this is to reallocate the skb for the reception process
3567 * that is based on zero-copy.
3568 */
3569static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3570{
3571 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3572 int len, dirty = stmmac_rx_dirty(priv, queue);
3573 unsigned int entry = rx_q->dirty_rx;
3574
3575 len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
3576
3577 while (dirty-- > 0) {
3578 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3579 struct dma_desc *p;
3580 bool use_rx_wd;
3581
3582 if (priv->extend_desc)
3583 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3584 else
3585 p = rx_q->dma_rx + entry;
3586
3587 if (!buf->page) {
3588 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
3589 if (!buf->page)
3590 break;
3591 }
3592
3593 if (priv->sph && !buf->sec_page) {
3594 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
3595 if (!buf->sec_page)
3596 break;
3597
3598 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
3599
3600 dma_sync_single_for_device(priv->device, buf->sec_addr,
3601 len, DMA_FROM_DEVICE);
3602 }
3603
3604 buf->addr = page_pool_get_dma_addr(buf->page);
3605
3606 /* Sync whole allocation to device. This will invalidate old
3607 * data.
3608 */
3609 dma_sync_single_for_device(priv->device, buf->addr, len,
3610 DMA_FROM_DEVICE);
3611
3612 stmmac_set_desc_addr(priv, p, buf->addr);
3613 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
3614 stmmac_refill_desc3(priv, rx_q, p);
3615
3616 rx_q->rx_count_frames++;
3617 rx_q->rx_count_frames += priv->rx_coal_frames;
3618 if (rx_q->rx_count_frames > priv->rx_coal_frames)
3619 rx_q->rx_count_frames = 0;
3620
3621 use_rx_wd = !priv->rx_coal_frames;
3622 use_rx_wd |= rx_q->rx_count_frames > 0;
3623 if (!priv->use_riwt)
3624 use_rx_wd = false;
3625
3626 dma_wmb();
3627 stmmac_set_rx_owner(priv, p, use_rx_wd);
3628
3629 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3630 }
3631 rx_q->dirty_rx = entry;
3632 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3633 (rx_q->dirty_rx * sizeof(struct dma_desc));
3634 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3635}
3636
3637static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
3638 struct dma_desc *p,
3639 int status, unsigned int len)
3640{
3641 int ret, coe = priv->hw->rx_csum;
3642 unsigned int plen = 0, hlen = 0;
3643
3644 /* Not first descriptor, buffer is always zero */
3645 if (priv->sph && len)
3646 return 0;
3647
3648 /* First descriptor, get split header length */
3649 ret = stmmac_get_rx_header_len(priv, p, &hlen);
3650 if (priv->sph && hlen) {
3651 priv->xstats.rx_split_hdr_pkt_n++;
3652 return hlen;
3653 }
3654
3655 /* First descriptor, not last descriptor and not split header */
3656 if (status & rx_not_ls)
3657 return priv->dma_buf_sz;
3658
3659 plen = stmmac_get_rx_frame_len(priv, p, coe);
3660
3661 /* First descriptor and last descriptor and not split header */
3662 return min_t(unsigned int, priv->dma_buf_sz, plen);
3663}
3664
3665static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
3666 struct dma_desc *p,
3667 int status, unsigned int len)
3668{
3669 int coe = priv->hw->rx_csum;
3670 unsigned int plen = 0;
3671
3672 /* Not split header, buffer is not available */
3673 if (!priv->sph)
3674 return 0;
3675
3676 /* Not last descriptor */
3677 if (status & rx_not_ls)
3678 return priv->dma_buf_sz;
3679
3680 plen = stmmac_get_rx_frame_len(priv, p, coe);
3681
3682 /* Last descriptor */
3683 return plen - len;
3684}
3685
3686/**
3687 * stmmac_rx - manage the receive process
3688 * @priv: driver private structure
3689 * @limit: napi bugget
3690 * @queue: RX queue index.
3691 * Description : this the function called by the napi poll method.
3692 * It gets all the frames inside the ring.
3693 */
3694static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3695{
3696 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3697 struct stmmac_channel *ch = &priv->channel[queue];
3698 unsigned int count = 0, error = 0, len = 0;
3699 int status = 0, coe = priv->hw->rx_csum;
3700 unsigned int next_entry = rx_q->cur_rx;
3701 struct sk_buff *skb = NULL;
3702
3703 if (netif_msg_rx_status(priv)) {
3704 void *rx_head;
3705
3706 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3707 if (priv->extend_desc)
3708 rx_head = (void *)rx_q->dma_erx;
3709 else
3710 rx_head = (void *)rx_q->dma_rx;
3711
3712 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3713 }
3714 while (count < limit) {
3715 unsigned int buf1_len = 0, buf2_len = 0;
3716 enum pkt_hash_types hash_type;
3717 struct stmmac_rx_buffer *buf;
3718 struct dma_desc *np, *p;
3719 int entry;
3720 u32 hash;
3721
3722 if (!count && rx_q->state_saved) {
3723 skb = rx_q->state.skb;
3724 error = rx_q->state.error;
3725 len = rx_q->state.len;
3726 } else {
3727 rx_q->state_saved = false;
3728 skb = NULL;
3729 error = 0;
3730 len = 0;
3731 }
3732
3733 if (count >= limit)
3734 break;
3735
3736read_again:
3737 buf1_len = 0;
3738 buf2_len = 0;
3739 entry = next_entry;
3740 buf = &rx_q->buf_pool[entry];
3741
3742 if (priv->extend_desc)
3743 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3744 else
3745 p = rx_q->dma_rx + entry;
3746
3747 /* read the status of the incoming frame */
3748 status = stmmac_rx_status(priv, &priv->dev->stats,
3749 &priv->xstats, p);
3750 /* check if managed by the DMA otherwise go ahead */
3751 if (unlikely(status & dma_own))
3752 break;
3753
3754 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3755 next_entry = rx_q->cur_rx;
3756
3757 if (priv->extend_desc)
3758 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3759 else
3760 np = rx_q->dma_rx + next_entry;
3761
3762 prefetch(np);
3763
3764 if (priv->extend_desc)
3765 stmmac_rx_extended_status(priv, &priv->dev->stats,
3766 &priv->xstats, rx_q->dma_erx + entry);
3767 if (unlikely(status == discard_frame)) {
3768 page_pool_recycle_direct(rx_q->page_pool, buf->page);
3769 buf->page = NULL;
3770 error = 1;
3771 if (!priv->hwts_rx_en)
3772 priv->dev->stats.rx_errors++;
3773 }
3774
3775 if (unlikely(error && (status & rx_not_ls)))
3776 goto read_again;
3777 if (unlikely(error)) {
3778 dev_kfree_skb(skb);
3779 skb = NULL;
3780 count++;
3781 continue;
3782 }
3783
3784 /* Buffer is good. Go on. */
3785
3786 prefetch(page_address(buf->page));
3787 if (buf->sec_page)
3788 prefetch(page_address(buf->sec_page));
3789
3790 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
3791 len += buf1_len;
3792 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
3793 len += buf2_len;
3794
3795 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3796 * Type frames (LLC/LLC-SNAP)
3797 *
3798 * llc_snap is never checked in GMAC >= 4, so this ACS
3799 * feature is always disabled and packets need to be
3800 * stripped manually.
3801 */
3802 if (likely(!(status & rx_not_ls)) &&
3803 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3804 unlikely(status != llc_snap))) {
3805 if (buf2_len)
3806 buf2_len -= ETH_FCS_LEN;
3807 else
3808 buf1_len -= ETH_FCS_LEN;
3809
3810 len -= ETH_FCS_LEN;
3811 }
3812
3813 if (!skb) {
3814 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
3815 if (!skb) {
3816 priv->dev->stats.rx_dropped++;
3817 count++;
3818 goto drain_data;
3819 }
3820
3821 dma_sync_single_for_cpu(priv->device, buf->addr,
3822 buf1_len, DMA_FROM_DEVICE);
3823 skb_copy_to_linear_data(skb, page_address(buf->page),
3824 buf1_len);
3825 skb_put(skb, buf1_len);
3826
3827 /* Data payload copied into SKB, page ready for recycle */
3828 page_pool_recycle_direct(rx_q->page_pool, buf->page);
3829 buf->page = NULL;
3830 } else if (buf1_len) {
3831 dma_sync_single_for_cpu(priv->device, buf->addr,
3832 buf1_len, DMA_FROM_DEVICE);
3833 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3834 buf->page, 0, buf1_len,
3835 priv->dma_buf_sz);
3836
3837 /* Data payload appended into SKB */
3838 page_pool_release_page(rx_q->page_pool, buf->page);
3839 buf->page = NULL;
3840 }
3841
3842 if (buf2_len) {
3843 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
3844 buf2_len, DMA_FROM_DEVICE);
3845 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3846 buf->sec_page, 0, buf2_len,
3847 priv->dma_buf_sz);
3848
3849 /* Data payload appended into SKB */
3850 page_pool_release_page(rx_q->page_pool, buf->sec_page);
3851 buf->sec_page = NULL;
3852 }
3853
3854drain_data:
3855 if (likely(status & rx_not_ls))
3856 goto read_again;
3857 if (!skb)
3858 continue;
3859
3860 /* Got entire packet into SKB. Finish it. */
3861
3862 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3863 stmmac_rx_vlan(priv->dev, skb);
3864 skb->protocol = eth_type_trans(skb, priv->dev);
3865
3866 if (unlikely(!coe))
3867 skb_checksum_none_assert(skb);
3868 else
3869 skb->ip_summed = CHECKSUM_UNNECESSARY;
3870
3871 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
3872 skb_set_hash(skb, hash, hash_type);
3873
3874 skb_record_rx_queue(skb, queue);
3875 napi_gro_receive(&ch->rx_napi, skb);
3876 skb = NULL;
3877
3878 priv->dev->stats.rx_packets++;
3879 priv->dev->stats.rx_bytes += len;
3880 count++;
3881 }
3882
3883 if (status & rx_not_ls || skb) {
3884 rx_q->state_saved = true;
3885 rx_q->state.skb = skb;
3886 rx_q->state.error = error;
3887 rx_q->state.len = len;
3888 }
3889
3890 stmmac_rx_refill(priv, queue);
3891
3892 priv->xstats.rx_pkt_n += count;
3893
3894 return count;
3895}
3896
3897static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3898{
3899 struct stmmac_channel *ch =
3900 container_of(napi, struct stmmac_channel, rx_napi);
3901 struct stmmac_priv *priv = ch->priv_data;
3902 u32 chan = ch->index;
3903 int work_done;
3904
3905 priv->xstats.napi_poll++;
3906
3907 work_done = stmmac_rx(priv, budget, chan);
3908 if (work_done < budget && napi_complete_done(napi, work_done)) {
3909 unsigned long flags;
3910
3911 spin_lock_irqsave(&ch->lock, flags);
3912 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3913 spin_unlock_irqrestore(&ch->lock, flags);
3914 }
3915
3916 return work_done;
3917}
3918
3919static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3920{
3921 struct stmmac_channel *ch =
3922 container_of(napi, struct stmmac_channel, tx_napi);
3923 struct stmmac_priv *priv = ch->priv_data;
3924 u32 chan = ch->index;
3925 int work_done;
3926
3927 priv->xstats.napi_poll++;
3928
3929 work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3930 work_done = min(work_done, budget);
3931
3932 if (work_done < budget && napi_complete_done(napi, work_done)) {
3933 unsigned long flags;
3934
3935 spin_lock_irqsave(&ch->lock, flags);
3936 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3937 spin_unlock_irqrestore(&ch->lock, flags);
3938 }
3939
3940 return work_done;
3941}
3942
3943/**
3944 * stmmac_tx_timeout
3945 * @dev : Pointer to net device structure
3946 * Description: this function is called when a packet transmission fails to
3947 * complete within a reasonable time. The driver will mark the error in the
3948 * netdev structure and arrange for the device to be reset to a sane state
3949 * in order to transmit a new packet.
3950 */
3951static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
3952{
3953 struct stmmac_priv *priv = netdev_priv(dev);
3954
3955 stmmac_global_err(priv);
3956}
3957
3958/**
3959 * stmmac_set_rx_mode - entry point for multicast addressing
3960 * @dev : pointer to the device structure
3961 * Description:
3962 * This function is a driver entry point which gets called by the kernel
3963 * whenever multicast addresses must be enabled/disabled.
3964 * Return value:
3965 * void.
3966 */
3967static void stmmac_set_rx_mode(struct net_device *dev)
3968{
3969 struct stmmac_priv *priv = netdev_priv(dev);
3970
3971 stmmac_set_filter(priv, priv->hw, dev);
3972}
3973
3974/**
3975 * stmmac_change_mtu - entry point to change MTU size for the device.
3976 * @dev : device pointer.
3977 * @new_mtu : the new MTU size for the device.
3978 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
3979 * to drive packet transmission. Ethernet has an MTU of 1500 octets
3980 * (ETH_DATA_LEN). This value can be changed with ifconfig.
3981 * Return value:
3982 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3983 * file on failure.
3984 */
3985static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3986{
3987 struct stmmac_priv *priv = netdev_priv(dev);
3988 int txfifosz = priv->plat->tx_fifo_size;
3989
3990 if (txfifosz == 0)
3991 txfifosz = priv->dma_cap.tx_fifo_size;
3992
3993 txfifosz /= priv->plat->tx_queues_to_use;
3994
3995 if (netif_running(dev)) {
3996 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3997 return -EBUSY;
3998 }
3999
4000 new_mtu = STMMAC_ALIGN(new_mtu);
4001
4002 /* If condition true, FIFO is too small or MTU too large */
4003 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
4004 return -EINVAL;
4005
4006 dev->mtu = new_mtu;
4007
4008 netdev_update_features(dev);
4009
4010 return 0;
4011}
4012
4013static netdev_features_t stmmac_fix_features(struct net_device *dev,
4014 netdev_features_t features)
4015{
4016 struct stmmac_priv *priv = netdev_priv(dev);
4017
4018 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
4019 features &= ~NETIF_F_RXCSUM;
4020
4021 if (!priv->plat->tx_coe)
4022 features &= ~NETIF_F_CSUM_MASK;
4023
4024 /* Some GMAC devices have a bugged Jumbo frame support that
4025 * needs to have the Tx COE disabled for oversized frames
4026 * (due to limited buffer sizes). In this case we disable
4027 * the TX csum insertion in the TDES and not use SF.
4028 */
4029 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
4030 features &= ~NETIF_F_CSUM_MASK;
4031
4032 /* Disable tso if asked by ethtool */
4033 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4034 if (features & NETIF_F_TSO)
4035 priv->tso = true;
4036 else
4037 priv->tso = false;
4038 }
4039
4040 return features;
4041}
4042
4043static int stmmac_set_features(struct net_device *netdev,
4044 netdev_features_t features)
4045{
4046 struct stmmac_priv *priv = netdev_priv(netdev);
4047 bool sph_en;
4048 u32 chan;
4049
4050 /* Keep the COE Type in case of csum is supporting */
4051 if (features & NETIF_F_RXCSUM)
4052 priv->hw->rx_csum = priv->plat->rx_coe;
4053 else
4054 priv->hw->rx_csum = 0;
4055 /* No check needed because rx_coe has been set before and it will be
4056 * fixed in case of issue.
4057 */
4058 stmmac_rx_ipc(priv, priv->hw);
4059
4060 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
4061 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
4062 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
4063
4064 return 0;
4065}
4066
4067/**
4068 * stmmac_interrupt - main ISR
4069 * @irq: interrupt number.
4070 * @dev_id: to pass the net device pointer (must be valid).
4071 * Description: this is the main driver interrupt service routine.
4072 * It can call:
4073 * o DMA service routine (to manage incoming frame reception and transmission
4074 * status)
4075 * o Core interrupts to manage: remote wake-up, management counter, LPI
4076 * interrupts.
4077 */
4078static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
4079{
4080 struct net_device *dev = (struct net_device *)dev_id;
4081 struct stmmac_priv *priv = netdev_priv(dev);
4082 u32 rx_cnt = priv->plat->rx_queues_to_use;
4083 u32 tx_cnt = priv->plat->tx_queues_to_use;
4084 u32 queues_count;
4085 u32 queue;
4086 bool xmac;
4087
4088 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
4089 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
4090
4091 if (priv->irq_wake)
4092 pm_wakeup_event(priv->device, 0);
4093
4094 /* Check if adapter is up */
4095 if (test_bit(STMMAC_DOWN, &priv->state))
4096 return IRQ_HANDLED;
4097 /* Check if a fatal error happened */
4098 if (stmmac_safety_feat_interrupt(priv))
4099 return IRQ_HANDLED;
4100
4101 /* To handle GMAC own interrupts */
4102 if ((priv->plat->has_gmac) || xmac) {
4103 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
4104 int mtl_status;
4105
4106 if (unlikely(status)) {
4107 /* For LPI we need to save the tx status */
4108 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
4109 priv->tx_path_in_lpi_mode = true;
4110 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
4111 priv->tx_path_in_lpi_mode = false;
4112 }
4113
4114 for (queue = 0; queue < queues_count; queue++) {
4115 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4116
4117 mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
4118 queue);
4119 if (mtl_status != -EINVAL)
4120 status |= mtl_status;
4121
4122 if (status & CORE_IRQ_MTL_RX_OVERFLOW)
4123 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
4124 rx_q->rx_tail_addr,
4125 queue);
4126 }
4127
4128 /* PCS link status */
4129 if (priv->hw->pcs) {
4130 if (priv->xstats.pcs_link)
4131 netif_carrier_on(dev);
4132 else
4133 netif_carrier_off(dev);
4134 }
4135 }
4136
4137 /* To handle DMA interrupts */
4138 stmmac_dma_interrupt(priv);
4139
4140 return IRQ_HANDLED;
4141}
4142
4143#ifdef CONFIG_NET_POLL_CONTROLLER
4144/* Polling receive - used by NETCONSOLE and other diagnostic tools
4145 * to allow network I/O with interrupts disabled.
4146 */
4147static void stmmac_poll_controller(struct net_device *dev)
4148{
4149 disable_irq(dev->irq);
4150 stmmac_interrupt(dev->irq, dev);
4151 enable_irq(dev->irq);
4152}
4153#endif
4154
4155/**
4156 * stmmac_ioctl - Entry point for the Ioctl
4157 * @dev: Device pointer.
4158 * @rq: An IOCTL specefic structure, that can contain a pointer to
4159 * a proprietary structure used to pass information to the driver.
4160 * @cmd: IOCTL command
4161 * Description:
4162 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
4163 */
4164static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4165{
4166 struct stmmac_priv *priv = netdev_priv (dev);
4167 int ret = -EOPNOTSUPP;
4168
4169 if (!netif_running(dev))
4170 return -EINVAL;
4171
4172 switch (cmd) {
4173 case SIOCGMIIPHY:
4174 case SIOCGMIIREG:
4175 case SIOCSMIIREG:
4176 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4177 break;
4178 case SIOCSHWTSTAMP:
4179 ret = stmmac_hwtstamp_set(dev, rq);
4180 break;
4181 case SIOCGHWTSTAMP:
4182 ret = stmmac_hwtstamp_get(dev, rq);
4183 break;
4184 default:
4185 break;
4186 }
4187
4188 return ret;
4189}
4190
4191static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4192 void *cb_priv)
4193{
4194 struct stmmac_priv *priv = cb_priv;
4195 int ret = -EOPNOTSUPP;
4196
4197 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4198 return ret;
4199
4200 stmmac_disable_all_queues(priv);
4201
4202 switch (type) {
4203 case TC_SETUP_CLSU32:
4204 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4205 break;
4206 case TC_SETUP_CLSFLOWER:
4207 ret = stmmac_tc_setup_cls(priv, priv, type_data);
4208 break;
4209 default:
4210 break;
4211 }
4212
4213 stmmac_enable_all_queues(priv);
4214 return ret;
4215}
4216
4217static LIST_HEAD(stmmac_block_cb_list);
4218
4219static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
4220 void *type_data)
4221{
4222 struct stmmac_priv *priv = netdev_priv(ndev);
4223
4224 switch (type) {
4225 case TC_SETUP_BLOCK:
4226 return flow_block_cb_setup_simple(type_data,
4227 &stmmac_block_cb_list,
4228 stmmac_setup_tc_block_cb,
4229 priv, priv, true);
4230 case TC_SETUP_QDISC_CBS:
4231 return stmmac_tc_setup_cbs(priv, priv, type_data);
4232 case TC_SETUP_QDISC_TAPRIO:
4233 return stmmac_tc_setup_taprio(priv, priv, type_data);
4234 case TC_SETUP_QDISC_ETF:
4235 return stmmac_tc_setup_etf(priv, priv, type_data);
4236 default:
4237 return -EOPNOTSUPP;
4238 }
4239}
4240
4241static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
4242 struct net_device *sb_dev)
4243{
4244 int gso = skb_shinfo(skb)->gso_type;
4245
4246 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
4247 /*
4248 * There is no way to determine the number of TSO/USO
4249 * capable Queues. Let's use always the Queue 0
4250 * because if TSO/USO is supported then at least this
4251 * one will be capable.
4252 */
4253 return 0;
4254 }
4255
4256 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
4257}
4258
4259static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4260{
4261 struct stmmac_priv *priv = netdev_priv(ndev);
4262 int ret = 0;
4263
4264 ret = eth_mac_addr(ndev, addr);
4265 if (ret)
4266 return ret;
4267
4268 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4269
4270 return ret;
4271}
4272
4273#ifdef CONFIG_DEBUG_FS
4274static struct dentry *stmmac_fs_dir;
4275
4276static void sysfs_display_ring(void *head, int size, int extend_desc,
4277 struct seq_file *seq)
4278{
4279 int i;
4280 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4281 struct dma_desc *p = (struct dma_desc *)head;
4282
4283 for (i = 0; i < size; i++) {
4284 if (extend_desc) {
4285 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4286 i, (unsigned int)virt_to_phys(ep),
4287 le32_to_cpu(ep->basic.des0),
4288 le32_to_cpu(ep->basic.des1),
4289 le32_to_cpu(ep->basic.des2),
4290 le32_to_cpu(ep->basic.des3));
4291 ep++;
4292 } else {
4293 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4294 i, (unsigned int)virt_to_phys(p),
4295 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4296 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4297 p++;
4298 }
4299 seq_printf(seq, "\n");
4300 }
4301}
4302
4303static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4304{
4305 struct net_device *dev = seq->private;
4306 struct stmmac_priv *priv = netdev_priv(dev);
4307 u32 rx_count = priv->plat->rx_queues_to_use;
4308 u32 tx_count = priv->plat->tx_queues_to_use;
4309 u32 queue;
4310
4311 if ((dev->flags & IFF_UP) == 0)
4312 return 0;
4313
4314 for (queue = 0; queue < rx_count; queue++) {
4315 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4316
4317 seq_printf(seq, "RX Queue %d:\n", queue);
4318
4319 if (priv->extend_desc) {
4320 seq_printf(seq, "Extended descriptor ring:\n");
4321 sysfs_display_ring((void *)rx_q->dma_erx,
4322 DMA_RX_SIZE, 1, seq);
4323 } else {
4324 seq_printf(seq, "Descriptor ring:\n");
4325 sysfs_display_ring((void *)rx_q->dma_rx,
4326 DMA_RX_SIZE, 0, seq);
4327 }
4328 }
4329
4330 for (queue = 0; queue < tx_count; queue++) {
4331 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4332
4333 seq_printf(seq, "TX Queue %d:\n", queue);
4334
4335 if (priv->extend_desc) {
4336 seq_printf(seq, "Extended descriptor ring:\n");
4337 sysfs_display_ring((void *)tx_q->dma_etx,
4338 DMA_TX_SIZE, 1, seq);
4339 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
4340 seq_printf(seq, "Descriptor ring:\n");
4341 sysfs_display_ring((void *)tx_q->dma_tx,
4342 DMA_TX_SIZE, 0, seq);
4343 }
4344 }
4345
4346 return 0;
4347}
4348DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
4349
4350static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4351{
4352 struct net_device *dev = seq->private;
4353 struct stmmac_priv *priv = netdev_priv(dev);
4354
4355 if (!priv->hw_cap_support) {
4356 seq_printf(seq, "DMA HW features not supported\n");
4357 return 0;
4358 }
4359
4360 seq_printf(seq, "==============================\n");
4361 seq_printf(seq, "\tDMA HW features\n");
4362 seq_printf(seq, "==============================\n");
4363
4364 seq_printf(seq, "\t10/100 Mbps: %s\n",
4365 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
4366 seq_printf(seq, "\t1000 Mbps: %s\n",
4367 (priv->dma_cap.mbps_1000) ? "Y" : "N");
4368 seq_printf(seq, "\tHalf duplex: %s\n",
4369 (priv->dma_cap.half_duplex) ? "Y" : "N");
4370 seq_printf(seq, "\tHash Filter: %s\n",
4371 (priv->dma_cap.hash_filter) ? "Y" : "N");
4372 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4373 (priv->dma_cap.multi_addr) ? "Y" : "N");
4374 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4375 (priv->dma_cap.pcs) ? "Y" : "N");
4376 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4377 (priv->dma_cap.sma_mdio) ? "Y" : "N");
4378 seq_printf(seq, "\tPMT Remote wake up: %s\n",
4379 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4380 seq_printf(seq, "\tPMT Magic Frame: %s\n",
4381 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4382 seq_printf(seq, "\tRMON module: %s\n",
4383 (priv->dma_cap.rmon) ? "Y" : "N");
4384 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4385 (priv->dma_cap.time_stamp) ? "Y" : "N");
4386 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4387 (priv->dma_cap.atime_stamp) ? "Y" : "N");
4388 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4389 (priv->dma_cap.eee) ? "Y" : "N");
4390 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4391 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4392 (priv->dma_cap.tx_coe) ? "Y" : "N");
4393 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4394 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4395 (priv->dma_cap.rx_coe) ? "Y" : "N");
4396 } else {
4397 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4398 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4399 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4400 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4401 }
4402 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4403 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4404 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4405 priv->dma_cap.number_rx_channel);
4406 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4407 priv->dma_cap.number_tx_channel);
4408 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
4409 priv->dma_cap.number_rx_queues);
4410 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
4411 priv->dma_cap.number_tx_queues);
4412 seq_printf(seq, "\tEnhanced descriptors: %s\n",
4413 (priv->dma_cap.enh_desc) ? "Y" : "N");
4414 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
4415 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
4416 seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
4417 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
4418 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
4419 priv->dma_cap.pps_out_num);
4420 seq_printf(seq, "\tSafety Features: %s\n",
4421 priv->dma_cap.asp ? "Y" : "N");
4422 seq_printf(seq, "\tFlexible RX Parser: %s\n",
4423 priv->dma_cap.frpsel ? "Y" : "N");
4424 seq_printf(seq, "\tEnhanced Addressing: %d\n",
4425 priv->dma_cap.addr64);
4426 seq_printf(seq, "\tReceive Side Scaling: %s\n",
4427 priv->dma_cap.rssen ? "Y" : "N");
4428 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
4429 priv->dma_cap.vlhash ? "Y" : "N");
4430 seq_printf(seq, "\tSplit Header: %s\n",
4431 priv->dma_cap.sphen ? "Y" : "N");
4432 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
4433 priv->dma_cap.vlins ? "Y" : "N");
4434 seq_printf(seq, "\tDouble VLAN: %s\n",
4435 priv->dma_cap.dvlan ? "Y" : "N");
4436 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
4437 priv->dma_cap.l3l4fnum);
4438 seq_printf(seq, "\tARP Offloading: %s\n",
4439 priv->dma_cap.arpoffsel ? "Y" : "N");
4440 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
4441 priv->dma_cap.estsel ? "Y" : "N");
4442 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
4443 priv->dma_cap.fpesel ? "Y" : "N");
4444 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
4445 priv->dma_cap.tbssel ? "Y" : "N");
4446 return 0;
4447}
4448DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4449
4450/* Use network device events to rename debugfs file entries.
4451 */
4452static int stmmac_device_event(struct notifier_block *unused,
4453 unsigned long event, void *ptr)
4454{
4455 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4456 struct stmmac_priv *priv = netdev_priv(dev);
4457
4458 if (dev->netdev_ops != &stmmac_netdev_ops)
4459 goto done;
4460
4461 switch (event) {
4462 case NETDEV_CHANGENAME:
4463 if (priv->dbgfs_dir)
4464 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4465 priv->dbgfs_dir,
4466 stmmac_fs_dir,
4467 dev->name);
4468 break;
4469 }
4470done:
4471 return NOTIFY_DONE;
4472}
4473
4474static struct notifier_block stmmac_notifier = {
4475 .notifier_call = stmmac_device_event,
4476};
4477
4478static void stmmac_init_fs(struct net_device *dev)
4479{
4480 struct stmmac_priv *priv = netdev_priv(dev);
4481
4482 rtnl_lock();
4483
4484 /* Create per netdev entries */
4485 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4486
4487 /* Entry to report DMA RX/TX rings */
4488 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
4489 &stmmac_rings_status_fops);
4490
4491 /* Entry to report the DMA HW features */
4492 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
4493 &stmmac_dma_cap_fops);
4494
4495 rtnl_unlock();
4496}
4497
4498static void stmmac_exit_fs(struct net_device *dev)
4499{
4500 struct stmmac_priv *priv = netdev_priv(dev);
4501
4502 debugfs_remove_recursive(priv->dbgfs_dir);
4503}
4504#endif /* CONFIG_DEBUG_FS */
4505
4506static u32 stmmac_vid_crc32_le(__le16 vid_le)
4507{
4508 unsigned char *data = (unsigned char *)&vid_le;
4509 unsigned char data_byte = 0;
4510 u32 crc = ~0x0;
4511 u32 temp = 0;
4512 int i, bits;
4513
4514 bits = get_bitmask_order(VLAN_VID_MASK);
4515 for (i = 0; i < bits; i++) {
4516 if ((i % 8) == 0)
4517 data_byte = data[i / 8];
4518
4519 temp = ((crc & 1) ^ data_byte) & 1;
4520 crc >>= 1;
4521 data_byte >>= 1;
4522
4523 if (temp)
4524 crc ^= 0xedb88320;
4525 }
4526
4527 return crc;
4528}
4529
4530static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
4531{
4532 u32 crc, hash = 0;
4533 __le16 pmatch = 0;
4534 int count = 0;
4535 u16 vid = 0;
4536
4537 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
4538 __le16 vid_le = cpu_to_le16(vid);
4539 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
4540 hash |= (1 << crc);
4541 count++;
4542 }
4543
4544 if (!priv->dma_cap.vlhash) {
4545 if (count > 2) /* VID = 0 always passes filter */
4546 return -EOPNOTSUPP;
4547
4548 pmatch = cpu_to_le16(vid);
4549 hash = 0;
4550 }
4551
4552 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
4553}
4554
4555static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
4556{
4557 struct stmmac_priv *priv = netdev_priv(ndev);
4558 bool is_double = false;
4559 int ret;
4560
4561 if (be16_to_cpu(proto) == ETH_P_8021AD)
4562 is_double = true;
4563
4564 set_bit(vid, priv->active_vlans);
4565 ret = stmmac_vlan_update(priv, is_double);
4566 if (ret) {
4567 clear_bit(vid, priv->active_vlans);
4568 return ret;
4569 }
4570
4571 if (priv->hw->num_vlan) {
4572 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4573 if (ret)
4574 return ret;
4575 }
4576
4577 return 0;
4578}
4579
4580static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
4581{
4582 struct stmmac_priv *priv = netdev_priv(ndev);
4583 bool is_double = false;
4584 int ret;
4585
4586 if (be16_to_cpu(proto) == ETH_P_8021AD)
4587 is_double = true;
4588
4589 clear_bit(vid, priv->active_vlans);
4590
4591 if (priv->hw->num_vlan) {
4592 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4593 if (ret)
4594 return ret;
4595 }
4596
4597 return stmmac_vlan_update(priv, is_double);
4598}
4599
4600static const struct net_device_ops stmmac_netdev_ops = {
4601 .ndo_open = stmmac_open,
4602 .ndo_start_xmit = stmmac_xmit,
4603 .ndo_stop = stmmac_release,
4604 .ndo_change_mtu = stmmac_change_mtu,
4605 .ndo_fix_features = stmmac_fix_features,
4606 .ndo_set_features = stmmac_set_features,
4607 .ndo_set_rx_mode = stmmac_set_rx_mode,
4608 .ndo_tx_timeout = stmmac_tx_timeout,
4609 .ndo_do_ioctl = stmmac_ioctl,
4610 .ndo_setup_tc = stmmac_setup_tc,
4611 .ndo_select_queue = stmmac_select_queue,
4612#ifdef CONFIG_NET_POLL_CONTROLLER
4613 .ndo_poll_controller = stmmac_poll_controller,
4614#endif
4615 .ndo_set_mac_address = stmmac_set_mac_address,
4616 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
4617 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
4618};
4619
4620static void stmmac_reset_subtask(struct stmmac_priv *priv)
4621{
4622 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4623 return;
4624 if (test_bit(STMMAC_DOWN, &priv->state))
4625 return;
4626
4627 netdev_err(priv->dev, "Reset adapter.\n");
4628
4629 rtnl_lock();
4630 netif_trans_update(priv->dev);
4631 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4632 usleep_range(1000, 2000);
4633
4634 set_bit(STMMAC_DOWN, &priv->state);
4635 dev_close(priv->dev);
4636 dev_open(priv->dev, NULL);
4637 clear_bit(STMMAC_DOWN, &priv->state);
4638 clear_bit(STMMAC_RESETING, &priv->state);
4639 rtnl_unlock();
4640}
4641
4642static void stmmac_service_task(struct work_struct *work)
4643{
4644 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4645 service_task);
4646
4647 stmmac_reset_subtask(priv);
4648 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4649}
4650
4651/**
4652 * stmmac_hw_init - Init the MAC device
4653 * @priv: driver private structure
4654 * Description: this function is to configure the MAC device according to
4655 * some platform parameters or the HW capability register. It prepares the
4656 * driver to use either ring or chain modes and to setup either enhanced or
4657 * normal descriptors.
4658 */
4659static int stmmac_hw_init(struct stmmac_priv *priv)
4660{
4661 int ret;
4662
4663 /* dwmac-sun8i only work in chain mode */
4664 if (priv->plat->has_sun8i)
4665 chain_mode = 1;
4666 priv->chain_mode = chain_mode;
4667
4668 /* Initialize HW Interface */
4669 ret = stmmac_hwif_init(priv);
4670 if (ret)
4671 return ret;
4672
4673 /* Get the HW capability (new GMAC newer than 3.50a) */
4674 priv->hw_cap_support = stmmac_get_hw_features(priv);
4675 if (priv->hw_cap_support) {
4676 dev_info(priv->device, "DMA HW capability register supported\n");
4677
4678 /* We can override some gmac/dma configuration fields: e.g.
4679 * enh_desc, tx_coe (e.g. that are passed through the
4680 * platform) with the values from the HW capability
4681 * register (if supported).
4682 */
4683 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4684 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4685 priv->hw->pmt = priv->plat->pmt;
4686 if (priv->dma_cap.hash_tb_sz) {
4687 priv->hw->multicast_filter_bins =
4688 (BIT(priv->dma_cap.hash_tb_sz) << 5);
4689 priv->hw->mcast_bits_log2 =
4690 ilog2(priv->hw->multicast_filter_bins);
4691 }
4692
4693 /* TXCOE doesn't work in thresh DMA mode */
4694 if (priv->plat->force_thresh_dma_mode)
4695 priv->plat->tx_coe = 0;
4696 else
4697 priv->plat->tx_coe = priv->dma_cap.tx_coe;
4698
4699 /* In case of GMAC4 rx_coe is from HW cap register. */
4700 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4701
4702 if (priv->dma_cap.rx_coe_type2)
4703 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4704 else if (priv->dma_cap.rx_coe_type1)
4705 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4706
4707 } else {
4708 dev_info(priv->device, "No HW DMA feature register supported\n");
4709 }
4710
4711 if (priv->plat->rx_coe) {
4712 priv->hw->rx_csum = priv->plat->rx_coe;
4713 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4714 if (priv->synopsys_id < DWMAC_CORE_4_00)
4715 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4716 }
4717 if (priv->plat->tx_coe)
4718 dev_info(priv->device, "TX Checksum insertion supported\n");
4719
4720 if (priv->plat->pmt) {
4721 dev_info(priv->device, "Wake-Up On Lan supported\n");
4722 device_set_wakeup_capable(priv->device, 1);
4723 }
4724
4725 if (priv->dma_cap.tsoen)
4726 dev_info(priv->device, "TSO supported\n");
4727
4728 /* Run HW quirks, if any */
4729 if (priv->hwif_quirks) {
4730 ret = priv->hwif_quirks(priv);
4731 if (ret)
4732 return ret;
4733 }
4734
4735 /* Rx Watchdog is available in the COREs newer than the 3.40.
4736 * In some case, for example on bugged HW this feature
4737 * has to be disable and this can be done by passing the
4738 * riwt_off field from the platform.
4739 */
4740 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4741 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4742 priv->use_riwt = 1;
4743 dev_info(priv->device,
4744 "Enable RX Mitigation via HW Watchdog Timer\n");
4745 }
4746
4747 return 0;
4748}
4749
4750/**
4751 * stmmac_dvr_probe
4752 * @device: device pointer
4753 * @plat_dat: platform data pointer
4754 * @res: stmmac resource pointer
4755 * Description: this is the main probe function used to
4756 * call the alloc_etherdev, allocate the priv structure.
4757 * Return:
4758 * returns 0 on success, otherwise errno.
4759 */
4760int stmmac_dvr_probe(struct device *device,
4761 struct plat_stmmacenet_data *plat_dat,
4762 struct stmmac_resources *res)
4763{
4764 struct net_device *ndev = NULL;
4765 struct stmmac_priv *priv;
4766 u32 queue, rxq, maxq;
4767 int i, ret = 0;
4768
4769 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
4770 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
4771 if (!ndev)
4772 return -ENOMEM;
4773
4774 SET_NETDEV_DEV(ndev, device);
4775
4776 priv = netdev_priv(ndev);
4777 priv->device = device;
4778 priv->dev = ndev;
4779
4780 stmmac_set_ethtool_ops(ndev);
4781 priv->pause = pause;
4782 priv->plat = plat_dat;
4783 priv->ioaddr = res->addr;
4784 priv->dev->base_addr = (unsigned long)res->addr;
4785
4786 priv->dev->irq = res->irq;
4787 priv->wol_irq = res->wol_irq;
4788 priv->lpi_irq = res->lpi_irq;
4789
4790 if (!IS_ERR_OR_NULL(res->mac))
4791 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4792
4793 dev_set_drvdata(device, priv->dev);
4794
4795 /* Verify driver arguments */
4796 stmmac_verify_args();
4797
4798 /* Allocate workqueue */
4799 priv->wq = create_singlethread_workqueue("stmmac_wq");
4800 if (!priv->wq) {
4801 dev_err(priv->device, "failed to create workqueue\n");
4802 return -ENOMEM;
4803 }
4804
4805 INIT_WORK(&priv->service_task, stmmac_service_task);
4806
4807 /* Override with kernel parameters if supplied XXX CRS XXX
4808 * this needs to have multiple instances
4809 */
4810 if ((phyaddr >= 0) && (phyaddr <= 31))
4811 priv->plat->phy_addr = phyaddr;
4812
4813 if (priv->plat->stmmac_rst) {
4814 ret = reset_control_assert(priv->plat->stmmac_rst);
4815 reset_control_deassert(priv->plat->stmmac_rst);
4816 /* Some reset controllers have only reset callback instead of
4817 * assert + deassert callbacks pair.
4818 */
4819 if (ret == -ENOTSUPP)
4820 reset_control_reset(priv->plat->stmmac_rst);
4821 }
4822
4823 /* Init MAC and get the capabilities */
4824 ret = stmmac_hw_init(priv);
4825 if (ret)
4826 goto error_hw_init;
4827
4828 stmmac_check_ether_addr(priv);
4829
4830 /* Configure real RX and TX queues */
4831 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4832 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4833
4834 ndev->netdev_ops = &stmmac_netdev_ops;
4835
4836 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4837 NETIF_F_RXCSUM;
4838
4839 ret = stmmac_tc_init(priv, priv);
4840 if (!ret) {
4841 ndev->hw_features |= NETIF_F_HW_TC;
4842 }
4843
4844 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4845 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4846 if (priv->plat->has_gmac4)
4847 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
4848 priv->tso = true;
4849 dev_info(priv->device, "TSO feature enabled\n");
4850 }
4851
4852 if (priv->dma_cap.sphen) {
4853 ndev->hw_features |= NETIF_F_GRO;
4854 priv->sph = true;
4855 dev_info(priv->device, "SPH feature enabled\n");
4856 }
4857
4858 if (priv->dma_cap.addr64) {
4859 ret = dma_set_mask_and_coherent(device,
4860 DMA_BIT_MASK(priv->dma_cap.addr64));
4861 if (!ret) {
4862 dev_info(priv->device, "Using %d bits DMA width\n",
4863 priv->dma_cap.addr64);
4864
4865 /*
4866 * If more than 32 bits can be addressed, make sure to
4867 * enable enhanced addressing mode.
4868 */
4869 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
4870 priv->plat->dma_cfg->eame = true;
4871 } else {
4872 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
4873 if (ret) {
4874 dev_err(priv->device, "Failed to set DMA Mask\n");
4875 goto error_hw_init;
4876 }
4877
4878 priv->dma_cap.addr64 = 32;
4879 }
4880 }
4881
4882 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4883 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4884#ifdef STMMAC_VLAN_TAG_USED
4885 /* Both mac100 and gmac support receive VLAN tag detection */
4886 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4887 if (priv->dma_cap.vlhash) {
4888 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4889 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
4890 }
4891 if (priv->dma_cap.vlins) {
4892 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
4893 if (priv->dma_cap.dvlan)
4894 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
4895 }
4896#endif
4897 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4898
4899 /* Initialize RSS */
4900 rxq = priv->plat->rx_queues_to_use;
4901 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
4902 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
4903 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
4904
4905 if (priv->dma_cap.rssen && priv->plat->rss_en)
4906 ndev->features |= NETIF_F_RXHASH;
4907
4908 /* MTU range: 46 - hw-specific max */
4909 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4910 if (priv->plat->has_xgmac)
4911 ndev->max_mtu = XGMAC_JUMBO_LEN;
4912 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4913 ndev->max_mtu = JUMBO_LEN;
4914 else
4915 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4916 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4917 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
4918 */
4919 if ((priv->plat->maxmtu < ndev->max_mtu) &&
4920 (priv->plat->maxmtu >= ndev->min_mtu))
4921 ndev->max_mtu = priv->plat->maxmtu;
4922 else if (priv->plat->maxmtu < ndev->min_mtu)
4923 dev_warn(priv->device,
4924 "%s: warning: maxmtu having invalid value (%d)\n",
4925 __func__, priv->plat->maxmtu);
4926
4927 if (flow_ctrl)
4928 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
4929
4930 /* Setup channels NAPI */
4931 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4932
4933 for (queue = 0; queue < maxq; queue++) {
4934 struct stmmac_channel *ch = &priv->channel[queue];
4935
4936 spin_lock_init(&ch->lock);
4937 ch->priv_data = priv;
4938 ch->index = queue;
4939
4940 if (queue < priv->plat->rx_queues_to_use) {
4941 netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
4942 NAPI_POLL_WEIGHT);
4943 }
4944 if (queue < priv->plat->tx_queues_to_use) {
4945 netif_tx_napi_add(ndev, &ch->tx_napi,
4946 stmmac_napi_poll_tx,
4947 NAPI_POLL_WEIGHT);
4948 }
4949 }
4950
4951 mutex_init(&priv->lock);
4952
4953 /* If a specific clk_csr value is passed from the platform
4954 * this means that the CSR Clock Range selection cannot be
4955 * changed at run-time and it is fixed. Viceversa the driver'll try to
4956 * set the MDC clock dynamically according to the csr actual
4957 * clock input.
4958 */
4959 if (priv->plat->clk_csr >= 0)
4960 priv->clk_csr = priv->plat->clk_csr;
4961 else
4962 stmmac_clk_csr_set(priv);
4963
4964 stmmac_check_pcs_mode(priv);
4965
4966 if (priv->hw->pcs != STMMAC_PCS_TBI &&
4967 priv->hw->pcs != STMMAC_PCS_RTBI) {
4968 /* MDIO bus Registration */
4969 ret = stmmac_mdio_register(ndev);
4970 if (ret < 0) {
4971 dev_err(priv->device,
4972 "%s: MDIO bus (id: %d) registration failed",
4973 __func__, priv->plat->bus_id);
4974 goto error_mdio_register;
4975 }
4976 }
4977
4978 ret = stmmac_phy_setup(priv);
4979 if (ret) {
4980 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
4981 goto error_phy_setup;
4982 }
4983
4984 ret = register_netdev(ndev);
4985 if (ret) {
4986 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4987 __func__, ret);
4988 goto error_netdev_register;
4989 }
4990
4991 if (priv->plat->serdes_powerup) {
4992 ret = priv->plat->serdes_powerup(ndev,
4993 priv->plat->bsp_priv);
4994
4995 if (ret < 0)
4996 goto error_serdes_powerup;
4997 }
4998
4999#ifdef CONFIG_DEBUG_FS
5000 stmmac_init_fs(ndev);
5001#endif
5002
5003 return ret;
5004
5005error_serdes_powerup:
5006 unregister_netdev(ndev);
5007error_netdev_register:
5008 phylink_destroy(priv->phylink);
5009error_phy_setup:
5010 if (priv->hw->pcs != STMMAC_PCS_TBI &&
5011 priv->hw->pcs != STMMAC_PCS_RTBI)
5012 stmmac_mdio_unregister(ndev);
5013error_mdio_register:
5014 for (queue = 0; queue < maxq; queue++) {
5015 struct stmmac_channel *ch = &priv->channel[queue];
5016
5017 if (queue < priv->plat->rx_queues_to_use)
5018 netif_napi_del(&ch->rx_napi);
5019 if (queue < priv->plat->tx_queues_to_use)
5020 netif_napi_del(&ch->tx_napi);
5021 }
5022error_hw_init:
5023 destroy_workqueue(priv->wq);
5024
5025 return ret;
5026}
5027EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
5028
5029/**
5030 * stmmac_dvr_remove
5031 * @dev: device pointer
5032 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
5033 * changes the link status, releases the DMA descriptor rings.
5034 */
5035int stmmac_dvr_remove(struct device *dev)
5036{
5037 struct net_device *ndev = dev_get_drvdata(dev);
5038 struct stmmac_priv *priv = netdev_priv(ndev);
5039
5040 netdev_info(priv->dev, "%s: removing driver", __func__);
5041
5042 stmmac_stop_all_dma(priv);
5043
5044 if (priv->plat->serdes_powerdown)
5045 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5046
5047 stmmac_mac_set(priv, priv->ioaddr, false);
5048 netif_carrier_off(ndev);
5049 unregister_netdev(ndev);
5050#ifdef CONFIG_DEBUG_FS
5051 stmmac_exit_fs(ndev);
5052#endif
5053 phylink_destroy(priv->phylink);
5054 if (priv->plat->stmmac_rst)
5055 reset_control_assert(priv->plat->stmmac_rst);
5056 clk_disable_unprepare(priv->plat->pclk);
5057 clk_disable_unprepare(priv->plat->stmmac_clk);
5058 if (priv->hw->pcs != STMMAC_PCS_TBI &&
5059 priv->hw->pcs != STMMAC_PCS_RTBI)
5060 stmmac_mdio_unregister(ndev);
5061 destroy_workqueue(priv->wq);
5062 mutex_destroy(&priv->lock);
5063
5064 return 0;
5065}
5066EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
5067
5068/**
5069 * stmmac_suspend - suspend callback
5070 * @dev: device pointer
5071 * Description: this is the function to suspend the device and it is called
5072 * by the platform driver to stop the network queue, release the resources,
5073 * program the PMT register (for WoL), clean and release driver resources.
5074 */
5075int stmmac_suspend(struct device *dev)
5076{
5077 struct net_device *ndev = dev_get_drvdata(dev);
5078 struct stmmac_priv *priv = netdev_priv(ndev);
5079 u32 chan;
5080
5081 if (!ndev || !netif_running(ndev))
5082 return 0;
5083
5084 phylink_mac_change(priv->phylink, false);
5085
5086 mutex_lock(&priv->lock);
5087
5088 netif_device_detach(ndev);
5089 stmmac_stop_all_queues(priv);
5090
5091 stmmac_disable_all_queues(priv);
5092
5093 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
5094 del_timer_sync(&priv->tx_queue[chan].txtimer);
5095
5096 /* Stop TX/RX DMA */
5097 stmmac_stop_all_dma(priv);
5098
5099 if (priv->plat->serdes_powerdown)
5100 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5101
5102 /* Enable Power down mode by programming the PMT regs */
5103 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5104 stmmac_pmt(priv, priv->hw, priv->wolopts);
5105 priv->irq_wake = 1;
5106 } else {
5107 mutex_unlock(&priv->lock);
5108 rtnl_lock();
5109 if (device_may_wakeup(priv->device))
5110 phylink_speed_down(priv->phylink, false);
5111 phylink_stop(priv->phylink);
5112 rtnl_unlock();
5113 mutex_lock(&priv->lock);
5114
5115 stmmac_mac_set(priv, priv->ioaddr, false);
5116 pinctrl_pm_select_sleep_state(priv->device);
5117 /* Disable clock in case of PWM is off */
5118 if (priv->plat->clk_ptp_ref)
5119 clk_disable_unprepare(priv->plat->clk_ptp_ref);
5120 clk_disable_unprepare(priv->plat->pclk);
5121 clk_disable_unprepare(priv->plat->stmmac_clk);
5122 }
5123 mutex_unlock(&priv->lock);
5124
5125 priv->speed = SPEED_UNKNOWN;
5126 return 0;
5127}
5128EXPORT_SYMBOL_GPL(stmmac_suspend);
5129
5130/**
5131 * stmmac_reset_queues_param - reset queue parameters
5132 * @dev: device pointer
5133 */
5134static void stmmac_reset_queues_param(struct stmmac_priv *priv)
5135{
5136 u32 rx_cnt = priv->plat->rx_queues_to_use;
5137 u32 tx_cnt = priv->plat->tx_queues_to_use;
5138 u32 queue;
5139
5140 for (queue = 0; queue < rx_cnt; queue++) {
5141 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5142
5143 rx_q->cur_rx = 0;
5144 rx_q->dirty_rx = 0;
5145 }
5146
5147 for (queue = 0; queue < tx_cnt; queue++) {
5148 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5149
5150 tx_q->cur_tx = 0;
5151 tx_q->dirty_tx = 0;
5152 tx_q->mss = 0;
5153 }
5154}
5155
5156/**
5157 * stmmac_resume - resume callback
5158 * @dev: device pointer
5159 * Description: when resume this function is invoked to setup the DMA and CORE
5160 * in a usable state.
5161 */
5162int stmmac_resume(struct device *dev)
5163{
5164 struct net_device *ndev = dev_get_drvdata(dev);
5165 struct stmmac_priv *priv = netdev_priv(ndev);
5166 int ret;
5167
5168 if (!netif_running(ndev))
5169 return 0;
5170
5171 /* Power Down bit, into the PM register, is cleared
5172 * automatically as soon as a magic packet or a Wake-up frame
5173 * is received. Anyway, it's better to manually clear
5174 * this bit because it can generate problems while resuming
5175 * from another devices (e.g. serial console).
5176 */
5177 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5178 mutex_lock(&priv->lock);
5179 stmmac_pmt(priv, priv->hw, 0);
5180 mutex_unlock(&priv->lock);
5181 priv->irq_wake = 0;
5182 } else {
5183 pinctrl_pm_select_default_state(priv->device);
5184 /* enable the clk previously disabled */
5185 clk_prepare_enable(priv->plat->stmmac_clk);
5186 clk_prepare_enable(priv->plat->pclk);
5187 if (priv->plat->clk_ptp_ref)
5188 clk_prepare_enable(priv->plat->clk_ptp_ref);
5189 /* reset the phy so that it's ready */
5190 if (priv->mii)
5191 stmmac_mdio_reset(priv->mii);
5192 }
5193
5194 if (priv->plat->serdes_powerup) {
5195 ret = priv->plat->serdes_powerup(ndev,
5196 priv->plat->bsp_priv);
5197
5198 if (ret < 0)
5199 return ret;
5200 }
5201
5202 mutex_lock(&priv->lock);
5203
5204 stmmac_reset_queues_param(priv);
5205
5206 stmmac_clear_descriptors(priv);
5207
5208 stmmac_hw_setup(ndev, false);
5209 stmmac_init_coalesce(priv);
5210 stmmac_set_rx_mode(ndev);
5211
5212 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
5213
5214 stmmac_enable_all_queues(priv);
5215
5216 stmmac_start_all_queues(priv);
5217
5218 mutex_unlock(&priv->lock);
5219
5220 if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
5221 rtnl_lock();
5222 phylink_start(priv->phylink);
5223 /* We may have called phylink_speed_down before */
5224 phylink_speed_up(priv->phylink);
5225 rtnl_unlock();
5226 }
5227
5228 phylink_mac_change(priv->phylink, true);
5229
5230 netif_device_attach(ndev);
5231
5232 return 0;
5233}
5234EXPORT_SYMBOL_GPL(stmmac_resume);
5235
5236#ifndef MODULE
5237static int __init stmmac_cmdline_opt(char *str)
5238{
5239 char *opt;
5240
5241 if (!str || !*str)
5242 return -EINVAL;
5243 while ((opt = strsep(&str, ",")) != NULL) {
5244 if (!strncmp(opt, "debug:", 6)) {
5245 if (kstrtoint(opt + 6, 0, &debug))
5246 goto err;
5247 } else if (!strncmp(opt, "phyaddr:", 8)) {
5248 if (kstrtoint(opt + 8, 0, &phyaddr))
5249 goto err;
5250 } else if (!strncmp(opt, "buf_sz:", 7)) {
5251 if (kstrtoint(opt + 7, 0, &buf_sz))
5252 goto err;
5253 } else if (!strncmp(opt, "tc:", 3)) {
5254 if (kstrtoint(opt + 3, 0, &tc))
5255 goto err;
5256 } else if (!strncmp(opt, "watchdog:", 9)) {
5257 if (kstrtoint(opt + 9, 0, &watchdog))
5258 goto err;
5259 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
5260 if (kstrtoint(opt + 10, 0, &flow_ctrl))
5261 goto err;
5262 } else if (!strncmp(opt, "pause:", 6)) {
5263 if (kstrtoint(opt + 6, 0, &pause))
5264 goto err;
5265 } else if (!strncmp(opt, "eee_timer:", 10)) {
5266 if (kstrtoint(opt + 10, 0, &eee_timer))
5267 goto err;
5268 } else if (!strncmp(opt, "chain_mode:", 11)) {
5269 if (kstrtoint(opt + 11, 0, &chain_mode))
5270 goto err;
5271 }
5272 }
5273 return 0;
5274
5275err:
5276 pr_err("%s: ERROR broken module parameter conversion", __func__);
5277 return -EINVAL;
5278}
5279
5280__setup("stmmaceth=", stmmac_cmdline_opt);
5281#endif /* MODULE */
5282
5283static int __init stmmac_init(void)
5284{
5285#ifdef CONFIG_DEBUG_FS
5286 /* Create debugfs main directory if it doesn't exist yet */
5287 if (!stmmac_fs_dir)
5288 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
5289 register_netdevice_notifier(&stmmac_notifier);
5290#endif
5291
5292 return 0;
5293}
5294
5295static void __exit stmmac_exit(void)
5296{
5297#ifdef CONFIG_DEBUG_FS
5298 unregister_netdevice_notifier(&stmmac_notifier);
5299 debugfs_remove_recursive(stmmac_fs_dir);
5300#endif
5301}
5302
5303module_init(stmmac_init)
5304module_exit(stmmac_exit)
5305
5306MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
5307MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
5308MODULE_LICENSE("GPL");