v6.13.7
  1// SPDX-License-Identifier: GPL-2.0+
  2
  3#include <linux/bpf.h>
  4#include <linux/filter.h>
  5#include <net/page_pool/helpers.h>
  6
  7#include "lan966x_main.h"
  8
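/* Dataptr callbacks used by the shared FDMA library code when it
 * (re)initializes DCBs. For RX, every data block is backed by a freshly
 * allocated page-pool page; for TX and XDP TX the DMA address was already
 * prepared by the xmit path and is simply handed back here (offset by the
 * XDP headroom in the XDP case).
 */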
  9static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
 10				      u64 *dataptr)
 11{
 12	struct lan966x *lan966x = (struct lan966x *)fdma->priv;
 13	struct lan966x_rx *rx = &lan966x->rx;
 14	struct page *page;
 15
 16	page = page_pool_dev_alloc_pages(rx->page_pool);
 17	if (unlikely(!page))
 18		return -ENOMEM;
 19
 20	rx->page[dcb][db] = page;
 21	*dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
 22
 23	return 0;
 24}
 25
 26static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
 27				      u64 *dataptr)
 28{
 29	struct lan966x *lan966x = (struct lan966x *)fdma->priv;
 30
 31	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;
 32
 33	return 0;
 34}
 35
 36static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
 37					  u64 *dataptr)
 38{
 39	struct lan966x *lan966x = (struct lan966x *)fdma->priv;
 40
 41	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;
 42
 43	return 0;
 44}
 45
 46static int lan966x_fdma_channel_active(struct lan966x *lan966x)
 47{
 48	return lan_rd(lan966x, FDMA_CH_ACTIVE);
 49}
 50
 51static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
 52{
 53	struct fdma *fdma = &rx->fdma;
 54	int i, j;
 55
 56	for (i = 0; i < fdma->n_dcbs; ++i) {
 57		for (j = 0; j < fdma->n_dbs; ++j)
 58			page_pool_put_full_page(rx->page_pool,
 59						rx->page[i][j], false);
 60	}
 61}
 62
 63static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
 64{
 65	struct fdma *fdma = &rx->fdma;
 66	struct page *page;
 67
 68	page = rx->page[fdma->dcb_index][fdma->db_index];
 69	if (unlikely(!page))
 70		return;
 71
 72	page_pool_recycle_direct(rx->page_pool, page);
 73}
 74
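/* Create the RX page pool and register it as the XDP memory model for every
 * port's rxq. When an XDP program is attached, pages are mapped
 * bidirectionally so that XDP_TX can transmit directly out of the pool.
 */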
 75static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
 76{
 77	struct lan966x *lan966x = rx->lan966x;
 78	struct page_pool_params pp_params = {
 79		.order = rx->page_order,
 80		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 81		.pool_size = rx->fdma.n_dcbs,
 82		.nid = NUMA_NO_NODE,
 83		.dev = lan966x->dev,
 84		.dma_dir = DMA_FROM_DEVICE,
 85		.offset = XDP_PACKET_HEADROOM,
 86		.max_len = rx->max_mtu -
 87			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
 88	};
 89
 90	if (lan966x_xdp_present(lan966x))
 91		pp_params.dma_dir = DMA_BIDIRECTIONAL;
 92
 93	rx->page_pool = page_pool_create(&pp_params);
 94
 95	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
 96		struct lan966x_port *port;
 97
 98		if (!lan966x->ports[i])
 99			continue;
100
101		port = lan966x->ports[i];
102		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
103		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
104					   rx->page_pool);
105	}
106
107	return PTR_ERR_OR_ZERO(rx->page_pool);
108}
109
110static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
111{
112	struct lan966x *lan966x = rx->lan966x;
113	struct fdma *fdma = &rx->fdma;
114	int err;
115
116	if (lan966x_fdma_rx_alloc_page_pool(rx))
117		return PTR_ERR(rx->page_pool);
118
119	err = fdma_alloc_coherent(lan966x->dev, fdma);
120	if (err)
121		return err;
122
123	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
124		       FDMA_DCB_STATUS_INTR);
125
126	return 0;
127}
128
129static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
130{
131	struct lan966x *lan966x = rx->lan966x;
132	struct fdma *fdma = &rx->fdma;
133	u32 mask;
134
 135	/* When activating a channel, the address of the first DCB must be
 136	 * written first and only then can the channel be activated.
 137	 */
138	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
139	       FDMA_DCB_LLP(fdma->channel_id));
140	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
141	       FDMA_DCB_LLP1(fdma->channel_id));
142
143	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
144	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
145	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
146	       FDMA_CH_CFG_CH_MEM_SET(1),
147	       lan966x, FDMA_CH_CFG(fdma->channel_id));
148
149	/* Start fdma */
150	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
151		FDMA_PORT_CTRL_XTR_STOP,
152		lan966x, FDMA_PORT_CTRL(0));
153
154	/* Enable interrupts */
155	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
156	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
157	mask |= BIT(fdma->channel_id);
158	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
159		FDMA_INTR_DB_ENA_INTR_DB_ENA,
160		lan966x, FDMA_INTR_DB_ENA);
161
162	/* Activate the channel */
163	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
164		FDMA_CH_ACTIVATE_CH_ACTIVATE,
165		lan966x, FDMA_CH_ACTIVATE);
166}
167
168static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
169{
170	struct lan966x *lan966x = rx->lan966x;
171	struct fdma *fdma = &rx->fdma;
172	u32 val;
173
174	/* Disable the channel */
175	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
176		FDMA_CH_DISABLE_CH_DISABLE,
177		lan966x, FDMA_CH_DISABLE);
178
179	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
180				  val, !(val & BIT(fdma->channel_id)),
181				  READL_SLEEP_US, READL_TIMEOUT_US);
182
183	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
184		FDMA_CH_DB_DISCARD_DB_DISCARD,
185		lan966x, FDMA_CH_DB_DISCARD);
186}
187
188static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
189{
190	struct lan966x *lan966x = rx->lan966x;
191
192	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
193		FDMA_CH_RELOAD_CH_RELOAD,
194		lan966x, FDMA_CH_RELOAD);
195}
196
197static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
198{
199	struct lan966x *lan966x = tx->lan966x;
200	struct fdma *fdma = &tx->fdma;
201	int err;
202
203	tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
204			       GFP_KERNEL);
205	if (!tx->dcbs_buf)
206		return -ENOMEM;
207
208	err = fdma_alloc_coherent(lan966x->dev, fdma);
209	if (err)
210		goto out;
211
212	fdma_dcbs_init(fdma, 0, 0);
213
214	return 0;
215
216out:
217	kfree(tx->dcbs_buf);
218	return -ENOMEM;
219}
220
221static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
222{
223	struct lan966x *lan966x = tx->lan966x;
224
225	kfree(tx->dcbs_buf);
226	fdma_free_coherent(lan966x->dev, &tx->fdma);
227}
228
229static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
230{
231	struct lan966x *lan966x = tx->lan966x;
232	struct fdma *fdma = &tx->fdma;
233	u32 mask;
234
 235	/* When activating a channel, the address of the first DCB must be
 236	 * written first and only then can the channel be activated.
 237	 */
238	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
239	       FDMA_DCB_LLP(fdma->channel_id));
240	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
241	       FDMA_DCB_LLP1(fdma->channel_id));
242
243	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
244	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
245	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
246	       FDMA_CH_CFG_CH_MEM_SET(1),
247	       lan966x, FDMA_CH_CFG(fdma->channel_id));
248
249	/* Start fdma */
250	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
251		FDMA_PORT_CTRL_INJ_STOP,
252		lan966x, FDMA_PORT_CTRL(0));
253
254	/* Enable interrupts */
255	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
256	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
257	mask |= BIT(fdma->channel_id);
258	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
259		FDMA_INTR_DB_ENA_INTR_DB_ENA,
260		lan966x, FDMA_INTR_DB_ENA);
261
262	/* Activate the channel */
263	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
264		FDMA_CH_ACTIVATE_CH_ACTIVATE,
265		lan966x, FDMA_CH_ACTIVATE);
266}
267
268static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
269{
270	struct lan966x *lan966x = tx->lan966x;
271	struct fdma *fdma = &tx->fdma;
272	u32 val;
273
274	/* Disable the channel */
275	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
276		FDMA_CH_DISABLE_CH_DISABLE,
277		lan966x, FDMA_CH_DISABLE);
278
279	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
280				  val, !(val & BIT(fdma->channel_id)),
281				  READL_SLEEP_US, READL_TIMEOUT_US);
282
283	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
284		FDMA_CH_DB_DISCARD_DB_DISCARD,
285		lan966x, FDMA_CH_DB_DISCARD);
286
287	tx->activated = false;
288}
289
290static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
291{
292	struct lan966x *lan966x = tx->lan966x;
293
294	/* Write the registers to reload the channel */
295	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
296		FDMA_CH_RELOAD_CH_RELOAD,
297		lan966x, FDMA_CH_RELOAD);
298}
299
300static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
301{
302	struct lan966x_port *port;
303	int i;
304
305	for (i = 0; i < lan966x->num_phys_ports; ++i) {
306		port = lan966x->ports[i];
307		if (!port)
308			continue;
309
310		if (netif_queue_stopped(port->dev))
311			netif_wake_queue(port->dev);
312	}
313}
314
315static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
316{
317	struct lan966x_port *port;
318	int i;
319
320	for (i = 0; i < lan966x->num_phys_ports; ++i) {
321		port = lan966x->ports[i];
322		if (!port)
323			continue;
324
325		netif_stop_queue(port->dev);
326	}
327}
328
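/* Reclaim completed TX descriptors: unmap and release the skb, XDP frame or
 * page attached to each finished DCB, update the owning netdev's counters
 * and, if anything was freed, wake up queues stopped on a full TX ring.
 */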
329static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
330{
331	struct lan966x_tx *tx = &lan966x->tx;
332	struct lan966x_rx *rx = &lan966x->rx;
333	struct lan966x_tx_dcb_buf *dcb_buf;
334	struct fdma *fdma = &tx->fdma;
335	struct xdp_frame_bulk bq;
336	unsigned long flags;
337	bool clear = false;
338	struct fdma_db *db;
339	int i;
340
341	xdp_frame_bulk_init(&bq);
342
343	spin_lock_irqsave(&lan966x->tx_lock, flags);
344	for (i = 0; i < fdma->n_dcbs; ++i) {
345		dcb_buf = &tx->dcbs_buf[i];
346
347		if (!dcb_buf->used)
348			continue;
349
350		db = fdma_db_get(fdma, i, 0);
351		if (!fdma_db_is_done(db))
352			continue;
353
354		dcb_buf->dev->stats.tx_packets++;
355		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
356
357		dcb_buf->used = false;
358		if (dcb_buf->use_skb) {
359			dma_unmap_single(lan966x->dev,
360					 dcb_buf->dma_addr,
361					 dcb_buf->len,
362					 DMA_TO_DEVICE);
363
364			if (!dcb_buf->ptp)
365				napi_consume_skb(dcb_buf->data.skb, weight);
366		} else {
367			if (dcb_buf->xdp_ndo)
368				dma_unmap_single(lan966x->dev,
369						 dcb_buf->dma_addr,
370						 dcb_buf->len,
371						 DMA_TO_DEVICE);
372
373			if (dcb_buf->xdp_ndo)
374				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
375			else
376				page_pool_recycle_direct(rx->page_pool,
377							 dcb_buf->data.page);
378		}
379
380		clear = true;
381	}
382
383	xdp_flush_frame_bulk(&bq);
384
385	if (clear)
386		lan966x_fdma_wakeup_netdev(lan966x);
387
388	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
389}
390
391static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
392{
393	struct lan966x *lan966x = rx->lan966x;
394	struct fdma *fdma = &rx->fdma;
395	struct lan966x_port *port;
396	struct fdma_db *db;
397	struct page *page;
398
399	db = fdma_db_next_get(fdma);
400	page = rx->page[fdma->dcb_index][fdma->db_index];
401	if (unlikely(!page))
402		return FDMA_ERROR;
403
404	dma_sync_single_for_cpu(lan966x->dev,
405				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
406				FDMA_DCB_STATUS_BLOCKL(db->status),
407				DMA_FROM_DEVICE);
408
409	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
410				 src_port);
411	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
412		return FDMA_ERROR;
413
414	port = lan966x->ports[*src_port];
415	if (!lan966x_xdp_port_present(port))
416		return FDMA_PASS;
417
418	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
419}
420
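/* Build an sk_buff around a completed RX page: reserve the XDP headroom,
 * read the RX timestamp out of the IFH before stripping it, drop the
 * trailing FCS unless the netdev asked for it, and set the offload forward
 * mark for frames received on bridged ports.
 */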
421static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
422						 u64 src_port)
423{
424	struct lan966x *lan966x = rx->lan966x;
425	struct fdma *fdma = &rx->fdma;
426	struct sk_buff *skb;
427	struct fdma_db *db;
428	struct page *page;
429	u64 timestamp;
430
431	/* Get the received frame and unmap it */
432	db = fdma_db_next_get(fdma);
433	page = rx->page[fdma->dcb_index][fdma->db_index];
434
435	skb = build_skb(page_address(page), fdma->db_size);
436	if (unlikely(!skb))
437		goto free_page;
438
439	skb_mark_for_recycle(skb);
440
441	skb_reserve(skb, XDP_PACKET_HEADROOM);
442	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
443
444	lan966x_ifh_get_timestamp(skb->data, &timestamp);
445
446	skb->dev = lan966x->ports[src_port]->dev;
447	skb_pull(skb, IFH_LEN_BYTES);
448
449	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
450		skb_trim(skb, skb->len - ETH_FCS_LEN);
451
452	lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
453	skb->protocol = eth_type_trans(skb, skb->dev);
454
455	if (lan966x->bridge_mask & BIT(src_port)) {
456		skb->offload_fwd_mark = 1;
457
458		skb_reset_network_header(skb);
459		if (!lan966x_hw_offload(lan966x, src_port, skb))
460			skb->offload_fwd_mark = 0;
461	}
462
463	skb->dev->stats.rx_bytes += skb->len;
464	skb->dev->stats.rx_packets++;
465
466	return skb;
467
468free_page:
469	page_pool_recycle_direct(rx->page_pool, page);
470
471	return NULL;
472}
473
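/* NAPI poll: first reclaim finished TX buffers, then extract up to 'weight'
 * RX frames (running the XDP program on them where one is attached), re-add
 * the consumed DCBs to the ring and reload the channel, flush any pending
 * XDP redirects and re-enable the doorbell interrupts once done.
 */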
474static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
475{
476	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
477	struct lan966x_rx *rx = &lan966x->rx;
478	int old_dcb, dcb_reload, counter = 0;
479	struct fdma *fdma = &rx->fdma;
480	bool redirect = false;
481	struct sk_buff *skb;
482	u64 src_port;
483
484	dcb_reload = fdma->dcb_index;
485
486	lan966x_fdma_tx_clear_buf(lan966x, weight);
487
 488	/* Get all received skbs */
489	while (counter < weight) {
490		if (!fdma_has_frames(fdma))
491			break;
492
493		counter++;
494
495		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
496		case FDMA_PASS:
497			break;
498		case FDMA_ERROR:
499			lan966x_fdma_rx_free_page(rx);
500			fdma_dcb_advance(fdma);
501			goto allocate_new;
502		case FDMA_REDIRECT:
503			redirect = true;
504			fallthrough;
505		case FDMA_TX:
506			fdma_dcb_advance(fdma);
507			continue;
508		case FDMA_DROP:
509			lan966x_fdma_rx_free_page(rx);
510			fdma_dcb_advance(fdma);
511			continue;
512		}
513
514		skb = lan966x_fdma_rx_get_frame(rx, src_port);
515		fdma_dcb_advance(fdma);
516		if (!skb)
517			goto allocate_new;
518
519		napi_gro_receive(&lan966x->napi, skb);
520	}
521
522allocate_new:
523	/* Allocate new pages and map them */
524	while (dcb_reload != fdma->dcb_index) {
525		old_dcb = dcb_reload;
526		dcb_reload++;
527		dcb_reload &= fdma->n_dcbs - 1;
528
529		fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
530			     FDMA_DCB_STATUS_INTR);
531
532		lan966x_fdma_rx_reload(rx);
533	}
534
535	if (redirect)
536		xdp_do_flush();
537
538	if (counter < weight && napi_complete_done(napi, counter))
539		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
540
541	return counter;
542}
543
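/* FDMA interrupt handler: mask and acknowledge doorbell interrupts and let
 * NAPI do the actual frame processing; error interrupts are only logged and
 * acknowledged.
 */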
544irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
545{
546	struct lan966x *lan966x = args;
547	u32 db, err, err_type;
548
549	db = lan_rd(lan966x, FDMA_INTR_DB);
550	err = lan_rd(lan966x, FDMA_INTR_ERR);
551
552	if (db) {
553		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
554		lan_wr(db, lan966x, FDMA_INTR_DB);
555
556		napi_schedule(&lan966x->napi);
557	}
558
559	if (err) {
560		err_type = lan_rd(lan966x, FDMA_ERRORS);
561
562		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
563
564		lan_wr(err, lan966x, FDMA_INTR_ERR);
565		lan_wr(err_type, lan966x, FDMA_ERRORS);
566	}
567
568	return IRQ_HANDLED;
569}
570
571static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
572{
573	struct lan966x_tx_dcb_buf *dcb_buf;
574	struct fdma *fdma = &tx->fdma;
575	int i;
576
577	for (i = 0; i < fdma->n_dcbs; ++i) {
578		dcb_buf = &tx->dcbs_buf[i];
579		if (!dcb_buf->used &&
580		    !fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i]))
581			return i;
582	}
583
584	return -1;
585}
586
587static void lan966x_fdma_tx_start(struct lan966x_tx *tx)
588{
589	struct lan966x *lan966x = tx->lan966x;
590
591	if (likely(lan966x->tx.activated)) {
592		lan966x_fdma_tx_reload(tx);
593	} else {
 594		/* This is the first use of the channel, so just activate it */
595		lan966x->tx.activated = true;
596		lan966x_fdma_tx_activate(tx);
597	}
598}
599
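/* Transmit an XDP buffer. When len is 0, ptr is an xdp_frame (the
 * ndo_xdp_xmit path) and has to be DMA-mapped here; otherwise ptr is a
 * page-pool page (the XDP_TX path) whose existing mapping only needs to be
 * synced for the device. In both cases a fresh IFH is written in front of
 * the frame data.
 */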
600int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
601{
602	struct lan966x *lan966x = port->lan966x;
603	struct lan966x_tx_dcb_buf *next_dcb_buf;
604	struct lan966x_tx *tx = &lan966x->tx;
605	struct xdp_frame *xdpf;
606	dma_addr_t dma_addr;
607	struct page *page;
608	int next_to_use;
609	__be32 *ifh;
610	int ret = 0;
611
612	spin_lock(&lan966x->tx_lock);
613
614	/* Get next index */
615	next_to_use = lan966x_fdma_get_next_dcb(tx);
616	if (next_to_use < 0) {
617		netif_stop_queue(port->dev);
618		ret = NETDEV_TX_BUSY;
619		goto out;
620	}
621
622	/* Get the next buffer */
623	next_dcb_buf = &tx->dcbs_buf[next_to_use];
624
625	/* Generate new IFH */
626	if (!len) {
627		xdpf = ptr;
628
629		if (xdpf->headroom < IFH_LEN_BYTES) {
630			ret = NETDEV_TX_OK;
631			goto out;
632		}
633
634		ifh = xdpf->data - IFH_LEN_BYTES;
635		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
636		lan966x_ifh_set_bypass(ifh, 1);
637		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
638
639		dma_addr = dma_map_single(lan966x->dev,
640					  xdpf->data - IFH_LEN_BYTES,
641					  xdpf->len + IFH_LEN_BYTES,
642					  DMA_TO_DEVICE);
643		if (dma_mapping_error(lan966x->dev, dma_addr)) {
644			ret = NETDEV_TX_OK;
645			goto out;
646		}
647
648		next_dcb_buf->data.xdpf = xdpf;
649		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
650	} else {
651		page = ptr;
652
653		ifh = page_address(page) + XDP_PACKET_HEADROOM;
654		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
655		lan966x_ifh_set_bypass(ifh, 1);
656		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
657
658		dma_addr = page_pool_get_dma_addr(page);
659		dma_sync_single_for_device(lan966x->dev,
660					   dma_addr + XDP_PACKET_HEADROOM,
661					   len + IFH_LEN_BYTES,
662					   DMA_TO_DEVICE);
663
664		next_dcb_buf->data.page = page;
665		next_dcb_buf->len = len + IFH_LEN_BYTES;
666	}
667
668	/* Fill up the buffer */
669	next_dcb_buf->use_skb = false;
670	next_dcb_buf->xdp_ndo = !len;
671	next_dcb_buf->dma_addr = dma_addr;
672	next_dcb_buf->used = true;
673	next_dcb_buf->ptp = false;
674	next_dcb_buf->dev = port->dev;
675
676	__fdma_dcb_add(&tx->fdma,
677		       next_to_use,
678		       0,
679		       FDMA_DCB_STATUS_INTR |
680		       FDMA_DCB_STATUS_SOF |
681		       FDMA_DCB_STATUS_EOF |
682		       FDMA_DCB_STATUS_BLOCKO(0) |
683		       FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len),
684		       &fdma_nextptr_cb,
685		       &lan966x_fdma_xdp_tx_dataptr_cb);
686
687	/* Start the transmission */
688	lan966x_fdma_tx_start(tx);
689
690out:
691	spin_unlock(&lan966x->tx_lock);
692
693	return ret;
694}
695
696int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
697{
698	struct lan966x_port *port = netdev_priv(dev);
699	struct lan966x *lan966x = port->lan966x;
700	struct lan966x_tx_dcb_buf *next_dcb_buf;
701	struct lan966x_tx *tx = &lan966x->tx;
702	int needed_headroom;
703	int needed_tailroom;
704	dma_addr_t dma_addr;
705	int next_to_use;
706	int err;
707
708	/* Get next index */
709	next_to_use = lan966x_fdma_get_next_dcb(tx);
710	if (next_to_use < 0) {
711		netif_stop_queue(dev);
712		return NETDEV_TX_BUSY;
713	}
714
715	if (skb_put_padto(skb, ETH_ZLEN)) {
716		dev->stats.tx_dropped++;
717		return NETDEV_TX_OK;
718	}
719
720	/* skb processing */
721	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
722	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
723	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
724		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
725				       GFP_ATOMIC);
726		if (unlikely(err)) {
727			dev->stats.tx_dropped++;
728			err = NETDEV_TX_OK;
729			goto release;
730		}
731	}
732
733	skb_tx_timestamp(skb);
734	skb_push(skb, IFH_LEN_BYTES);
735	memcpy(skb->data, ifh, IFH_LEN_BYTES);
736	skb_put(skb, 4);
737
738	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
739				  DMA_TO_DEVICE);
740	if (dma_mapping_error(lan966x->dev, dma_addr)) {
741		dev->stats.tx_dropped++;
742		err = NETDEV_TX_OK;
743		goto release;
744	}
745
746	/* Fill up the buffer */
747	next_dcb_buf = &tx->dcbs_buf[next_to_use];
748	next_dcb_buf->use_skb = true;
749	next_dcb_buf->data.skb = skb;
750	next_dcb_buf->xdp_ndo = false;
751	next_dcb_buf->len = skb->len;
752	next_dcb_buf->dma_addr = dma_addr;
753	next_dcb_buf->used = true;
754	next_dcb_buf->ptp = false;
755	next_dcb_buf->dev = dev;
756
757	fdma_dcb_add(&tx->fdma,
758		     next_to_use,
759		     0,
760		     FDMA_DCB_STATUS_INTR |
761		     FDMA_DCB_STATUS_SOF |
762		     FDMA_DCB_STATUS_EOF |
763		     FDMA_DCB_STATUS_BLOCKO(0) |
764		     FDMA_DCB_STATUS_BLOCKL(skb->len));
765
766	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
767	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
768		next_dcb_buf->ptp = true;
769
770	/* Start the transmission */
771	lan966x_fdma_tx_start(tx);
772
773	return NETDEV_TX_OK;
774
775release:
776	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
777	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
778		lan966x_ptp_txtstamp_release(port, skb);
779
780	dev_kfree_skb_any(skb);
781	return err;
782}
783
784static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
785{
786	int max_mtu = 0;
787	int i;
788
789	for (i = 0; i < lan966x->num_phys_ports; ++i) {
790		struct lan966x_port *port;
791		int mtu;
792
793		port = lan966x->ports[i];
794		if (!port)
795			continue;
796
797		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
798		if (mtu > max_mtu)
799			max_mtu = mtu;
800	}
801
802	return max_mtu;
803}
804
805static int lan966x_qsys_sw_status(struct lan966x *lan966x)
806{
807	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
808}
809
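/* Rebuild the RX path for a new maximum frame size: quiesce NAPI and the
 * netdevs, tear down the RX channel, allocate a new page pool and DCB ring
 * sized for new_mtu and restart RX, then free the old resources. If the new
 * allocation fails, the previous FDMA state and page pool are put back.
 */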
810static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
811{
812	struct page_pool *page_pool;
813	struct fdma fdma_rx_old;
814	int err;
815
 816	/* Store these so they can be freed later */
817	memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma));
818	page_pool = lan966x->rx.page_pool;
819
820	napi_synchronize(&lan966x->napi);
821	napi_disable(&lan966x->napi);
822	lan966x_fdma_stop_netdev(lan966x);
823
824	lan966x_fdma_rx_disable(&lan966x->rx);
825	lan966x_fdma_rx_free_pages(&lan966x->rx);
826	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
827	lan966x->rx.max_mtu = new_mtu;
828	err = lan966x_fdma_rx_alloc(&lan966x->rx);
829	if (err)
830		goto restore;
831	lan966x_fdma_rx_start(&lan966x->rx);
832
833	fdma_free_coherent(lan966x->dev, &fdma_rx_old);
834
835	page_pool_destroy(page_pool);
836
837	lan966x_fdma_wakeup_netdev(lan966x);
838	napi_enable(&lan966x->napi);
839
840	return err;
841restore:
842	lan966x->rx.page_pool = page_pool;
843	memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma));
844	lan966x_fdma_rx_start(&lan966x->rx);
845
846	return err;
847}
848
849static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
850{
851	return lan966x_fdma_get_max_mtu(lan966x) +
852	       IFH_LEN_BYTES +
853	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
854	       VLAN_HLEN * 2 +
855	       XDP_PACKET_HEADROOM;
856}
857
858static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
859{
860	int err;
861	u32 val;
862
863	/* Disable the CPU port */
864	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
865		QSYS_SW_PORT_MODE_PORT_ENA,
866		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
867
868	/* Flush the CPU queues */
869	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
870			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
871			   READL_SLEEP_US, READL_TIMEOUT_US);
872
873	/* Add a sleep in case there are frames between the queues and the CPU
874	 * port
875	 */
876	usleep_range(1000, 2000);
877
878	err = lan966x_fdma_reload(lan966x, max_mtu);
879
 880	/* Re-enable the CPU port */
881	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
882		QSYS_SW_PORT_MODE_PORT_ENA,
883		lan966x,  QSYS_SW_PORT_MODE(CPU_PORT));
884
885	return err;
886}
887
888int lan966x_fdma_change_mtu(struct lan966x *lan966x)
889{
890	int max_mtu;
891
892	max_mtu = lan966x_fdma_get_max_frame(lan966x);
893	if (max_mtu == lan966x->rx.max_mtu)
894		return 0;
895
896	return __lan966x_fdma_reload(lan966x, max_mtu);
897}
898
899int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
900{
901	int max_mtu;
902
903	max_mtu = lan966x_fdma_get_max_frame(lan966x);
904	return __lan966x_fdma_reload(lan966x, max_mtu);
905}
906
907void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
908{
909	if (lan966x->fdma_ndev)
910		return;
911
912	lan966x->fdma_ndev = dev;
913	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
914	napi_enable(&lan966x->napi);
915}
916
917void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
918{
919	if (lan966x->fdma_ndev == dev) {
920		netif_napi_del(&lan966x->napi);
921		lan966x->fdma_ndev = NULL;
922	}
923}
924
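/* One-time FDMA setup: describe the extraction (RX) and injection (TX)
 * channels for the FDMA library (channel ids, DCB/DB counts, buffer sizes
 * and the dataptr callbacks above), allocate both rings and start RX. The
 * TX channel is only activated when the first frame is transmitted.
 */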
925int lan966x_fdma_init(struct lan966x *lan966x)
926{
927	int err;
928
929	if (!lan966x->fdma)
930		return 0;
931
932	lan966x->rx.lan966x = lan966x;
933	lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
934	lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
935	lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
936	lan966x->rx.fdma.priv = lan966x;
937	lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
938	lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
939	lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
940	lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
941	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
942	lan966x->tx.lan966x = lan966x;
943	lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
944	lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
945	lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
946	lan966x->tx.fdma.priv = lan966x;
947	lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
948	lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
949	lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
950	lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;
951
952	err = lan966x_fdma_rx_alloc(&lan966x->rx);
953	if (err)
954		return err;
955
956	err = lan966x_fdma_tx_alloc(&lan966x->tx);
957	if (err) {
958		fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
959		return err;
960	}
961
962	lan966x_fdma_rx_start(&lan966x->rx);
963
964	return 0;
965}
966
967void lan966x_fdma_deinit(struct lan966x *lan966x)
968{
969	if (!lan966x->fdma)
970		return;
971
972	lan966x_fdma_rx_disable(&lan966x->rx);
973	lan966x_fdma_tx_disable(&lan966x->tx);
974
975	napi_synchronize(&lan966x->napi);
976	napi_disable(&lan966x->napi);
977
978	lan966x_fdma_rx_free_pages(&lan966x->rx);
979	fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
980	page_pool_destroy(lan966x->rx.page_pool);
981	lan966x_fdma_tx_free(&lan966x->tx);
982}
v6.2
   1// SPDX-License-Identifier: GPL-2.0+
   2
   3#include <linux/bpf.h>
   4#include <linux/filter.h>
   5
   6#include "lan966x_main.h"
   7
   8static int lan966x_fdma_channel_active(struct lan966x *lan966x)
   9{
  10	return lan_rd(lan966x, FDMA_CH_ACTIVE);
  11}
  12
  13static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
  14					       struct lan966x_db *db)
  15{
  16	struct page *page;
  17
  18	page = page_pool_dev_alloc_pages(rx->page_pool);
  19	if (unlikely(!page))
  20		return NULL;
  21
  22	db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
  23
  24	return page;
  25}
  26
  27static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
  28{
  29	int i, j;
  30
  31	for (i = 0; i < FDMA_DCB_MAX; ++i) {
  32		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
  33			page_pool_put_full_page(rx->page_pool,
  34						rx->page[i][j], false);
  35	}
  36}
  37
  38static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
  39{
  40	struct page *page;
  41
  42	page = rx->page[rx->dcb_index][rx->db_index];
  43	if (unlikely(!page))
  44		return;
  45
  46	page_pool_recycle_direct(rx->page_pool, page);
  47}
  48
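/* Append a DCB at the tail of the RX ring: mark every data block as
 * interrupt-generating, terminate the DCB with FDMA_DCB_INVALID_DATA and
 * link the previous tail entry to it.
 */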
  49static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
  50				    struct lan966x_rx_dcb *dcb,
  51				    u64 nextptr)
  52{
  53	struct lan966x_db *db;
  54	int i;
  55
  56	for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
  57		db = &dcb->db[i];
  58		db->status = FDMA_DCB_STATUS_INTR;
  59	}
  60
  61	dcb->nextptr = FDMA_DCB_INVALID_DATA;
  62	dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
  63
  64	rx->last_entry->nextptr = nextptr;
  65	rx->last_entry = dcb;
  66}
  67
  68static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
  69{
  70	struct lan966x *lan966x = rx->lan966x;
  71	struct page_pool_params pp_params = {
  72		.order = rx->page_order,
  73		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
  74		.pool_size = FDMA_DCB_MAX,
  75		.nid = NUMA_NO_NODE,
  76		.dev = lan966x->dev,
  77		.dma_dir = DMA_FROM_DEVICE,
  78		.offset = XDP_PACKET_HEADROOM,
  79		.max_len = rx->max_mtu -
  80			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
  81	};
  82
  83	if (lan966x_xdp_present(lan966x))
  84		pp_params.dma_dir = DMA_BIDIRECTIONAL;
  85
  86	rx->page_pool = page_pool_create(&pp_params);
  87
  88	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
  89		struct lan966x_port *port;
  90
  91		if (!lan966x->ports[i])
  92			continue;
  93
  94		port = lan966x->ports[i];
  95		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
  96		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
  97					   rx->page_pool);
  98	}
  99
 100	return PTR_ERR_OR_ZERO(rx->page_pool);
 101}
 102
 103static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
 104{
 105	struct lan966x *lan966x = rx->lan966x;
 106	struct lan966x_rx_dcb *dcb;
 107	struct lan966x_db *db;
 108	struct page *page;
 109	int i, j;
 110	int size;
 111
 112	if (lan966x_fdma_rx_alloc_page_pool(rx))
 113		return PTR_ERR(rx->page_pool);
 114
 115	/* calculate how many pages are needed to allocate the dcbs */
 116	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
 117	size = ALIGN(size, PAGE_SIZE);
 118
 119	rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
 120	if (!rx->dcbs)
 121		return -ENOMEM;
 122
 123	rx->last_entry = rx->dcbs;
 124	rx->db_index = 0;
 125	rx->dcb_index = 0;
 126
 127	/* Now for each dcb allocate the dbs */
 128	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 129		dcb = &rx->dcbs[i];
 130		dcb->info = 0;
 131
 132		/* For each db allocate a page and map it to the DB dataptr. */
 133		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
 134			db = &dcb->db[j];
 135			page = lan966x_fdma_rx_alloc_page(rx, db);
 136			if (!page)
 137				return -ENOMEM;
 138
 139			db->status = 0;
 140			rx->page[i][j] = page;
 141		}
 142
 143		lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
 144	}
 145
 146	return 0;
 147}
 148
 149static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
 150{
 151	rx->dcb_index++;
 152	rx->dcb_index &= FDMA_DCB_MAX - 1;
 153}
 154
 155static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
 156{
 157	struct lan966x *lan966x = rx->lan966x;
 158	u32 size;
 159
  160	/* Now it is possible to clean up the dcbs */
 161	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
 162	size = ALIGN(size, PAGE_SIZE);
 163	dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
 164}
 165
 166static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
 167{
 168	struct lan966x *lan966x = rx->lan966x;
 169	u32 mask;
 170
  171	/* When activating a channel, the address of the first DCB must be
  172	 * written first and only then can the channel be activated.
  173	 */
 174	lan_wr(lower_32_bits((u64)rx->dma), lan966x,
 175	       FDMA_DCB_LLP(rx->channel_id));
 176	lan_wr(upper_32_bits((u64)rx->dma), lan966x,
 177	       FDMA_DCB_LLP1(rx->channel_id));
 178
 179	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
 180	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
 181	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
 182	       FDMA_CH_CFG_CH_MEM_SET(1),
 183	       lan966x, FDMA_CH_CFG(rx->channel_id));
 184
 185	/* Start fdma */
 186	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
 187		FDMA_PORT_CTRL_XTR_STOP,
 188		lan966x, FDMA_PORT_CTRL(0));
 189
 190	/* Enable interrupts */
 191	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
 192	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
 193	mask |= BIT(rx->channel_id);
 194	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
 195		FDMA_INTR_DB_ENA_INTR_DB_ENA,
 196		lan966x, FDMA_INTR_DB_ENA);
 197
 198	/* Activate the channel */
 199	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
 200		FDMA_CH_ACTIVATE_CH_ACTIVATE,
 201		lan966x, FDMA_CH_ACTIVATE);
 202}
 203
 204static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
 205{
 206	struct lan966x *lan966x = rx->lan966x;
 207	u32 val;
 208
 209	/* Disable the channel */
 210	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
 211		FDMA_CH_DISABLE_CH_DISABLE,
 212		lan966x, FDMA_CH_DISABLE);
 213
 214	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
 215				  val, !(val & BIT(rx->channel_id)),
 216				  READL_SLEEP_US, READL_TIMEOUT_US);
 217
 218	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
 219		FDMA_CH_DB_DISCARD_DB_DISCARD,
 220		lan966x, FDMA_CH_DB_DISCARD);
 221}
 222
 223static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
 224{
 225	struct lan966x *lan966x = rx->lan966x;
 226
 227	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
 228		FDMA_CH_RELOAD_CH_RELOAD,
 229		lan966x, FDMA_CH_RELOAD);
 230}
 231
 232static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
 233				    struct lan966x_tx_dcb *dcb)
 234{
 235	dcb->nextptr = FDMA_DCB_INVALID_DATA;
 236	dcb->info = 0;
 237}
 238
 239static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
 240{
 241	struct lan966x *lan966x = tx->lan966x;
 242	struct lan966x_tx_dcb *dcb;
 243	struct lan966x_db *db;
 244	int size;
 245	int i, j;
 246
 247	tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
 248			       GFP_KERNEL);
 249	if (!tx->dcbs_buf)
 250		return -ENOMEM;
 251
 252	/* calculate how many pages are needed to allocate the dcbs */
 253	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
 254	size = ALIGN(size, PAGE_SIZE);
 255	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
 256	if (!tx->dcbs)
 257		goto out;
 258
 259	/* Now for each dcb allocate the db */
 260	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 261		dcb = &tx->dcbs[i];
 262
 263		for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
 264			db = &dcb->db[j];
 265			db->dataptr = 0;
 266			db->status = 0;
 267		}
 268
 269		lan966x_fdma_tx_add_dcb(tx, dcb);
 270	}
 271
 272	return 0;
 273
 274out:
 275	kfree(tx->dcbs_buf);
 276	return -ENOMEM;
 277}
 278
 279static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
 280{
 281	struct lan966x *lan966x = tx->lan966x;
 282	int size;
 283
 284	kfree(tx->dcbs_buf);
 285
 286	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
 287	size = ALIGN(size, PAGE_SIZE);
 288	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
 289}
 290
 291static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
 292{
 293	struct lan966x *lan966x = tx->lan966x;
 294	u32 mask;
 295
  296	/* When activating a channel, the address of the first DCB must be
  297	 * written first and only then can the channel be activated.
  298	 */
 299	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
 300	       FDMA_DCB_LLP(tx->channel_id));
 301	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
 302	       FDMA_DCB_LLP1(tx->channel_id));
 303
 304	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
 305	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
 306	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
 307	       FDMA_CH_CFG_CH_MEM_SET(1),
 308	       lan966x, FDMA_CH_CFG(tx->channel_id));
 309
 310	/* Start fdma */
 311	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
 312		FDMA_PORT_CTRL_INJ_STOP,
 313		lan966x, FDMA_PORT_CTRL(0));
 314
 315	/* Enable interrupts */
 316	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
 317	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
 318	mask |= BIT(tx->channel_id);
 319	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
 320		FDMA_INTR_DB_ENA_INTR_DB_ENA,
 321		lan966x, FDMA_INTR_DB_ENA);
 322
 323	/* Activate the channel */
 324	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
 325		FDMA_CH_ACTIVATE_CH_ACTIVATE,
 326		lan966x, FDMA_CH_ACTIVATE);
 327}
 328
 329static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
 330{
 331	struct lan966x *lan966x = tx->lan966x;
 332	u32 val;
 333
 334	/* Disable the channel */
 335	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
 336		FDMA_CH_DISABLE_CH_DISABLE,
 337		lan966x, FDMA_CH_DISABLE);
 338
 339	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
 340				  val, !(val & BIT(tx->channel_id)),
 341				  READL_SLEEP_US, READL_TIMEOUT_US);
 342
 343	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
 344		FDMA_CH_DB_DISCARD_DB_DISCARD,
 345		lan966x, FDMA_CH_DB_DISCARD);
 346
 347	tx->activated = false;
 348	tx->last_in_use = -1;
 349}
 350
 351static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
 352{
 353	struct lan966x *lan966x = tx->lan966x;
 354
 355	/* Write the registers to reload the channel */
 356	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
 357		FDMA_CH_RELOAD_CH_RELOAD,
 358		lan966x, FDMA_CH_RELOAD);
 359}
 360
 361static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
 362{
 363	struct lan966x_port *port;
 364	int i;
 365
 366	for (i = 0; i < lan966x->num_phys_ports; ++i) {
 367		port = lan966x->ports[i];
 368		if (!port)
 369			continue;
 370
 371		if (netif_queue_stopped(port->dev))
 372			netif_wake_queue(port->dev);
 373	}
 374}
 375
 376static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
 377{
 378	struct lan966x_port *port;
 379	int i;
 380
 381	for (i = 0; i < lan966x->num_phys_ports; ++i) {
 382		port = lan966x->ports[i];
 383		if (!port)
 384			continue;
 385
 386		netif_stop_queue(port->dev);
 387	}
 388}
 389
 390static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
 391{
 392	struct lan966x_tx *tx = &lan966x->tx;
 393	struct lan966x_tx_dcb_buf *dcb_buf;
 394	struct xdp_frame_bulk bq;
 395	struct lan966x_db *db;
 396	unsigned long flags;
 397	bool clear = false;
 398	int i;
 399
 400	xdp_frame_bulk_init(&bq);
 401
 402	spin_lock_irqsave(&lan966x->tx_lock, flags);
 403	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 404		dcb_buf = &tx->dcbs_buf[i];
 405
 406		if (!dcb_buf->used)
 407			continue;
 408
 409		db = &tx->dcbs[i].db[0];
 410		if (!(db->status & FDMA_DCB_STATUS_DONE))
 411			continue;
 412
 413		dcb_buf->dev->stats.tx_packets++;
 414		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
 415
 416		dcb_buf->used = false;
 417		if (dcb_buf->use_skb) {
 418			dma_unmap_single(lan966x->dev,
 419					 dcb_buf->dma_addr,
 420					 dcb_buf->len,
 421					 DMA_TO_DEVICE);
 422
 423			if (!dcb_buf->ptp)
 424				napi_consume_skb(dcb_buf->data.skb, weight);
 425		} else {
 426			if (dcb_buf->xdp_ndo)
 427				dma_unmap_single(lan966x->dev,
 428						 dcb_buf->dma_addr,
 429						 dcb_buf->len,
 430						 DMA_TO_DEVICE);
 431
 432			if (dcb_buf->xdp_ndo)
 433				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
 434			else
 435				xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
 436		}
 437
 438		clear = true;
 439	}
 440
 441	xdp_flush_frame_bulk(&bq);
 442
 443	if (clear)
 444		lan966x_fdma_wakeup_netdev(lan966x);
 445
 446	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
 447}
 448
 449static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
 450{
 451	struct lan966x_db *db;
 452
 453	/* Check if there is any data */
 454	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
 455	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
 456		return false;
 457
 458	return true;
 459}
 460
 461static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
 462{
 463	struct lan966x *lan966x = rx->lan966x;
 464	struct lan966x_port *port;
 465	struct lan966x_db *db;
 466	struct page *page;
 467
 468	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
 469	page = rx->page[rx->dcb_index][rx->db_index];
 470	if (unlikely(!page))
 471		return FDMA_ERROR;
 472
 473	dma_sync_single_for_cpu(lan966x->dev,
 474				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
 475				FDMA_DCB_STATUS_BLOCKL(db->status),
 476				DMA_FROM_DEVICE);
 477
 478	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
 479				 src_port);
 480	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
 481		return FDMA_ERROR;
 482
 483	port = lan966x->ports[*src_port];
 484	if (!lan966x_xdp_port_present(port))
 485		return FDMA_PASS;
 486
 487	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
 488}
 489
 490static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
 491						 u64 src_port)
 492{
 493	struct lan966x *lan966x = rx->lan966x;
 494	struct lan966x_db *db;
 495	struct sk_buff *skb;
 496	struct page *page;
 497	u64 timestamp;
 498
 499	/* Get the received frame and unmap it */
 500	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
 501	page = rx->page[rx->dcb_index][rx->db_index];
 502
 503	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
 504	if (unlikely(!skb))
 505		goto free_page;
 506
 507	skb_mark_for_recycle(skb);
 508
 509	skb_reserve(skb, XDP_PACKET_HEADROOM);
 510	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
 511
 512	lan966x_ifh_get_timestamp(skb->data, &timestamp);
 513
 514	skb->dev = lan966x->ports[src_port]->dev;
 515	skb_pull(skb, IFH_LEN_BYTES);
 516
 517	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
 518		skb_trim(skb, skb->len - ETH_FCS_LEN);
 519
 520	lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
 521	skb->protocol = eth_type_trans(skb, skb->dev);
 522
 523	if (lan966x->bridge_mask & BIT(src_port)) {
 524		skb->offload_fwd_mark = 1;
 525
 526		skb_reset_network_header(skb);
 527		if (!lan966x_hw_offload(lan966x, src_port, skb))
 528			skb->offload_fwd_mark = 0;
 529	}
 530
 531	skb->dev->stats.rx_bytes += skb->len;
 532	skb->dev->stats.rx_packets++;
 533
 534	return skb;
 535
 536free_page:
 537	page_pool_recycle_direct(rx->page_pool, page);
 538
 539	return NULL;
 540}
 541
 542static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
 543{
 544	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
 545	struct lan966x_rx *rx = &lan966x->rx;
 546	int dcb_reload = rx->dcb_index;
 547	struct lan966x_rx_dcb *old_dcb;
 548	struct lan966x_db *db;
 549	bool redirect = false;
 550	struct sk_buff *skb;
 551	struct page *page;
 552	int counter = 0;
 553	u64 src_port;
 554	u64 nextptr;
 555
 556	lan966x_fdma_tx_clear_buf(lan966x, weight);
 557
  558	/* Get all received skbs */
 559	while (counter < weight) {
 560		if (!lan966x_fdma_rx_more_frames(rx))
 561			break;
 562
 563		counter++;
 564
 565		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
 566		case FDMA_PASS:
 567			break;
 568		case FDMA_ERROR:
 569			lan966x_fdma_rx_free_page(rx);
 570			lan966x_fdma_rx_advance_dcb(rx);
 571			goto allocate_new;
 572		case FDMA_REDIRECT:
 573			redirect = true;
 574			fallthrough;
 575		case FDMA_TX:
 576			lan966x_fdma_rx_advance_dcb(rx);
 577			continue;
 578		case FDMA_DROP:
 579			lan966x_fdma_rx_free_page(rx);
 580			lan966x_fdma_rx_advance_dcb(rx);
 581			continue;
 582		}
 583
 584		skb = lan966x_fdma_rx_get_frame(rx, src_port);
 585		lan966x_fdma_rx_advance_dcb(rx);
 586		if (!skb)
 587			goto allocate_new;
 588
 589		napi_gro_receive(&lan966x->napi, skb);
 590	}
 591
 592allocate_new:
 593	/* Allocate new pages and map them */
 594	while (dcb_reload != rx->dcb_index) {
 595		db = &rx->dcbs[dcb_reload].db[rx->db_index];
 596		page = lan966x_fdma_rx_alloc_page(rx, db);
 597		if (unlikely(!page))
 598			break;
 599		rx->page[dcb_reload][rx->db_index] = page;
 600
 601		old_dcb = &rx->dcbs[dcb_reload];
 602		dcb_reload++;
 603		dcb_reload &= FDMA_DCB_MAX - 1;
 604
 605		nextptr = rx->dma + ((unsigned long)old_dcb -
 606				     (unsigned long)rx->dcbs);
 607		lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
 608		lan966x_fdma_rx_reload(rx);
 609	}
 610
 611	if (redirect)
 612		xdp_do_flush();
 613
 614	if (counter < weight && napi_complete_done(napi, counter))
 615		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
 616
 617	return counter;
 618}
 619
 620irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
 621{
 622	struct lan966x *lan966x = args;
 623	u32 db, err, err_type;
 624
 625	db = lan_rd(lan966x, FDMA_INTR_DB);
 626	err = lan_rd(lan966x, FDMA_INTR_ERR);
 627
 628	if (db) {
 629		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
 630		lan_wr(db, lan966x, FDMA_INTR_DB);
 631
 632		napi_schedule(&lan966x->napi);
 633	}
 634
 635	if (err) {
 636		err_type = lan_rd(lan966x, FDMA_ERRORS);
 637
 638		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
 639
 640		lan_wr(err, lan966x, FDMA_INTR_ERR);
 641		lan_wr(err_type, lan966x, FDMA_ERRORS);
 642	}
 643
 644	return IRQ_HANDLED;
 645}
 646
 647static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
 648{
 649	struct lan966x_tx_dcb_buf *dcb_buf;
 650	int i;
 651
 652	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 653		dcb_buf = &tx->dcbs_buf[i];
 654		if (!dcb_buf->used && i != tx->last_in_use)
 655			return i;
 656	}
 657
 658	return -1;
 659}
 660
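/* Fill in the hardware descriptor for one TX buffer: a single data block
 * carries the whole frame (SOF and EOF set) and requests an interrupt on
 * completion. Its nextptr stays FDMA_DCB_INVALID_DATA until a later frame
 * is chained behind it by lan966x_fdma_tx_start().
 */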
 661static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
 662				      int next_to_use, int len,
 663				      dma_addr_t dma_addr)
 664{
 665	struct lan966x_tx_dcb *next_dcb;
 666	struct lan966x_db *next_db;
 667
 668	next_dcb = &tx->dcbs[next_to_use];
 669	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
 670
 671	next_db = &next_dcb->db[0];
 672	next_db->dataptr = dma_addr;
 673	next_db->status = FDMA_DCB_STATUS_SOF |
 674			  FDMA_DCB_STATUS_EOF |
 675			  FDMA_DCB_STATUS_INTR |
 676			  FDMA_DCB_STATUS_BLOCKO(0) |
 677			  FDMA_DCB_STATUS_BLOCKL(len);
 678}
 679
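/* Kick the TX channel: once the channel is active, the previously used DCB
 * is linked to the new one and a reload is issued; the very first frame
 * activates the channel instead.
 */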
 680static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
 681{
 682	struct lan966x *lan966x = tx->lan966x;
 683	struct lan966x_tx_dcb *dcb;
 684
 685	if (likely(lan966x->tx.activated)) {
  686		/* Connect the current dcb to the next dcb */
 687		dcb = &tx->dcbs[tx->last_in_use];
 688		dcb->nextptr = tx->dma + (next_to_use *
 689					  sizeof(struct lan966x_tx_dcb));
 690
 691		lan966x_fdma_tx_reload(tx);
 692	} else {
  693		/* This is the first use of the channel, so just activate it */
 694		lan966x->tx.activated = true;
 695		lan966x_fdma_tx_activate(tx);
 696	}
 697
  698	/* Move to the next dcb, because this one is now the last in use */
 699	tx->last_in_use = next_to_use;
 700}
 701
 702int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
 703			   struct xdp_frame *xdpf,
 704			   struct page *page,
 705			   bool dma_map)
 706{
 707	struct lan966x *lan966x = port->lan966x;
 708	struct lan966x_tx_dcb_buf *next_dcb_buf;
 709	struct lan966x_tx *tx = &lan966x->tx;
 710	dma_addr_t dma_addr;
 711	int next_to_use;
 712	__be32 *ifh;
 713	int ret = 0;
 714
 715	spin_lock(&lan966x->tx_lock);
 716
 717	/* Get next index */
 718	next_to_use = lan966x_fdma_get_next_dcb(tx);
 719	if (next_to_use < 0) {
 720		netif_stop_queue(port->dev);
 721		ret = NETDEV_TX_BUSY;
 722		goto out;
 723	}
 724
 725	/* Generate new IFH */
 726	if (dma_map) {
 727		if (xdpf->headroom < IFH_LEN_BYTES) {
 728			ret = NETDEV_TX_OK;
 729			goto out;
 730		}
 731
 732		ifh = xdpf->data - IFH_LEN_BYTES;
 733		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
 734		lan966x_ifh_set_bypass(ifh, 1);
 735		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
 736
 737		dma_addr = dma_map_single(lan966x->dev,
 738					  xdpf->data - IFH_LEN_BYTES,
 739					  xdpf->len + IFH_LEN_BYTES,
 740					  DMA_TO_DEVICE);
 741		if (dma_mapping_error(lan966x->dev, dma_addr)) {
 742			ret = NETDEV_TX_OK;
 743			goto out;
 744		}
 745
 746		/* Setup next dcb */
 747		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
 748					  xdpf->len + IFH_LEN_BYTES,
 749					  dma_addr);
 750	} else {
 751		ifh = page_address(page) + XDP_PACKET_HEADROOM;
 752		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
 753		lan966x_ifh_set_bypass(ifh, 1);
 754		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
 755
 756		dma_addr = page_pool_get_dma_addr(page);
 757		dma_sync_single_for_device(lan966x->dev,
 758					   dma_addr + XDP_PACKET_HEADROOM,
 759					   xdpf->len + IFH_LEN_BYTES,
 760					   DMA_TO_DEVICE);
 761
 762		/* Setup next dcb */
 763		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
 764					  xdpf->len + IFH_LEN_BYTES,
 765					  dma_addr + XDP_PACKET_HEADROOM);
 766	}
 767
 768	/* Fill up the buffer */
 769	next_dcb_buf = &tx->dcbs_buf[next_to_use];
 770	next_dcb_buf->use_skb = false;
 771	next_dcb_buf->data.xdpf = xdpf;
 772	next_dcb_buf->xdp_ndo = dma_map;
 773	next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
 774	next_dcb_buf->dma_addr = dma_addr;
 775	next_dcb_buf->used = true;
 776	next_dcb_buf->ptp = false;
 777	next_dcb_buf->dev = port->dev;
 778
 779	/* Start the transmission */
 780	lan966x_fdma_tx_start(tx, next_to_use);
 781
 782out:
 783	spin_unlock(&lan966x->tx_lock);
 784
 785	return ret;
 786}
 787
 788int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
 789{
 790	struct lan966x_port *port = netdev_priv(dev);
 791	struct lan966x *lan966x = port->lan966x;
 792	struct lan966x_tx_dcb_buf *next_dcb_buf;
 793	struct lan966x_tx *tx = &lan966x->tx;
 794	int needed_headroom;
 795	int needed_tailroom;
 796	dma_addr_t dma_addr;
 797	int next_to_use;
 798	int err;
 799
 800	/* Get next index */
 801	next_to_use = lan966x_fdma_get_next_dcb(tx);
 802	if (next_to_use < 0) {
 803		netif_stop_queue(dev);
 804		return NETDEV_TX_BUSY;
 805	}
 806
 807	if (skb_put_padto(skb, ETH_ZLEN)) {
 808		dev->stats.tx_dropped++;
 809		return NETDEV_TX_OK;
 810	}
 811
 812	/* skb processing */
 813	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
 814	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
 815	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
 816		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
 817				       GFP_ATOMIC);
 818		if (unlikely(err)) {
 819			dev->stats.tx_dropped++;
 820			err = NETDEV_TX_OK;
 821			goto release;
 822		}
 823	}
 824
 825	skb_tx_timestamp(skb);
 826	skb_push(skb, IFH_LEN_BYTES);
 827	memcpy(skb->data, ifh, IFH_LEN_BYTES);
 828	skb_put(skb, 4);
 829
 830	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
 831				  DMA_TO_DEVICE);
 832	if (dma_mapping_error(lan966x->dev, dma_addr)) {
 833		dev->stats.tx_dropped++;
 834		err = NETDEV_TX_OK;
 835		goto release;
 836	}
 837
 838	/* Setup next dcb */
 839	lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
 840
 841	/* Fill up the buffer */
 842	next_dcb_buf = &tx->dcbs_buf[next_to_use];
 843	next_dcb_buf->use_skb = true;
 844	next_dcb_buf->data.skb = skb;
 845	next_dcb_buf->xdp_ndo = false;
 846	next_dcb_buf->len = skb->len;
 847	next_dcb_buf->dma_addr = dma_addr;
 848	next_dcb_buf->used = true;
 849	next_dcb_buf->ptp = false;
 850	next_dcb_buf->dev = dev;
 851
 852	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
 853	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
 854		next_dcb_buf->ptp = true;
 855
 856	/* Start the transmission */
 857	lan966x_fdma_tx_start(tx, next_to_use);
 858
 859	return NETDEV_TX_OK;
 860
 861release:
 862	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
 863	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
 864		lan966x_ptp_txtstamp_release(port, skb);
 865
 866	dev_kfree_skb_any(skb);
 867	return err;
 868}
 869
 870static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
 871{
 872	int max_mtu = 0;
 873	int i;
 874
 875	for (i = 0; i < lan966x->num_phys_ports; ++i) {
 876		struct lan966x_port *port;
 877		int mtu;
 878
 879		port = lan966x->ports[i];
 880		if (!port)
 881			continue;
 882
 883		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
 884		if (mtu > max_mtu)
 885			max_mtu = mtu;
 886	}
 887
 888	return max_mtu;
 889}
 890
 891static int lan966x_qsys_sw_status(struct lan966x *lan966x)
 892{
 893	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
 894}
 895
 896static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
 897{
 898	struct page_pool *page_pool;
 899	dma_addr_t rx_dma;
 900	void *rx_dcbs;
 901	u32 size;
 902	int err;
 903
  904	/* Store these so they can be freed later */
 905	rx_dma = lan966x->rx.dma;
 906	rx_dcbs = lan966x->rx.dcbs;
 907	page_pool = lan966x->rx.page_pool;
 908
 909	napi_synchronize(&lan966x->napi);
 910	napi_disable(&lan966x->napi);
 911	lan966x_fdma_stop_netdev(lan966x);
 912
 913	lan966x_fdma_rx_disable(&lan966x->rx);
 914	lan966x_fdma_rx_free_pages(&lan966x->rx);
 915	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
 916	lan966x->rx.max_mtu = new_mtu;
 917	err = lan966x_fdma_rx_alloc(&lan966x->rx);
 918	if (err)
 919		goto restore;
 920	lan966x_fdma_rx_start(&lan966x->rx);
 921
 922	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
 923	size = ALIGN(size, PAGE_SIZE);
 924	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
 925
 926	page_pool_destroy(page_pool);
 927
 928	lan966x_fdma_wakeup_netdev(lan966x);
 929	napi_enable(&lan966x->napi);
 930
 931	return err;
 932restore:
 933	lan966x->rx.page_pool = page_pool;
 934	lan966x->rx.dma = rx_dma;
 935	lan966x->rx.dcbs = rx_dcbs;
 936	lan966x_fdma_rx_start(&lan966x->rx);
 937
 938	return err;
 939}
 940
 941static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
 942{
 943	return lan966x_fdma_get_max_mtu(lan966x) +
 944	       IFH_LEN_BYTES +
 945	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
 946	       VLAN_HLEN * 2 +
 947	       XDP_PACKET_HEADROOM;
 948}
 949
 950static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
 951{
 952	int err;
 953	u32 val;
 954
 955	/* Disable the CPU port */
 956	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
 957		QSYS_SW_PORT_MODE_PORT_ENA,
 958		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
 959
 960	/* Flush the CPU queues */
 961	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
 962			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
 963			   READL_SLEEP_US, READL_TIMEOUT_US);
 964
 965	/* Add a sleep in case there are frames between the queues and the CPU
 966	 * port
 967	 */
 968	usleep_range(1000, 2000);
 969
 970	err = lan966x_fdma_reload(lan966x, max_mtu);
 971
  972	/* Re-enable the CPU port */
 973	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
 974		QSYS_SW_PORT_MODE_PORT_ENA,
 975		lan966x,  QSYS_SW_PORT_MODE(CPU_PORT));
 976
 977	return err;
 978}
 979
 980int lan966x_fdma_change_mtu(struct lan966x *lan966x)
 981{
 982	int max_mtu;
 983
 984	max_mtu = lan966x_fdma_get_max_frame(lan966x);
 985	if (max_mtu == lan966x->rx.max_mtu)
 986		return 0;
 987
 988	return __lan966x_fdma_reload(lan966x, max_mtu);
 989}
 990
 991int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
 992{
 993	int max_mtu;
 994
 995	max_mtu = lan966x_fdma_get_max_frame(lan966x);
 996	return __lan966x_fdma_reload(lan966x, max_mtu);
 997}
 998
 999void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
1000{
1001	if (lan966x->fdma_ndev)
1002		return;
1003
1004	lan966x->fdma_ndev = dev;
1005	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
1006	napi_enable(&lan966x->napi);
1007}
1008
1009void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
1010{
1011	if (lan966x->fdma_ndev == dev) {
1012		netif_napi_del(&lan966x->napi);
1013		lan966x->fdma_ndev = NULL;
1014	}
1015}
1016
1017int lan966x_fdma_init(struct lan966x *lan966x)
1018{
1019	int err;
1020
1021	if (!lan966x->fdma)
1022		return 0;
1023
1024	lan966x->rx.lan966x = lan966x;
1025	lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
1026	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
1027	lan966x->tx.lan966x = lan966x;
1028	lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
1029	lan966x->tx.last_in_use = -1;
1030
1031	err = lan966x_fdma_rx_alloc(&lan966x->rx);
1032	if (err)
1033		return err;
1034
1035	err = lan966x_fdma_tx_alloc(&lan966x->tx);
1036	if (err) {
1037		lan966x_fdma_rx_free(&lan966x->rx);
1038		return err;
1039	}
1040
1041	lan966x_fdma_rx_start(&lan966x->rx);
1042
1043	return 0;
1044}
1045
1046void lan966x_fdma_deinit(struct lan966x *lan966x)
1047{
1048	if (!lan966x->fdma)
1049		return;
1050
1051	lan966x_fdma_rx_disable(&lan966x->rx);
1052	lan966x_fdma_tx_disable(&lan966x->tx);
1053
1054	napi_synchronize(&lan966x->napi);
1055	napi_disable(&lan966x->napi);
1056
1057	lan966x_fdma_rx_free_pages(&lan966x->rx);
1058	lan966x_fdma_rx_free(&lan966x->rx);
1059	page_pool_destroy(lan966x->rx.page_pool);
1060	lan966x_fdma_tx_free(&lan966x->tx);
1061}