v6.8
   1// SPDX-License-Identifier: GPL-2.0+
   2
   3#include <linux/bpf.h>
   4#include <linux/filter.h>
   5#include <net/page_pool/helpers.h>
   6
   7#include "lan966x_main.h"
   8
   9static int lan966x_fdma_channel_active(struct lan966x *lan966x)
  10{
  11	return lan_rd(lan966x, FDMA_CH_ACTIVE);
  12}
  13
  14static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
  15					       struct lan966x_db *db)
  16{
  17	struct page *page;
  18
  19	page = page_pool_dev_alloc_pages(rx->page_pool);
  20	if (unlikely(!page))
  21		return NULL;
  22
  23	db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
  24
  25	return page;
  26}
  27
  28static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
  29{
  30	int i, j;
  31
  32	for (i = 0; i < FDMA_DCB_MAX; ++i) {
  33		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
  34			page_pool_put_full_page(rx->page_pool,
  35						rx->page[i][j], false);
  36	}
  37}
  38
  39static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
  40{
  41	struct page *page;
  42
  43	page = rx->page[rx->dcb_index][rx->db_index];
  44	if (unlikely(!page))
  45		return;
  46
  47	page_pool_recycle_direct(rx->page_pool, page);
  48}
  49
  50static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
  51				    struct lan966x_rx_dcb *dcb,
  52				    u64 nextptr)
  53{
  54	struct lan966x_db *db;
  55	int i;
  56
  57	for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
  58		db = &dcb->db[i];
  59		db->status = FDMA_DCB_STATUS_INTR;
  60	}
  61
  62	dcb->nextptr = FDMA_DCB_INVALID_DATA;
  63	dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
  64
  65	rx->last_entry->nextptr = nextptr;
  66	rx->last_entry = dcb;
  67}
  68
  69static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
  70{
  71	struct lan966x *lan966x = rx->lan966x;
  72	struct page_pool_params pp_params = {
  73		.order = rx->page_order,
  74		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
  75		.pool_size = FDMA_DCB_MAX,
  76		.nid = NUMA_NO_NODE,
  77		.dev = lan966x->dev,
  78		.dma_dir = DMA_FROM_DEVICE,
  79		.offset = XDP_PACKET_HEADROOM,
  80		.max_len = rx->max_mtu -
  81			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
  82	};
  83
  84	if (lan966x_xdp_present(lan966x))
  85		pp_params.dma_dir = DMA_BIDIRECTIONAL;
  86
  87	rx->page_pool = page_pool_create(&pp_params);
  88
  89	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
  90		struct lan966x_port *port;
  91
  92		if (!lan966x->ports[i])
  93			continue;
  94
  95		port = lan966x->ports[i];
  96		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
  97		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
  98					   rx->page_pool);
  99	}
 100
 101	return PTR_ERR_OR_ZERO(rx->page_pool);
 102}
 103
 104static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
 105{
 106	struct lan966x *lan966x = rx->lan966x;
 107	struct lan966x_rx_dcb *dcb;
 108	struct lan966x_db *db;
 109	struct page *page;
 110	int i, j;
 111	int size;
 112
 113	if (lan966x_fdma_rx_alloc_page_pool(rx))
 114		return PTR_ERR(rx->page_pool);
 115
 116	/* calculate how many pages are needed to allocate the dcbs */
 117	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
 118	size = ALIGN(size, PAGE_SIZE);
 119
 120	rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
 121	if (!rx->dcbs)
 122		return -ENOMEM;
 123
 124	rx->last_entry = rx->dcbs;
 125	rx->db_index = 0;
 126	rx->dcb_index = 0;
 127
 128	/* Now for each dcb allocate the dbs */
 129	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 130		dcb = &rx->dcbs[i];
 131		dcb->info = 0;
 132
 133		/* For each db allocate a page and map it to the DB dataptr. */
 134		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
 135			db = &dcb->db[j];
 136			page = lan966x_fdma_rx_alloc_page(rx, db);
 137			if (!page)
 138				return -ENOMEM;
 139
 140			db->status = 0;
 141			rx->page[i][j] = page;
 142		}
 143
 144		lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
 145	}
 146
 147	return 0;
 148}
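/* Summary of the allocation above: the RX ring is FDMA_DCB_MAX DCBs kept in
 * one coherent DMA block. Each DCB carries FDMA_RX_DCB_MAX_DBS data blocks
 * whose dataptr points XDP_PACKET_HEADROOM bytes into a page-pool page, and
 * lan966x_fdma_rx_add_dcb() links every new DCB behind the previous one
 * through nextptr, building the list that lan966x_fdma_rx_start() hands to
 * the channel via FDMA_DCB_LLP.
 */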
 149
 150static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
 151{
 152	rx->dcb_index++;
 153	rx->dcb_index &= FDMA_DCB_MAX - 1;
 154}
 155
 156static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
 157{
 158	struct lan966x *lan966x = rx->lan966x;
 159	u32 size;
 160
 161	/* Now it is possible to clean up the dcbs */
 162	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
 163	size = ALIGN(size, PAGE_SIZE);
 164	dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
 165}
 166
 167static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
 168{
 169	struct lan966x *lan966x = rx->lan966x;
 170	u32 mask;
 171
 172	/* When activating a channel, the address of the first DCB must be
 173	 * written before the channel is activated.
 174	 */
 175	lan_wr(lower_32_bits((u64)rx->dma), lan966x,
 176	       FDMA_DCB_LLP(rx->channel_id));
 177	lan_wr(upper_32_bits((u64)rx->dma), lan966x,
 178	       FDMA_DCB_LLP1(rx->channel_id));
 179
 180	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
 181	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
 182	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
 183	       FDMA_CH_CFG_CH_MEM_SET(1),
 184	       lan966x, FDMA_CH_CFG(rx->channel_id));
 185
 186	/* Start fdma */
 187	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
 188		FDMA_PORT_CTRL_XTR_STOP,
 189		lan966x, FDMA_PORT_CTRL(0));
 190
 191	/* Enable interrupts */
 192	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
 193	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
 194	mask |= BIT(rx->channel_id);
 195	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
 196		FDMA_INTR_DB_ENA_INTR_DB_ENA,
 197		lan966x, FDMA_INTR_DB_ENA);
 198
 199	/* Activate the channel */
 200	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
 201		FDMA_CH_ACTIVATE_CH_ACTIVATE,
 202		lan966x, FDMA_CH_ACTIVATE);
 203}
 204
 205static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
 206{
 207	struct lan966x *lan966x = rx->lan966x;
 208	u32 val;
 209
 210	/* Disable the channel */
 211	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
 212		FDMA_CH_DISABLE_CH_DISABLE,
 213		lan966x, FDMA_CH_DISABLE);
 214
 215	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
 216				  val, !(val & BIT(rx->channel_id)),
 217				  READL_SLEEP_US, READL_TIMEOUT_US);
 218
 219	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
 220		FDMA_CH_DB_DISCARD_DB_DISCARD,
 221		lan966x, FDMA_CH_DB_DISCARD);
 222}
 223
 224static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
 225{
 226	struct lan966x *lan966x = rx->lan966x;
 227
 228	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
 229		FDMA_CH_RELOAD_CH_RELOAD,
 230		lan966x, FDMA_CH_RELOAD);
 231}
 232
 233static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
 234				    struct lan966x_tx_dcb *dcb)
 235{
 236	dcb->nextptr = FDMA_DCB_INVALID_DATA;
 237	dcb->info = 0;
 238}
 239
 240static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
 241{
 242	struct lan966x *lan966x = tx->lan966x;
 243	struct lan966x_tx_dcb *dcb;
 244	struct lan966x_db *db;
 245	int size;
 246	int i, j;
 247
 248	tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
 249			       GFP_KERNEL);
 250	if (!tx->dcbs_buf)
 251		return -ENOMEM;
 252
 253	/* calculate how many pages are needed to allocate the dcbs */
 254	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
 255	size = ALIGN(size, PAGE_SIZE);
 256	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
 257	if (!tx->dcbs)
 258		goto out;
 259
 260	/* Now for each dcb allocate the db */
 261	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 262		dcb = &tx->dcbs[i];
 263
 264		for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
 265			db = &dcb->db[j];
 266			db->dataptr = 0;
 267			db->status = 0;
 268		}
 269
 270		lan966x_fdma_tx_add_dcb(tx, dcb);
 271	}
 272
 273	return 0;
 274
 275out:
 276	kfree(tx->dcbs_buf);
 277	return -ENOMEM;
 278}
 279
 280static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
 281{
 282	struct lan966x *lan966x = tx->lan966x;
 283	int size;
 284
 285	kfree(tx->dcbs_buf);
 286
 287	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
 288	size = ALIGN(size, PAGE_SIZE);
 289	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
 290}
 291
 292static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
 293{
 294	struct lan966x *lan966x = tx->lan966x;
 295	u32 mask;
 296
 297	/* When activating a channel, the address of the first DCB must be
 298	 * written before the channel is activated.
 299	 */
 300	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
 301	       FDMA_DCB_LLP(tx->channel_id));
 302	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
 303	       FDMA_DCB_LLP1(tx->channel_id));
 304
 305	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
 306	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
 307	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
 308	       FDMA_CH_CFG_CH_MEM_SET(1),
 309	       lan966x, FDMA_CH_CFG(tx->channel_id));
 310
 311	/* Start fdma */
 312	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
 313		FDMA_PORT_CTRL_INJ_STOP,
 314		lan966x, FDMA_PORT_CTRL(0));
 315
 316	/* Enable interrupts */
 317	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
 318	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
 319	mask |= BIT(tx->channel_id);
 320	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
 321		FDMA_INTR_DB_ENA_INTR_DB_ENA,
 322		lan966x, FDMA_INTR_DB_ENA);
 323
 324	/* Activate the channel */
 325	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
 326		FDMA_CH_ACTIVATE_CH_ACTIVATE,
 327		lan966x, FDMA_CH_ACTIVATE);
 328}
 329
 330static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
 331{
 332	struct lan966x *lan966x = tx->lan966x;
 333	u32 val;
 334
 335	/* Disable the channel */
 336	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
 337		FDMA_CH_DISABLE_CH_DISABLE,
 338		lan966x, FDMA_CH_DISABLE);
 339
 340	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
 341				  val, !(val & BIT(tx->channel_id)),
 342				  READL_SLEEP_US, READL_TIMEOUT_US);
 343
 344	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
 345		FDMA_CH_DB_DISCARD_DB_DISCARD,
 346		lan966x, FDMA_CH_DB_DISCARD);
 347
 348	tx->activated = false;
 349	tx->last_in_use = -1;
 350}
 351
 352static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
 353{
 354	struct lan966x *lan966x = tx->lan966x;
 355
 356	/* Write the registers to reload the channel */
 357	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
 358		FDMA_CH_RELOAD_CH_RELOAD,
 359		lan966x, FDMA_CH_RELOAD);
 360}
 361
 362static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
 363{
 364	struct lan966x_port *port;
 365	int i;
 366
 367	for (i = 0; i < lan966x->num_phys_ports; ++i) {
 368		port = lan966x->ports[i];
 369		if (!port)
 370			continue;
 371
 372		if (netif_queue_stopped(port->dev))
 373			netif_wake_queue(port->dev);
 374	}
 375}
 376
 377static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
 378{
 379	struct lan966x_port *port;
 380	int i;
 381
 382	for (i = 0; i < lan966x->num_phys_ports; ++i) {
 383		port = lan966x->ports[i];
 384		if (!port)
 385			continue;
 386
 387		netif_stop_queue(port->dev);
 388	}
 389}
 390
 391static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
 392{
 393	struct lan966x_tx *tx = &lan966x->tx;
 394	struct lan966x_rx *rx = &lan966x->rx;
 395	struct lan966x_tx_dcb_buf *dcb_buf;
 396	struct xdp_frame_bulk bq;
 397	struct lan966x_db *db;
 398	unsigned long flags;
 399	bool clear = false;
 400	int i;
 401
 402	xdp_frame_bulk_init(&bq);
 403
 404	spin_lock_irqsave(&lan966x->tx_lock, flags);
 405	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 406		dcb_buf = &tx->dcbs_buf[i];
 407
 408		if (!dcb_buf->used)
 409			continue;
 410
 411		db = &tx->dcbs[i].db[0];
 412		if (!(db->status & FDMA_DCB_STATUS_DONE))
 413			continue;
 414
 415		dcb_buf->dev->stats.tx_packets++;
 416		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
 417
 418		dcb_buf->used = false;
 419		if (dcb_buf->use_skb) {
 420			dma_unmap_single(lan966x->dev,
 421					 dcb_buf->dma_addr,
 422					 dcb_buf->len,
 423					 DMA_TO_DEVICE);
 424
 425			if (!dcb_buf->ptp)
 426				napi_consume_skb(dcb_buf->data.skb, weight);
 427		} else {
 428			if (dcb_buf->xdp_ndo)
 429				dma_unmap_single(lan966x->dev,
 430						 dcb_buf->dma_addr,
 431						 dcb_buf->len,
 432						 DMA_TO_DEVICE);
 433
 434			if (dcb_buf->xdp_ndo)
 435				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
 436			else
 437				page_pool_recycle_direct(rx->page_pool,
 438							 dcb_buf->data.page);
 439		}
 440
 441		clear = true;
 442	}
 443
 444	xdp_flush_frame_bulk(&bq);
 445
 446	if (clear)
 447		lan966x_fdma_wakeup_netdev(lan966x);
 448
 449	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
 450}
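/* Completion handling above: every TX DCB buffer whose first data block
 * reports FDMA_DCB_STATUS_DONE is reclaimed under tx_lock. skb buffers are
 * unmapped and passed to napi_consume_skb() (skbs still waiting for a
 * two-step PTP timestamp are skipped here), ndo_xdp_xmit frames are unmapped
 * and returned through the bulk queue, and XDP_TX pages are recycled straight
 * into the RX page pool. If anything was reclaimed, stopped netdev queues are
 * woken up again.
 */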
 451
 452static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
 453{
 454	struct lan966x_db *db;
 455
 456	/* Check if there is any data */
 457	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
 458	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
 459		return false;
 460
 461	return true;
 462}
 463
 464static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
 465{
 466	struct lan966x *lan966x = rx->lan966x;
 467	struct lan966x_port *port;
 468	struct lan966x_db *db;
 469	struct page *page;
 470
 471	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
 472	page = rx->page[rx->dcb_index][rx->db_index];
 473	if (unlikely(!page))
 474		return FDMA_ERROR;
 475
 476	dma_sync_single_for_cpu(lan966x->dev,
 477				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
 478				FDMA_DCB_STATUS_BLOCKL(db->status),
 479				DMA_FROM_DEVICE);
 480
 481	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
 482				 src_port);
 483	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
 484		return FDMA_ERROR;
 485
 486	port = lan966x->ports[*src_port];
 487	if (!lan966x_xdp_port_present(port))
 488		return FDMA_PASS;
 489
 490	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
 491}
 492
 493static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
 494						 u64 src_port)
 495{
 496	struct lan966x *lan966x = rx->lan966x;
 497	struct lan966x_db *db;
 498	struct sk_buff *skb;
 499	struct page *page;
 500	u64 timestamp;
 501
 502	/* Get the received frame and unmap it */
 503	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
 504	page = rx->page[rx->dcb_index][rx->db_index];
 505
 506	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
 507	if (unlikely(!skb))
 508		goto free_page;
 509
 510	skb_mark_for_recycle(skb);
 511
 512	skb_reserve(skb, XDP_PACKET_HEADROOM);
 513	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
 514
 515	lan966x_ifh_get_timestamp(skb->data, &timestamp);
 516
 517	skb->dev = lan966x->ports[src_port]->dev;
 518	skb_pull(skb, IFH_LEN_BYTES);
 519
 520	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
 521		skb_trim(skb, skb->len - ETH_FCS_LEN);
 522
 523	lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
 524	skb->protocol = eth_type_trans(skb, skb->dev);
 525
 526	if (lan966x->bridge_mask & BIT(src_port)) {
 527		skb->offload_fwd_mark = 1;
 528
 529		skb_reset_network_header(skb);
 530		if (!lan966x_hw_offload(lan966x, src_port, skb))
 531			skb->offload_fwd_mark = 0;
 532	}
 533
 534	skb->dev->stats.rx_bytes += skb->len;
 535	skb->dev->stats.rx_packets++;
 536
 537	return skb;
 538
 539free_page:
 540	page_pool_recycle_direct(rx->page_pool, page);
 541
 542	return NULL;
 543}
 544
 545static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
 546{
 547	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
 548	struct lan966x_rx *rx = &lan966x->rx;
 549	int dcb_reload = rx->dcb_index;
 550	struct lan966x_rx_dcb *old_dcb;
 551	struct lan966x_db *db;
 552	bool redirect = false;
 553	struct sk_buff *skb;
 554	struct page *page;
 555	int counter = 0;
 556	u64 src_port;
 557	u64 nextptr;
 558
 559	lan966x_fdma_tx_clear_buf(lan966x, weight);
 560
 561	/* Get all received skb */
 562	while (counter < weight) {
 563		if (!lan966x_fdma_rx_more_frames(rx))
 564			break;
 565
 566		counter++;
 567
 568		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
 569		case FDMA_PASS:
 570			break;
 571		case FDMA_ERROR:
 572			lan966x_fdma_rx_free_page(rx);
 573			lan966x_fdma_rx_advance_dcb(rx);
 574			goto allocate_new;
 575		case FDMA_REDIRECT:
 576			redirect = true;
 577			fallthrough;
 578		case FDMA_TX:
 579			lan966x_fdma_rx_advance_dcb(rx);
 580			continue;
 581		case FDMA_DROP:
 582			lan966x_fdma_rx_free_page(rx);
 583			lan966x_fdma_rx_advance_dcb(rx);
 584			continue;
 585		}
 586
 587		skb = lan966x_fdma_rx_get_frame(rx, src_port);
 588		lan966x_fdma_rx_advance_dcb(rx);
 589		if (!skb)
 590			goto allocate_new;
 591
 592		napi_gro_receive(&lan966x->napi, skb);
 593	}
 594
 595allocate_new:
 596	/* Allocate new pages and map them */
 597	while (dcb_reload != rx->dcb_index) {
 598		db = &rx->dcbs[dcb_reload].db[rx->db_index];
 599		page = lan966x_fdma_rx_alloc_page(rx, db);
 600		if (unlikely(!page))
 601			break;
 602		rx->page[dcb_reload][rx->db_index] = page;
 603
 604		old_dcb = &rx->dcbs[dcb_reload];
 605		dcb_reload++;
 606		dcb_reload &= FDMA_DCB_MAX - 1;
 607
 608		nextptr = rx->dma + ((unsigned long)old_dcb -
 609				     (unsigned long)rx->dcbs);
 610		lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
 611		lan966x_fdma_rx_reload(rx);
 612	}
 613
 614	if (redirect)
 615		xdp_do_flush();
 616
 617	if (counter < weight && napi_complete_done(napi, counter))
 618		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
 619
 620	return counter;
 621}
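/* NAPI flow above: TX completions are reclaimed first, then up to the NAPI
 * weight of RX descriptors is consumed. FDMA_PASS frames become skbs and go
 * to GRO, FDMA_TX/FDMA_REDIRECT frames keep their page for the XDP path, and
 * FDMA_ERROR/FDMA_DROP recycle the page immediately. Every descriptor
 * consumed in this pass is then given a fresh page and re-chained into the
 * ring; finally any XDP redirects are flushed and, if the budget was not
 * exhausted, the doorbell interrupts are re-enabled.
 */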
 622
 623irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
 624{
 625	struct lan966x *lan966x = args;
 626	u32 db, err, err_type;
 627
 628	db = lan_rd(lan966x, FDMA_INTR_DB);
 629	err = lan_rd(lan966x, FDMA_INTR_ERR);
 630
 631	if (db) {
 632		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
 633		lan_wr(db, lan966x, FDMA_INTR_DB);
 634
 635		napi_schedule(&lan966x->napi);
 636	}
 637
 638	if (err) {
 639		err_type = lan_rd(lan966x, FDMA_ERRORS);
 640
 641		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
 642
 643		lan_wr(err, lan966x, FDMA_INTR_ERR);
 644		lan_wr(err_type, lan966x, FDMA_ERRORS);
 645	}
 646
 647	return IRQ_HANDLED;
 648}
 649
 650static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
 651{
 652	struct lan966x_tx_dcb_buf *dcb_buf;
 653	int i;
 654
 655	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 656		dcb_buf = &tx->dcbs_buf[i];
 657		if (!dcb_buf->used && i != tx->last_in_use)
 658			return i;
 659	}
 660
 661	return -1;
 662}
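/* Any unused entry except tx->last_in_use can be handed out; the descriptor
 * most recently given to the hardware stays reserved so it is not rewritten
 * while the channel may still be chaining from it. A return of -1 means the
 * ring is full, and both callers react by stopping the queue and returning
 * NETDEV_TX_BUSY.
 */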
 663
 664static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
 665				      int next_to_use, int len,
 666				      dma_addr_t dma_addr)
 667{
 668	struct lan966x_tx_dcb *next_dcb;
 669	struct lan966x_db *next_db;
 670
 671	next_dcb = &tx->dcbs[next_to_use];
 672	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
 673
 674	next_db = &next_dcb->db[0];
 675	next_db->dataptr = dma_addr;
 676	next_db->status = FDMA_DCB_STATUS_SOF |
 677			  FDMA_DCB_STATUS_EOF |
 678			  FDMA_DCB_STATUS_INTR |
 679			  FDMA_DCB_STATUS_BLOCKO(0) |
 680			  FDMA_DCB_STATUS_BLOCKL(len);
 681}
 682
 683static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
 684{
 685	struct lan966x *lan966x = tx->lan966x;
 686	struct lan966x_tx_dcb *dcb;
 687
 688	if (likely(lan966x->tx.activated)) {
 689		/* Connect the last used dcb to the next dcb */
 690		dcb = &tx->dcbs[tx->last_in_use];
 691		dcb->nextptr = tx->dma + (next_to_use *
 692					  sizeof(struct lan966x_tx_dcb));
 693
 694		lan966x_fdma_tx_reload(tx);
 695	} else {
 696		/* This is the first transmission, so just activate the channel */
 697		lan966x->tx.activated = true;
 698		lan966x_fdma_tx_activate(tx);
 699	}
 700
 701	/* This dcb is now the last one in use */
 702	tx->last_in_use = next_to_use;
 703}
 704
 705int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
 706{
 707	struct lan966x *lan966x = port->lan966x;
 708	struct lan966x_tx_dcb_buf *next_dcb_buf;
 709	struct lan966x_tx *tx = &lan966x->tx;
 710	struct xdp_frame *xdpf;
 711	dma_addr_t dma_addr;
 712	struct page *page;
 713	int next_to_use;
 714	__be32 *ifh;
 715	int ret = 0;
 716
 717	spin_lock(&lan966x->tx_lock);
 718
 719	/* Get next index */
 720	next_to_use = lan966x_fdma_get_next_dcb(tx);
 721	if (next_to_use < 0) {
 722		netif_stop_queue(port->dev);
 723		ret = NETDEV_TX_BUSY;
 724		goto out;
 725	}
 726
 727	/* Get the next buffer */
 728	next_dcb_buf = &tx->dcbs_buf[next_to_use];
 729
 730	/* Generate new IFH */
 731	if (!len) {
 732		xdpf = ptr;
 733
 734		if (xdpf->headroom < IFH_LEN_BYTES) {
 735			ret = NETDEV_TX_OK;
 736			goto out;
 737		}
 738
 739		ifh = xdpf->data - IFH_LEN_BYTES;
 740		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
 741		lan966x_ifh_set_bypass(ifh, 1);
 742		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
 743
 744		dma_addr = dma_map_single(lan966x->dev,
 745					  xdpf->data - IFH_LEN_BYTES,
 746					  xdpf->len + IFH_LEN_BYTES,
 747					  DMA_TO_DEVICE);
 748		if (dma_mapping_error(lan966x->dev, dma_addr)) {
 749			ret = NETDEV_TX_OK;
 750			goto out;
 751		}
 752
 753		next_dcb_buf->data.xdpf = xdpf;
 754		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
 755
 756		/* Setup next dcb */
 757		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
 758					  xdpf->len + IFH_LEN_BYTES,
 759					  dma_addr);
 760	} else {
 761		page = ptr;
 762
 763		ifh = page_address(page) + XDP_PACKET_HEADROOM;
 764		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
 765		lan966x_ifh_set_bypass(ifh, 1);
 766		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
 767
 768		dma_addr = page_pool_get_dma_addr(page);
 769		dma_sync_single_for_device(lan966x->dev,
 770					   dma_addr + XDP_PACKET_HEADROOM,
 771					   len + IFH_LEN_BYTES,
 772					   DMA_TO_DEVICE);
 773
 774		next_dcb_buf->data.page = page;
 775		next_dcb_buf->len = len + IFH_LEN_BYTES;
 776
 777		/* Setup next dcb */
 778		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
 779					  len + IFH_LEN_BYTES,
 780					  dma_addr + XDP_PACKET_HEADROOM);
 781	}
 782
 783	/* Fill up the buffer */
 784	next_dcb_buf->use_skb = false;
 785	next_dcb_buf->xdp_ndo = !len;
 786	next_dcb_buf->dma_addr = dma_addr;
 787	next_dcb_buf->used = true;
 788	next_dcb_buf->ptp = false;
 789	next_dcb_buf->dev = port->dev;
 790
 791	/* Start the transmission */
 792	lan966x_fdma_tx_start(tx, next_to_use);
 793
 794out:
 795	spin_unlock(&lan966x->tx_lock);
 796
 797	return ret;
 798}
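/* Two injection flavours are handled above, selected by len: a zero length
 * means ptr is an xdp_frame coming from ndo_xdp_xmit, which gets its own
 * dma_map_single() mapping and is flagged xdp_ndo for the completion path;
 * a non-zero length means ptr is a page-pool page bounced back out by XDP_TX,
 * so only a dma_sync_single_for_device() of the existing page-pool mapping is
 * needed. In both cases an IFH is written in front of the frame with the
 * bypass bit set and the destination mask pointing at this port.
 */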
 799
 800int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
 801{
 802	struct lan966x_port *port = netdev_priv(dev);
 803	struct lan966x *lan966x = port->lan966x;
 804	struct lan966x_tx_dcb_buf *next_dcb_buf;
 805	struct lan966x_tx *tx = &lan966x->tx;
 806	int needed_headroom;
 807	int needed_tailroom;
 808	dma_addr_t dma_addr;
 809	int next_to_use;
 810	int err;
 811
 812	/* Get next index */
 813	next_to_use = lan966x_fdma_get_next_dcb(tx);
 814	if (next_to_use < 0) {
 815		netif_stop_queue(dev);
 816		return NETDEV_TX_BUSY;
 817	}
 818
 819	if (skb_put_padto(skb, ETH_ZLEN)) {
 820		dev->stats.tx_dropped++;
 821		return NETDEV_TX_OK;
 822	}
 823
 824	/* skb processing */
 825	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
 826	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
 827	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
 828		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
 829				       GFP_ATOMIC);
 830		if (unlikely(err)) {
 831			dev->stats.tx_dropped++;
 832			err = NETDEV_TX_OK;
 833			goto release;
 834		}
 835	}
 836
 837	skb_tx_timestamp(skb);
 838	skb_push(skb, IFH_LEN_BYTES);
 839	memcpy(skb->data, ifh, IFH_LEN_BYTES);
 840	skb_put(skb, 4);
 841
 842	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
 843				  DMA_TO_DEVICE);
 844	if (dma_mapping_error(lan966x->dev, dma_addr)) {
 845		dev->stats.tx_dropped++;
 846		err = NETDEV_TX_OK;
 847		goto release;
 848	}
 849
 850	/* Setup next dcb */
 851	lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
 852
 853	/* Fill up the buffer */
 854	next_dcb_buf = &tx->dcbs_buf[next_to_use];
 855	next_dcb_buf->use_skb = true;
 856	next_dcb_buf->data.skb = skb;
 857	next_dcb_buf->xdp_ndo = false;
 858	next_dcb_buf->len = skb->len;
 859	next_dcb_buf->dma_addr = dma_addr;
 860	next_dcb_buf->used = true;
 861	next_dcb_buf->ptp = false;
 862	next_dcb_buf->dev = dev;
 863
 864	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
 865	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
 866		next_dcb_buf->ptp = true;
 867
 868	/* Start the transmission */
 869	lan966x_fdma_tx_start(tx, next_to_use);
 870
 871	return NETDEV_TX_OK;
 872
 873release:
 874	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
 875	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
 876		lan966x_ptp_txtstamp_release(port, skb);
 877
 878	dev_kfree_skb_any(skb);
 879	return err;
 880}
 881
 882static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
 883{
 884	int max_mtu = 0;
 885	int i;
 886
 887	for (i = 0; i < lan966x->num_phys_ports; ++i) {
 888		struct lan966x_port *port;
 889		int mtu;
 890
 891		port = lan966x->ports[i];
 892		if (!port)
 893			continue;
 894
 895		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
 896		if (mtu > max_mtu)
 897			max_mtu = mtu;
 898	}
 899
 900	return max_mtu;
 901}
 902
 903static int lan966x_qsys_sw_status(struct lan966x *lan966x)
 904{
 905	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
 906}
 907
 908static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
 909{
 910	struct page_pool *page_pool;
 911	dma_addr_t rx_dma;
 912	void *rx_dcbs;
 913	u32 size;
 914	int err;
 915
 916	/* Store these for later to free them */
 917	rx_dma = lan966x->rx.dma;
 918	rx_dcbs = lan966x->rx.dcbs;
 919	page_pool = lan966x->rx.page_pool;
 920
 921	napi_synchronize(&lan966x->napi);
 922	napi_disable(&lan966x->napi);
 923	lan966x_fdma_stop_netdev(lan966x);
 924
 925	lan966x_fdma_rx_disable(&lan966x->rx);
 926	lan966x_fdma_rx_free_pages(&lan966x->rx);
 927	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
 928	lan966x->rx.max_mtu = new_mtu;
 929	err = lan966x_fdma_rx_alloc(&lan966x->rx);
 930	if (err)
 931		goto restore;
 932	lan966x_fdma_rx_start(&lan966x->rx);
 933
 934	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
 935	size = ALIGN(size, PAGE_SIZE);
 936	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
 937
 938	page_pool_destroy(page_pool);
 939
 940	lan966x_fdma_wakeup_netdev(lan966x);
 941	napi_enable(&lan966x->napi);
 942
 943	return err;
 944restore:
 945	lan966x->rx.page_pool = page_pool;
 946	lan966x->rx.dma = rx_dma;
 947	lan966x->rx.dcbs = rx_dcbs;
 948	lan966x_fdma_rx_start(&lan966x->rx);
 949
 950	return err;
 951}
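/* Reload sequence above: NAPI and the netdev queues are quiesced, the RX
 * channel is disabled and its pages returned, then a new page pool, page
 * order and DCB ring are allocated for the new size and RX is restarted.
 * Only once the new ring is running are the old DCB block and page pool
 * freed; if the allocation fails, the saved pointers are restored and the
 * old ring is restarted instead.
 */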
 952
 953static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
 954{
 955	return lan966x_fdma_get_max_mtu(lan966x) +
 956	       IFH_LEN_BYTES +
 957	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
 958	       VLAN_HLEN * 2 +
 959	       XDP_PACKET_HEADROOM;
 960}
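/* The RX buffer budget above is the largest MAC MAXLEN configured on any
 * port plus the extraction/injection header (IFH_LEN_BYTES), room for two
 * VLAN tags, XDP_PACKET_HEADROOM in front of the frame and the
 * skb_shared_info that build_skb() keeps at the tail of the buffer.
 */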
 961
 962static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
 963{
 964	int err;
 965	u32 val;
 966
 967	/* Disable the CPU port */
 968	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
 969		QSYS_SW_PORT_MODE_PORT_ENA,
 970		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
 971
 972	/* Flush the CPU queues */
 973	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
 974			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
 975			   READL_SLEEP_US, READL_TIMEOUT_US);
 976
 977	/* Add a sleep in case there are frames between the queues and the CPU
 978	 * port
 979	 */
 980	usleep_range(1000, 2000);
 981
 982	err = lan966x_fdma_reload(lan966x, max_mtu);
 983
 984	/* Re-enable the CPU port */
 985	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
 986		QSYS_SW_PORT_MODE_PORT_ENA,
 987		lan966x,  QSYS_SW_PORT_MODE(CPU_PORT));
 988
 989	return err;
 990}
 991
 992int lan966x_fdma_change_mtu(struct lan966x *lan966x)
 993{
 994	int max_mtu;
 995
 996	max_mtu = lan966x_fdma_get_max_frame(lan966x);
 997	if (max_mtu == lan966x->rx.max_mtu)
 998		return 0;
 999
1000	return __lan966x_fdma_reload(lan966x, max_mtu);
1001}
1002
1003int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
1004{
1005	int max_mtu;
1006
1007	max_mtu = lan966x_fdma_get_max_frame(lan966x);
1008	return __lan966x_fdma_reload(lan966x, max_mtu);
1009}
1010
1011void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
1012{
1013	if (lan966x->fdma_ndev)
1014		return;
1015
1016	lan966x->fdma_ndev = dev;
1017	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
1018	napi_enable(&lan966x->napi);
1019}
1020
1021void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
1022{
1023	if (lan966x->fdma_ndev == dev) {
1024		netif_napi_del(&lan966x->napi);
1025		lan966x->fdma_ndev = NULL;
1026	}
1027}
1028
1029int lan966x_fdma_init(struct lan966x *lan966x)
1030{
1031	int err;
1032
1033	if (!lan966x->fdma)
1034		return 0;
1035
1036	lan966x->rx.lan966x = lan966x;
1037	lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
1038	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
1039	lan966x->tx.lan966x = lan966x;
1040	lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
1041	lan966x->tx.last_in_use = -1;
1042
1043	err = lan966x_fdma_rx_alloc(&lan966x->rx);
1044	if (err)
1045		return err;
1046
1047	err = lan966x_fdma_tx_alloc(&lan966x->tx);
1048	if (err) {
1049		lan966x_fdma_rx_free(&lan966x->rx);
1050		return err;
1051	}
1052
1053	lan966x_fdma_rx_start(&lan966x->rx);
1054
1055	return 0;
1056}
1057
1058void lan966x_fdma_deinit(struct lan966x *lan966x)
1059{
1060	if (!lan966x->fdma)
1061		return;
1062
1063	lan966x_fdma_rx_disable(&lan966x->rx);
1064	lan966x_fdma_tx_disable(&lan966x->tx);
1065
1066	napi_synchronize(&lan966x->napi);
1067	napi_disable(&lan966x->napi);
1068
1069	lan966x_fdma_rx_free_pages(&lan966x->rx);
1070	lan966x_fdma_rx_free(&lan966x->rx);
1071	page_pool_destroy(lan966x->rx.page_pool);
1072	lan966x_fdma_tx_free(&lan966x->tx);
1073}
v6.2
   1// SPDX-License-Identifier: GPL-2.0+
   2
   3#include <linux/bpf.h>
   4#include <linux/filter.h>
   5
   6#include "lan966x_main.h"
   7
   8static int lan966x_fdma_channel_active(struct lan966x *lan966x)
   9{
  10	return lan_rd(lan966x, FDMA_CH_ACTIVE);
  11}
  12
  13static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
  14					       struct lan966x_db *db)
  15{
  16	struct page *page;
  17
  18	page = page_pool_dev_alloc_pages(rx->page_pool);
  19	if (unlikely(!page))
  20		return NULL;
  21
  22	db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
  23
  24	return page;
  25}
  26
  27static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
  28{
  29	int i, j;
  30
  31	for (i = 0; i < FDMA_DCB_MAX; ++i) {
  32		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
  33			page_pool_put_full_page(rx->page_pool,
  34						rx->page[i][j], false);
  35	}
  36}
  37
  38static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
  39{
  40	struct page *page;
  41
  42	page = rx->page[rx->dcb_index][rx->db_index];
  43	if (unlikely(!page))
  44		return;
  45
  46	page_pool_recycle_direct(rx->page_pool, page);
  47}
  48
  49static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
  50				    struct lan966x_rx_dcb *dcb,
  51				    u64 nextptr)
  52{
  53	struct lan966x_db *db;
  54	int i;
  55
  56	for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
  57		db = &dcb->db[i];
  58		db->status = FDMA_DCB_STATUS_INTR;
  59	}
  60
  61	dcb->nextptr = FDMA_DCB_INVALID_DATA;
  62	dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
  63
  64	rx->last_entry->nextptr = nextptr;
  65	rx->last_entry = dcb;
  66}
  67
  68static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
  69{
  70	struct lan966x *lan966x = rx->lan966x;
  71	struct page_pool_params pp_params = {
  72		.order = rx->page_order,
  73		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
  74		.pool_size = FDMA_DCB_MAX,
  75		.nid = NUMA_NO_NODE,
  76		.dev = lan966x->dev,
  77		.dma_dir = DMA_FROM_DEVICE,
  78		.offset = XDP_PACKET_HEADROOM,
  79		.max_len = rx->max_mtu -
  80			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
  81	};
  82
  83	if (lan966x_xdp_present(lan966x))
  84		pp_params.dma_dir = DMA_BIDIRECTIONAL;
  85
  86	rx->page_pool = page_pool_create(&pp_params);
  87
  88	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
  89		struct lan966x_port *port;
  90
  91		if (!lan966x->ports[i])
  92			continue;
  93
  94		port = lan966x->ports[i];
  95		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
  96		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
  97					   rx->page_pool);
  98	}
  99
 100	return PTR_ERR_OR_ZERO(rx->page_pool);
 101}
 102
 103static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
 104{
 105	struct lan966x *lan966x = rx->lan966x;
 106	struct lan966x_rx_dcb *dcb;
 107	struct lan966x_db *db;
 108	struct page *page;
 109	int i, j;
 110	int size;
 111
 112	if (lan966x_fdma_rx_alloc_page_pool(rx))
 113		return PTR_ERR(rx->page_pool);
 114
 115	/* calculate how many pages are needed to allocate the dcbs */
 116	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
 117	size = ALIGN(size, PAGE_SIZE);
 118
 119	rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
 120	if (!rx->dcbs)
 121		return -ENOMEM;
 122
 123	rx->last_entry = rx->dcbs;
 124	rx->db_index = 0;
 125	rx->dcb_index = 0;
 126
 127	/* Now for each dcb allocate the dbs */
 128	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 129		dcb = &rx->dcbs[i];
 130		dcb->info = 0;
 131
 132		/* For each db allocate a page and map it to the DB dataptr. */
 133		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
 134			db = &dcb->db[j];
 135			page = lan966x_fdma_rx_alloc_page(rx, db);
 136			if (!page)
 137				return -ENOMEM;
 138
 139			db->status = 0;
 140			rx->page[i][j] = page;
 141		}
 142
 143		lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
 144	}
 145
 146	return 0;
 147}
 148
 149static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
 150{
 151	rx->dcb_index++;
 152	rx->dcb_index &= FDMA_DCB_MAX - 1;
 153}
 154
 155static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
 156{
 157	struct lan966x *lan966x = rx->lan966x;
 158	u32 size;
 159
 160	/* Now it is possible to clean up the dcbs */
 161	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
 162	size = ALIGN(size, PAGE_SIZE);
 163	dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
 164}
 165
 166static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
 167{
 168	struct lan966x *lan966x = rx->lan966x;
 169	u32 mask;
 170
 171	/* When activating a channel, the address of the first DCB must be
 172	 * written before the channel is activated.
 173	 */
 174	lan_wr(lower_32_bits((u64)rx->dma), lan966x,
 175	       FDMA_DCB_LLP(rx->channel_id));
 176	lan_wr(upper_32_bits((u64)rx->dma), lan966x,
 177	       FDMA_DCB_LLP1(rx->channel_id));
 178
 179	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
 180	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
 181	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
 182	       FDMA_CH_CFG_CH_MEM_SET(1),
 183	       lan966x, FDMA_CH_CFG(rx->channel_id));
 184
 185	/* Start fdma */
 186	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
 187		FDMA_PORT_CTRL_XTR_STOP,
 188		lan966x, FDMA_PORT_CTRL(0));
 189
 190	/* Enable interrupts */
 191	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
 192	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
 193	mask |= BIT(rx->channel_id);
 194	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
 195		FDMA_INTR_DB_ENA_INTR_DB_ENA,
 196		lan966x, FDMA_INTR_DB_ENA);
 197
 198	/* Activate the channel */
 199	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
 200		FDMA_CH_ACTIVATE_CH_ACTIVATE,
 201		lan966x, FDMA_CH_ACTIVATE);
 202}
 203
 204static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
 205{
 206	struct lan966x *lan966x = rx->lan966x;
 207	u32 val;
 208
 209	/* Disable the channel */
 210	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
 211		FDMA_CH_DISABLE_CH_DISABLE,
 212		lan966x, FDMA_CH_DISABLE);
 213
 214	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
 215				  val, !(val & BIT(rx->channel_id)),
 216				  READL_SLEEP_US, READL_TIMEOUT_US);
 217
 218	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
 219		FDMA_CH_DB_DISCARD_DB_DISCARD,
 220		lan966x, FDMA_CH_DB_DISCARD);
 221}
 222
 223static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
 224{
 225	struct lan966x *lan966x = rx->lan966x;
 226
 227	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
 228		FDMA_CH_RELOAD_CH_RELOAD,
 229		lan966x, FDMA_CH_RELOAD);
 230}
 231
 232static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
 233				    struct lan966x_tx_dcb *dcb)
 234{
 235	dcb->nextptr = FDMA_DCB_INVALID_DATA;
 236	dcb->info = 0;
 237}
 238
 239static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
 240{
 241	struct lan966x *lan966x = tx->lan966x;
 242	struct lan966x_tx_dcb *dcb;
 243	struct lan966x_db *db;
 244	int size;
 245	int i, j;
 246
 247	tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
 248			       GFP_KERNEL);
 249	if (!tx->dcbs_buf)
 250		return -ENOMEM;
 251
 252	/* calculate how many pages are needed to allocate the dcbs */
 253	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
 254	size = ALIGN(size, PAGE_SIZE);
 255	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
 256	if (!tx->dcbs)
 257		goto out;
 258
 259	/* Now for each dcb allocate the db */
 260	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 261		dcb = &tx->dcbs[i];
 262
 263		for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
 264			db = &dcb->db[j];
 265			db->dataptr = 0;
 266			db->status = 0;
 267		}
 268
 269		lan966x_fdma_tx_add_dcb(tx, dcb);
 270	}
 271
 272	return 0;
 273
 274out:
 275	kfree(tx->dcbs_buf);
 276	return -ENOMEM;
 277}
 278
 279static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
 280{
 281	struct lan966x *lan966x = tx->lan966x;
 282	int size;
 283
 284	kfree(tx->dcbs_buf);
 285
 286	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
 287	size = ALIGN(size, PAGE_SIZE);
 288	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
 289}
 290
 291static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
 292{
 293	struct lan966x *lan966x = tx->lan966x;
 294	u32 mask;
 295
 296	/* When activating a channel, the address of the first DCB must be
 297	 * written before the channel is activated.
 298	 */
 299	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
 300	       FDMA_DCB_LLP(tx->channel_id));
 301	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
 302	       FDMA_DCB_LLP1(tx->channel_id));
 303
 304	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
 305	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
 306	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
 307	       FDMA_CH_CFG_CH_MEM_SET(1),
 308	       lan966x, FDMA_CH_CFG(tx->channel_id));
 309
 310	/* Start fdma */
 311	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
 312		FDMA_PORT_CTRL_INJ_STOP,
 313		lan966x, FDMA_PORT_CTRL(0));
 314
 315	/* Enable interrupts */
 316	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
 317	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
 318	mask |= BIT(tx->channel_id);
 319	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
 320		FDMA_INTR_DB_ENA_INTR_DB_ENA,
 321		lan966x, FDMA_INTR_DB_ENA);
 322
 323	/* Activate the channel */
 324	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
 325		FDMA_CH_ACTIVATE_CH_ACTIVATE,
 326		lan966x, FDMA_CH_ACTIVATE);
 327}
 328
 329static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
 330{
 331	struct lan966x *lan966x = tx->lan966x;
 332	u32 val;
 333
 334	/* Disable the channel */
 335	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
 336		FDMA_CH_DISABLE_CH_DISABLE,
 337		lan966x, FDMA_CH_DISABLE);
 338
 339	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
 340				  val, !(val & BIT(tx->channel_id)),
 341				  READL_SLEEP_US, READL_TIMEOUT_US);
 342
 343	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
 344		FDMA_CH_DB_DISCARD_DB_DISCARD,
 345		lan966x, FDMA_CH_DB_DISCARD);
 346
 347	tx->activated = false;
 348	tx->last_in_use = -1;
 349}
 350
 351static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
 352{
 353	struct lan966x *lan966x = tx->lan966x;
 354
 355	/* Write the registers to reload the channel */
 356	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
 357		FDMA_CH_RELOAD_CH_RELOAD,
 358		lan966x, FDMA_CH_RELOAD);
 359}
 360
 361static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
 362{
 363	struct lan966x_port *port;
 364	int i;
 365
 366	for (i = 0; i < lan966x->num_phys_ports; ++i) {
 367		port = lan966x->ports[i];
 368		if (!port)
 369			continue;
 370
 371		if (netif_queue_stopped(port->dev))
 372			netif_wake_queue(port->dev);
 373	}
 374}
 375
 376static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
 377{
 378	struct lan966x_port *port;
 379	int i;
 380
 381	for (i = 0; i < lan966x->num_phys_ports; ++i) {
 382		port = lan966x->ports[i];
 383		if (!port)
 384			continue;
 385
 386		netif_stop_queue(port->dev);
 387	}
 388}
 389
 390static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
 391{
 392	struct lan966x_tx *tx = &lan966x->tx;
 393	struct lan966x_tx_dcb_buf *dcb_buf;
 394	struct xdp_frame_bulk bq;
 395	struct lan966x_db *db;
 396	unsigned long flags;
 397	bool clear = false;
 398	int i;
 399
 400	xdp_frame_bulk_init(&bq);
 401
 402	spin_lock_irqsave(&lan966x->tx_lock, flags);
 403	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 404		dcb_buf = &tx->dcbs_buf[i];
 405
 406		if (!dcb_buf->used)
 407			continue;
 408
 409		db = &tx->dcbs[i].db[0];
 410		if (!(db->status & FDMA_DCB_STATUS_DONE))
 411			continue;
 412
 413		dcb_buf->dev->stats.tx_packets++;
 414		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
 415
 416		dcb_buf->used = false;
 417		if (dcb_buf->use_skb) {
 418			dma_unmap_single(lan966x->dev,
 419					 dcb_buf->dma_addr,
 420					 dcb_buf->len,
 421					 DMA_TO_DEVICE);
 422
 423			if (!dcb_buf->ptp)
 424				napi_consume_skb(dcb_buf->data.skb, weight);
 425		} else {
 426			if (dcb_buf->xdp_ndo)
 427				dma_unmap_single(lan966x->dev,
 428						 dcb_buf->dma_addr,
 429						 dcb_buf->len,
 430						 DMA_TO_DEVICE);
 431
 432			if (dcb_buf->xdp_ndo)
 433				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
 434			else
 435				xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
 436		}
 437
 438		clear = true;
 439	}
 440
 441	xdp_flush_frame_bulk(&bq);
 442
 443	if (clear)
 444		lan966x_fdma_wakeup_netdev(lan966x);
 445
 446	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
 447}
 448
 449static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
 450{
 451	struct lan966x_db *db;
 452
 453	/* Check if there is any data */
 454	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
 455	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
 456		return false;
 457
 458	return true;
 459}
 460
 461static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
 462{
 463	struct lan966x *lan966x = rx->lan966x;
 464	struct lan966x_port *port;
 465	struct lan966x_db *db;
 466	struct page *page;
 467
 468	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
 469	page = rx->page[rx->dcb_index][rx->db_index];
 470	if (unlikely(!page))
 471		return FDMA_ERROR;
 472
 473	dma_sync_single_for_cpu(lan966x->dev,
 474				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
 475				FDMA_DCB_STATUS_BLOCKL(db->status),
 476				DMA_FROM_DEVICE);
 477
 478	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
 479				 src_port);
 480	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
 481		return FDMA_ERROR;
 482
 483	port = lan966x->ports[*src_port];
 484	if (!lan966x_xdp_port_present(port))
 485		return FDMA_PASS;
 486
 487	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
 488}
 489
 490static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
 491						 u64 src_port)
 492{
 493	struct lan966x *lan966x = rx->lan966x;
 494	struct lan966x_db *db;
 495	struct sk_buff *skb;
 496	struct page *page;
 497	u64 timestamp;
 498
 499	/* Get the received frame and unmap it */
 500	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
 501	page = rx->page[rx->dcb_index][rx->db_index];
 502
 503	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
 504	if (unlikely(!skb))
 505		goto free_page;
 506
 507	skb_mark_for_recycle(skb);
 508
 509	skb_reserve(skb, XDP_PACKET_HEADROOM);
 510	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
 511
 512	lan966x_ifh_get_timestamp(skb->data, &timestamp);
 513
 514	skb->dev = lan966x->ports[src_port]->dev;
 515	skb_pull(skb, IFH_LEN_BYTES);
 516
 517	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
 518		skb_trim(skb, skb->len - ETH_FCS_LEN);
 519
 520	lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
 521	skb->protocol = eth_type_trans(skb, skb->dev);
 522
 523	if (lan966x->bridge_mask & BIT(src_port)) {
 524		skb->offload_fwd_mark = 1;
 525
 526		skb_reset_network_header(skb);
 527		if (!lan966x_hw_offload(lan966x, src_port, skb))
 528			skb->offload_fwd_mark = 0;
 529	}
 530
 531	skb->dev->stats.rx_bytes += skb->len;
 532	skb->dev->stats.rx_packets++;
 533
 534	return skb;
 535
 536free_page:
 537	page_pool_recycle_direct(rx->page_pool, page);
 538
 539	return NULL;
 540}
 541
 542static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
 543{
 544	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
 545	struct lan966x_rx *rx = &lan966x->rx;
 546	int dcb_reload = rx->dcb_index;
 547	struct lan966x_rx_dcb *old_dcb;
 548	struct lan966x_db *db;
 549	bool redirect = false;
 550	struct sk_buff *skb;
 551	struct page *page;
 552	int counter = 0;
 553	u64 src_port;
 554	u64 nextptr;
 555
 556	lan966x_fdma_tx_clear_buf(lan966x, weight);
 557
 558	/* Get all received skb */
 559	while (counter < weight) {
 560		if (!lan966x_fdma_rx_more_frames(rx))
 561			break;
 562
 563		counter++;
 564
 565		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
 566		case FDMA_PASS:
 567			break;
 568		case FDMA_ERROR:
 569			lan966x_fdma_rx_free_page(rx);
 570			lan966x_fdma_rx_advance_dcb(rx);
 571			goto allocate_new;
 572		case FDMA_REDIRECT:
 573			redirect = true;
 574			fallthrough;
 575		case FDMA_TX:
 576			lan966x_fdma_rx_advance_dcb(rx);
 577			continue;
 578		case FDMA_DROP:
 579			lan966x_fdma_rx_free_page(rx);
 580			lan966x_fdma_rx_advance_dcb(rx);
 581			continue;
 582		}
 583
 584		skb = lan966x_fdma_rx_get_frame(rx, src_port);
 585		lan966x_fdma_rx_advance_dcb(rx);
 586		if (!skb)
 587			goto allocate_new;
 588
 589		napi_gro_receive(&lan966x->napi, skb);
 590	}
 591
 592allocate_new:
 593	/* Allocate new pages and map them */
 594	while (dcb_reload != rx->dcb_index) {
 595		db = &rx->dcbs[dcb_reload].db[rx->db_index];
 596		page = lan966x_fdma_rx_alloc_page(rx, db);
 597		if (unlikely(!page))
 598			break;
 599		rx->page[dcb_reload][rx->db_index] = page;
 600
 601		old_dcb = &rx->dcbs[dcb_reload];
 602		dcb_reload++;
 603		dcb_reload &= FDMA_DCB_MAX - 1;
 604
 605		nextptr = rx->dma + ((unsigned long)old_dcb -
 606				     (unsigned long)rx->dcbs);
 607		lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
 608		lan966x_fdma_rx_reload(rx);
 609	}
 610
 611	if (redirect)
 612		xdp_do_flush();
 613
 614	if (counter < weight && napi_complete_done(napi, counter))
 615		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
 616
 617	return counter;
 618}
 619
 620irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
 621{
 622	struct lan966x *lan966x = args;
 623	u32 db, err, err_type;
 624
 625	db = lan_rd(lan966x, FDMA_INTR_DB);
 626	err = lan_rd(lan966x, FDMA_INTR_ERR);
 627
 628	if (db) {
 629		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
 630		lan_wr(db, lan966x, FDMA_INTR_DB);
 631
 632		napi_schedule(&lan966x->napi);
 633	}
 634
 635	if (err) {
 636		err_type = lan_rd(lan966x, FDMA_ERRORS);
 637
 638		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
 639
 640		lan_wr(err, lan966x, FDMA_INTR_ERR);
 641		lan_wr(err_type, lan966x, FDMA_ERRORS);
 642	}
 643
 644	return IRQ_HANDLED;
 645}
 646
 647static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
 648{
 649	struct lan966x_tx_dcb_buf *dcb_buf;
 650	int i;
 651
 652	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 653		dcb_buf = &tx->dcbs_buf[i];
 654		if (!dcb_buf->used && i != tx->last_in_use)
 655			return i;
 656	}
 657
 658	return -1;
 659}
 660
 661static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
 662				      int next_to_use, int len,
 663				      dma_addr_t dma_addr)
 664{
 665	struct lan966x_tx_dcb *next_dcb;
 666	struct lan966x_db *next_db;
 667
 668	next_dcb = &tx->dcbs[next_to_use];
 669	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
 670
 671	next_db = &next_dcb->db[0];
 672	next_db->dataptr = dma_addr;
 673	next_db->status = FDMA_DCB_STATUS_SOF |
 674			  FDMA_DCB_STATUS_EOF |
 675			  FDMA_DCB_STATUS_INTR |
 676			  FDMA_DCB_STATUS_BLOCKO(0) |
 677			  FDMA_DCB_STATUS_BLOCKL(len);
 678}
 679
 680static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
 681{
 682	struct lan966x *lan966x = tx->lan966x;
 683	struct lan966x_tx_dcb *dcb;
 684
 685	if (likely(lan966x->tx.activated)) {
 686		/* Connect the last used dcb to the next dcb */
 687		dcb = &tx->dcbs[tx->last_in_use];
 688		dcb->nextptr = tx->dma + (next_to_use *
 689					  sizeof(struct lan966x_tx_dcb));
 690
 691		lan966x_fdma_tx_reload(tx);
 692	} else {
 693		/* This is the first transmission, so just activate the channel */
 694		lan966x->tx.activated = true;
 695		lan966x_fdma_tx_activate(tx);
 696	}
 697
 698	/* This dcb is now the last one in use */
 699	tx->last_in_use = next_to_use;
 700}
 701
 702int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
 703			   struct xdp_frame *xdpf,
 704			   struct page *page,
 705			   bool dma_map)
 706{
 707	struct lan966x *lan966x = port->lan966x;
 708	struct lan966x_tx_dcb_buf *next_dcb_buf;
 709	struct lan966x_tx *tx = &lan966x->tx;
 710	dma_addr_t dma_addr;
 711	int next_to_use;
 712	__be32 *ifh;
 713	int ret = 0;
 714
 715	spin_lock(&lan966x->tx_lock);
 716
 717	/* Get next index */
 718	next_to_use = lan966x_fdma_get_next_dcb(tx);
 719	if (next_to_use < 0) {
 720		netif_stop_queue(port->dev);
 721		ret = NETDEV_TX_BUSY;
 722		goto out;
 723	}
 724
 725	/* Generate new IFH */
 726	if (dma_map) {
 727		if (xdpf->headroom < IFH_LEN_BYTES) {
 728			ret = NETDEV_TX_OK;
 729			goto out;
 730		}
 731
 732		ifh = xdpf->data - IFH_LEN_BYTES;
 733		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
 734		lan966x_ifh_set_bypass(ifh, 1);
 735		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
 736
 737		dma_addr = dma_map_single(lan966x->dev,
 738					  xdpf->data - IFH_LEN_BYTES,
 739					  xdpf->len + IFH_LEN_BYTES,
 740					  DMA_TO_DEVICE);
 741		if (dma_mapping_error(lan966x->dev, dma_addr)) {
 742			ret = NETDEV_TX_OK;
 743			goto out;
 744		}
 745
 746		/* Setup next dcb */
 747		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
 748					  xdpf->len + IFH_LEN_BYTES,
 749					  dma_addr);
 750	} else {
 751		ifh = page_address(page) + XDP_PACKET_HEADROOM;
 752		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
 753		lan966x_ifh_set_bypass(ifh, 1);
 754		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
 755
 756		dma_addr = page_pool_get_dma_addr(page);
 757		dma_sync_single_for_device(lan966x->dev,
 758					   dma_addr + XDP_PACKET_HEADROOM,
 759					   xdpf->len + IFH_LEN_BYTES,
 760					   DMA_TO_DEVICE);
 761
 762		/* Setup next dcb */
 763		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
 764					  xdpf->len + IFH_LEN_BYTES,
 765					  dma_addr + XDP_PACKET_HEADROOM);
 766	}
 767
 768	/* Fill up the buffer */
 769	next_dcb_buf = &tx->dcbs_buf[next_to_use];
 770	next_dcb_buf->use_skb = false;
 771	next_dcb_buf->data.xdpf = xdpf;
 772	next_dcb_buf->xdp_ndo = dma_map;
 773	next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
 774	next_dcb_buf->dma_addr = dma_addr;
 775	next_dcb_buf->used = true;
 776	next_dcb_buf->ptp = false;
 777	next_dcb_buf->dev = port->dev;
 778
 779	/* Start the transmission */
 780	lan966x_fdma_tx_start(tx, next_to_use);
 781
 782out:
 783	spin_unlock(&lan966x->tx_lock);
 784
 785	return ret;
 786}
 787
 788int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
 789{
 790	struct lan966x_port *port = netdev_priv(dev);
 791	struct lan966x *lan966x = port->lan966x;
 792	struct lan966x_tx_dcb_buf *next_dcb_buf;
 793	struct lan966x_tx *tx = &lan966x->tx;
 794	int needed_headroom;
 795	int needed_tailroom;
 796	dma_addr_t dma_addr;
 797	int next_to_use;
 798	int err;
 799
 800	/* Get next index */
 801	next_to_use = lan966x_fdma_get_next_dcb(tx);
 802	if (next_to_use < 0) {
 803		netif_stop_queue(dev);
 804		return NETDEV_TX_BUSY;
 805	}
 806
 807	if (skb_put_padto(skb, ETH_ZLEN)) {
 808		dev->stats.tx_dropped++;
 809		return NETDEV_TX_OK;
 810	}
 811
 812	/* skb processing */
 813	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
 814	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
 815	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
 816		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
 817				       GFP_ATOMIC);
 818		if (unlikely(err)) {
 819			dev->stats.tx_dropped++;
 820			err = NETDEV_TX_OK;
 821			goto release;
 822		}
 823	}
 824
 825	skb_tx_timestamp(skb);
 826	skb_push(skb, IFH_LEN_BYTES);
 827	memcpy(skb->data, ifh, IFH_LEN_BYTES);
 828	skb_put(skb, 4);
 829
 830	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
 831				  DMA_TO_DEVICE);
 832	if (dma_mapping_error(lan966x->dev, dma_addr)) {
 833		dev->stats.tx_dropped++;
 834		err = NETDEV_TX_OK;
 835		goto release;
 836	}
 837
 838	/* Setup next dcb */
 839	lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
 840
 841	/* Fill up the buffer */
 842	next_dcb_buf = &tx->dcbs_buf[next_to_use];
 843	next_dcb_buf->use_skb = true;
 844	next_dcb_buf->data.skb = skb;
 845	next_dcb_buf->xdp_ndo = false;
 846	next_dcb_buf->len = skb->len;
 847	next_dcb_buf->dma_addr = dma_addr;
 848	next_dcb_buf->used = true;
 849	next_dcb_buf->ptp = false;
 850	next_dcb_buf->dev = dev;
 851
 852	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
 853	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
 854		next_dcb_buf->ptp = true;
 855
 856	/* Start the transmission */
 857	lan966x_fdma_tx_start(tx, next_to_use);
 858
 859	return NETDEV_TX_OK;
 860
 861release:
 862	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
 863	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
 864		lan966x_ptp_txtstamp_release(port, skb);
 865
 866	dev_kfree_skb_any(skb);
 867	return err;
 868}
 869
 870static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
 871{
 872	int max_mtu = 0;
 873	int i;
 874
 875	for (i = 0; i < lan966x->num_phys_ports; ++i) {
 876		struct lan966x_port *port;
 877		int mtu;
 878
 879		port = lan966x->ports[i];
 880		if (!port)
 881			continue;
 882
 883		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
 884		if (mtu > max_mtu)
 885			max_mtu = mtu;
 886	}
 887
 888	return max_mtu;
 889}
 890
 891static int lan966x_qsys_sw_status(struct lan966x *lan966x)
 892{
 893	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
 894}
 895
 896static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
 897{
 898	struct page_pool *page_pool;
 899	dma_addr_t rx_dma;
 900	void *rx_dcbs;
 901	u32 size;
 902	int err;
 903
 904	/* Store these for later to free them */
 905	rx_dma = lan966x->rx.dma;
 906	rx_dcbs = lan966x->rx.dcbs;
 907	page_pool = lan966x->rx.page_pool;
 908
 909	napi_synchronize(&lan966x->napi);
 910	napi_disable(&lan966x->napi);
 911	lan966x_fdma_stop_netdev(lan966x);
 912
 913	lan966x_fdma_rx_disable(&lan966x->rx);
 914	lan966x_fdma_rx_free_pages(&lan966x->rx);
 915	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
 916	lan966x->rx.max_mtu = new_mtu;
 917	err = lan966x_fdma_rx_alloc(&lan966x->rx);
 918	if (err)
 919		goto restore;
 920	lan966x_fdma_rx_start(&lan966x->rx);
 921
 922	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
 923	size = ALIGN(size, PAGE_SIZE);
 924	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
 925
 926	page_pool_destroy(page_pool);
 927
 928	lan966x_fdma_wakeup_netdev(lan966x);
 929	napi_enable(&lan966x->napi);
 930
 931	return err;
 932restore:
 933	lan966x->rx.page_pool = page_pool;
 934	lan966x->rx.dma = rx_dma;
 935	lan966x->rx.dcbs = rx_dcbs;
 936	lan966x_fdma_rx_start(&lan966x->rx);
 937
 938	return err;
 939}
 940
 941static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
 942{
 943	return lan966x_fdma_get_max_mtu(lan966x) +
 944	       IFH_LEN_BYTES +
 945	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
 946	       VLAN_HLEN * 2 +
 947	       XDP_PACKET_HEADROOM;
 948}
 949
 950static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
 951{
 952	int err;
 953	u32 val;
 954
 955	/* Disable the CPU port */
 956	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
 957		QSYS_SW_PORT_MODE_PORT_ENA,
 958		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
 959
 960	/* Flush the CPU queues */
 961	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
 962			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
 963			   READL_SLEEP_US, READL_TIMEOUT_US);
 964
 965	/* Add a sleep in case there are frames between the queues and the CPU
 966	 * port
 967	 */
 968	usleep_range(1000, 2000);
 969
 970	err = lan966x_fdma_reload(lan966x, max_mtu);
 971
 972	/* Re-enable the CPU port */
 973	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
 974		QSYS_SW_PORT_MODE_PORT_ENA,
 975		lan966x,  QSYS_SW_PORT_MODE(CPU_PORT));
 976
 977	return err;
 978}
 979
 980int lan966x_fdma_change_mtu(struct lan966x *lan966x)
 981{
 982	int max_mtu;
 983
 984	max_mtu = lan966x_fdma_get_max_frame(lan966x);
 985	if (max_mtu == lan966x->rx.max_mtu)
 986		return 0;
 987
 988	return __lan966x_fdma_reload(lan966x, max_mtu);
 989}
 990
 991int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
 992{
 993	int max_mtu;
 994
 995	max_mtu = lan966x_fdma_get_max_frame(lan966x);
 996	return __lan966x_fdma_reload(lan966x, max_mtu);
 997}
 998
 999void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
1000{
1001	if (lan966x->fdma_ndev)
1002		return;
1003
1004	lan966x->fdma_ndev = dev;
1005	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
1006	napi_enable(&lan966x->napi);
1007}
1008
1009void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
1010{
1011	if (lan966x->fdma_ndev == dev) {
1012		netif_napi_del(&lan966x->napi);
1013		lan966x->fdma_ndev = NULL;
1014	}
1015}
1016
1017int lan966x_fdma_init(struct lan966x *lan966x)
1018{
1019	int err;
1020
1021	if (!lan966x->fdma)
1022		return 0;
1023
1024	lan966x->rx.lan966x = lan966x;
1025	lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
1026	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
1027	lan966x->tx.lan966x = lan966x;
1028	lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
1029	lan966x->tx.last_in_use = -1;
1030
1031	err = lan966x_fdma_rx_alloc(&lan966x->rx);
1032	if (err)
1033		return err;
1034
1035	err = lan966x_fdma_tx_alloc(&lan966x->tx);
1036	if (err) {
1037		lan966x_fdma_rx_free(&lan966x->rx);
1038		return err;
1039	}
1040
1041	lan966x_fdma_rx_start(&lan966x->rx);
1042
1043	return 0;
1044}
1045
1046void lan966x_fdma_deinit(struct lan966x *lan966x)
1047{
1048	if (!lan966x->fdma)
1049		return;
1050
1051	lan966x_fdma_rx_disable(&lan966x->rx);
1052	lan966x_fdma_tx_disable(&lan966x->tx);
1053
1054	napi_synchronize(&lan966x->napi);
1055	napi_disable(&lan966x->napi);
1056
1057	lan966x_fdma_rx_free_pages(&lan966x->rx);
1058	lan966x_fdma_rx_free(&lan966x->rx);
1059	page_pool_destroy(lan966x->rx.page_pool);
1060	lan966x_fdma_tx_free(&lan966x->tx);
1061}