   1// SPDX-License-Identifier: GPL-2.0
   2/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
   3 *
   4 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
   5 *
   6 */
   7
   8#include <linux/bpf_trace.h>
   9#include <linux/clk.h>
  10#include <linux/etherdevice.h>
  11#include <linux/if_vlan.h>
  12#include <linux/interrupt.h>
  13#include <linux/irqdomain.h>
  14#include <linux/kernel.h>
  15#include <linux/kmemleak.h>
  16#include <linux/module.h>
  17#include <linux/netdevice.h>
  18#include <linux/net_tstamp.h>
  19#include <linux/of.h>
  20#include <linux/of_mdio.h>
  21#include <linux/of_net.h>
  22#include <linux/of_device.h>
  23#include <linux/of_platform.h>
  24#include <linux/phylink.h>
  25#include <linux/phy/phy.h>
  26#include <linux/platform_device.h>
  27#include <linux/pm_runtime.h>
  28#include <linux/regmap.h>
  29#include <linux/rtnetlink.h>
  30#include <linux/mfd/syscon.h>
  31#include <linux/sys_soc.h>
  32#include <linux/dma/ti-cppi5.h>
  33#include <linux/dma/k3-udma-glue.h>
  34#include <net/page_pool/helpers.h>
  35#include <net/switchdev.h>
  36
  37#include "cpsw_ale.h"
  38#include "cpsw_sl.h"
  39#include "am65-cpsw-nuss.h"
  40#include "am65-cpsw-switchdev.h"
  41#include "k3-cppi-desc-pool.h"
  42#include "am65-cpts.h"
  43
  44#define AM65_CPSW_SS_BASE	0x0
  45#define AM65_CPSW_SGMII_BASE	0x100
  46#define AM65_CPSW_XGMII_BASE	0x2100
  47#define AM65_CPSW_CPSW_NU_BASE	0x20000
  48#define AM65_CPSW_NU_PORTS_BASE	0x1000
  49#define AM65_CPSW_NU_FRAM_BASE	0x12000
  50#define AM65_CPSW_NU_STATS_BASE	0x1a000
  51#define AM65_CPSW_NU_ALE_BASE	0x1e000
  52#define AM65_CPSW_NU_CPTS_BASE	0x1d000
  53
  54#define AM65_CPSW_NU_PORTS_OFFSET	0x1000
  55#define AM65_CPSW_NU_STATS_PORT_OFFSET	0x200
  56#define AM65_CPSW_NU_FRAM_PORT_OFFSET	0x200
  57
  58#define AM65_CPSW_MAX_PORTS	8
  59
  60#define AM65_CPSW_MIN_PACKET_SIZE	VLAN_ETH_ZLEN
  61#define AM65_CPSW_MAX_PACKET_SIZE	2024
  62
  63#define AM65_CPSW_REG_CTL		0x004
  64#define AM65_CPSW_REG_STAT_PORT_EN	0x014
  65#define AM65_CPSW_REG_PTYPE		0x018
  66
  67#define AM65_CPSW_P0_REG_CTL			0x004
  68#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET	0x008
  69
  70#define AM65_CPSW_PORT_REG_PRI_CTL		0x01c
  71#define AM65_CPSW_PORT_REG_RX_PRI_MAP		0x020
  72#define AM65_CPSW_PORT_REG_RX_MAXLEN		0x024
  73
  74#define AM65_CPSW_PORTN_REG_CTL			0x004
  75#define AM65_CPSW_PORTN_REG_DSCP_MAP		0x120
  76#define AM65_CPSW_PORTN_REG_SA_L		0x308
  77#define AM65_CPSW_PORTN_REG_SA_H		0x30c
  78#define AM65_CPSW_PORTN_REG_TS_CTL              0x310
  79#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG	0x314
  80#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG	0x318
  81#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2       0x31C
  82
  83#define AM65_CPSW_SGMII_CONTROL_REG		0x010
  84#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG	0x018
  85#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE	BIT(0)
  86
  87#define AM65_CPSW_CTL_VLAN_AWARE		BIT(1)
  88#define AM65_CPSW_CTL_P0_ENABLE			BIT(2)
  89#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE		BIT(13)
  90#define AM65_CPSW_CTL_P0_RX_PAD			BIT(14)
  91
  92/* AM65_CPSW_P0_REG_CTL */
  93#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN	BIT(0)
  94#define AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN	BIT(16)
  95
  96/* AM65_CPSW_PORT_REG_PRI_CTL */
  97#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN	BIT(8)
  98
  99/* AM65_CPSW_PN_REG_CTL */
 100#define AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN	BIT(1)
 101#define AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN	BIT(2)
 102
 103/* AM65_CPSW_PN_TS_CTL register fields */
 104#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN		BIT(4)
 105#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN	BIT(5)
 106#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN	BIT(6)
 107#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN		BIT(7)
 108#define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN		BIT(10)
 109#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN	BIT(11)
 110#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT	16
 111
 112#define AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN		BIT(0)
 113#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN	BIT(1)
 114#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT2_EN	BIT(2)
 115#define AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN		BIT(3)
 116#define AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN		BIT(9)
 117
 118/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
 119#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT	16
 120
 121/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
 122#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107	BIT(16)
 123#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129	BIT(17)
 124#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130	BIT(18)
 125#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131	BIT(19)
 126#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132	BIT(20)
 127#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319	BIT(21)
 128#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320	BIT(22)
 129#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO BIT(23)
 130
 131/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
 132#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
 133
 134#define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e)
 135
 136#define AM65_CPSW_TS_TX_ANX_ALL_EN		\
 137	(AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN |	\
 138	 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN |	\
 139	 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
 140
 141#define AM65_CPSW_TS_RX_ANX_ALL_EN		\
 142	(AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN |	\
 143	 AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN |	\
 144	 AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)
 145
 146#define AM65_CPSW_ALE_AGEOUT_DEFAULT	30
 147/* Number of TX/RX descriptors per channel/flow */
 148#define AM65_CPSW_MAX_TX_DESC	500
 149#define AM65_CPSW_MAX_RX_DESC	500
 150
 151#define AM65_CPSW_NAV_PS_DATA_SIZE 16
 152#define AM65_CPSW_NAV_SW_DATA_SIZE 16
 153
 154#define AM65_CPSW_DEBUG	(NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
 155			 NETIF_MSG_IFUP	| NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
 156			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
 157
 158#define AM65_CPSW_DEFAULT_TX_CHNS	8
 159#define AM65_CPSW_DEFAULT_RX_CHN_FLOWS	1
 160
 161/* CPPI streaming packet interface */
 162#define AM65_CPSW_CPPI_TX_FLOW_ID  0x3FFF
 163#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
 164
 165/* XDP */
 166#define AM65_CPSW_XDP_CONSUMED BIT(1)
 167#define AM65_CPSW_XDP_REDIRECT BIT(0)
 168#define AM65_CPSW_XDP_PASS     0
 169
 170/* Include headroom compatible with both skb and xdpf */
 171#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
 172#define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long))
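     /* For illustration, assuming typical (but architecture/config dependent)
      * values NET_SKB_PAD = 64, XDP_PACKET_HEADROOM = 256 and NET_IP_ALIGN = 2:
      * AM65_CPSW_HEADROOM_NA = 258 and AM65_CPSW_HEADROOM rounds up to 264,
      * keeping the packet data start long-aligned.
      */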
 173
 174static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
 175				      const u8 *dev_addr)
 176{
 177	u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
 178		     (dev_addr[2] << 16) | (dev_addr[3] << 24);
 179	u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);
 180
 181	writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
 182	writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
 183}
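     /* Example of the packing above, using an illustrative (locally
      * administered) address 02:01:02:03:aa:bb: mac_hi = 0x03020102 and
      * mac_lo = 0x0000bbaa, i.e. the first four octets go to SA_H and the
      * last two to SA_L, least-significant byte first.
      */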
 184
 185#define AM65_CPSW_DSCP_MAX	GENMASK(5, 0)
 186#define AM65_CPSW_PRI_MAX	GENMASK(2, 0)
 187#define AM65_CPSW_DSCP_PRI_PER_REG	8
 188#define AM65_CPSW_DSCP_PRI_SIZE		4	/* in bits */
 189static int am65_cpsw_port_set_dscp_map(struct am65_cpsw_port *slave, u8 dscp, u8 pri)
 190{
 191	int reg_ofs;
 192	int bit_ofs;
 193	u32 val;
 194
 195	if (dscp > AM65_CPSW_DSCP_MAX)
 196		return -EINVAL;
 197
 198	if (pri > AM65_CPSW_PRI_MAX)
 199		return -EINVAL;
 200
 201	/* 32-bit register offset to this dscp */
 202	reg_ofs = (dscp / AM65_CPSW_DSCP_PRI_PER_REG) * 4;
 203	/* bit field offset to this dscp */
 204	bit_ofs = AM65_CPSW_DSCP_PRI_SIZE * (dscp % AM65_CPSW_DSCP_PRI_PER_REG);
 205
 206	val = readl(slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs);
 207	val &= ~(AM65_CPSW_PRI_MAX << bit_ofs);	/* clear */
 208	val |= pri << bit_ofs;			/* set */
 209	writel(val, slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs);
 210
 211	return 0;
 212}
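     /* Worked example of the index math above: for DSCP 46 (EF),
      * reg_ofs = (46 / 8) * 4 = 0x14 and bit_ofs = 4 * (46 % 8) = 24, so the
      * priority field for EF starts at bit 24 of the DSCP_MAP word at
      * offset 0x14.
      */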
 213
 214static void am65_cpsw_port_enable_dscp_map(struct am65_cpsw_port *slave)
 215{
 216	int dscp, pri;
 217	u32 val;
 218
 219	/* Default DSCP to User Priority mapping as per:
 220	 * https://datatracker.ietf.org/doc/html/rfc8325#section-4.3
 221	 * and
 222	 * https://datatracker.ietf.org/doc/html/rfc8622#section-11
 223	 */
 224	for (dscp = 0; dscp <= AM65_CPSW_DSCP_MAX; dscp++) {
 225		switch (dscp) {
 226		case 56:	/* CS7 */
 227		case 48:	/* CS6 */
 228			pri = 7;
 229			break;
 230		case 46:	/* EF */
 231		case 44:	/* VA */
 232			pri = 6;
 233			break;
 234		case 40:	/* CS5 */
 235			pri = 5;
 236			break;
 237		case 34:	/* AF41 */
 238		case 36:	/* AF42 */
 239		case 38:	/* AF43 */
 240		case 32:	/* CS4 */
 241		case 26:	/* AF31 */
 242		case 28:	/* AF32 */
 243		case 30:	/* AF33 */
 244		case 24:	/* CS3 */
 245			pri = 4;
 246			break;
 247		case 18:	/* AF21 */
 248		case 20:	/* AF22 */
 249		case 22:	/* AF23 */
 250			pri = 3;
 251			break;
 252		case 16:	/* CS2 */
 253		case 10:	/* AF11 */
 254		case 12:	/* AF12 */
 255		case 14:	/* AF13 */
 256		case 0:		/* DF */
 257			pri = 0;
 258			break;
 259		case 8:		/* CS1 */
 260		case 1:		/* LE */
 261			pri = 1;
 262			break;
 263		default:
 264			pri = 0;
 265			break;
 266		}
 267
 268		am65_cpsw_port_set_dscp_map(slave, dscp, pri);
 269	}
 270
 271	/* enable port IPV4 and IPV6 DSCP for this port */
 272	val = readl(slave->port_base + AM65_CPSW_PORTN_REG_CTL);
 273	val |= AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN |
 274		AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN;
 275	writel(val, slave->port_base + AM65_CPSW_PORTN_REG_CTL);
 276}
 277
 278static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
 279{
 280	cpsw_sl_reset(port->slave.mac_sl, 100);
 281	/* Max length register has to be restored after MAC SL reset */
 282	writel(AM65_CPSW_MAX_PACKET_SIZE,
 283	       port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
 284}
 285
 286static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
 287{
 288	common->nuss_ver = readl(common->ss_base);
 289	common->cpsw_ver = readl(common->cpsw_base);
 290	dev_info(common->dev,
 291		 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n",
 292		common->nuss_ver,
 293		common->cpsw_ver,
 294		common->port_num + 1,
 295		common->pdata.quirks);
 296}
 297
 298static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
 299					    __be16 proto, u16 vid)
 300{
 301	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 302	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 303	u32 port_mask, unreg_mcast = 0;
 304	int ret;
 305
 306	if (!common->is_emac_mode)
 307		return 0;
 308
 309	if (!netif_running(ndev) || !vid)
 310		return 0;
 311
 312	ret = pm_runtime_resume_and_get(common->dev);
 313	if (ret < 0)
 314		return ret;
 315
 316	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
 317	if (!vid)
 318		unreg_mcast = port_mask;
 319	dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
 320	ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
 321				       unreg_mcast, port_mask, 0);
 322
 323	pm_runtime_put(common->dev);
 324	return ret;
 325}
 326
 327static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
 328					     __be16 proto, u16 vid)
 329{
 330	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 331	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 332	int ret;
 333
 334	if (!common->is_emac_mode)
 335		return 0;
 336
 337	if (!netif_running(ndev) || !vid)
 338		return 0;
 339
 340	ret = pm_runtime_resume_and_get(common->dev);
 341	if (ret < 0)
 342		return ret;
 343
 344	dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
 345	ret = cpsw_ale_del_vlan(common->ale, vid,
 346				BIT(port->port_id) | ALE_PORT_HOST);
 347
 348	pm_runtime_put(common->dev);
 349	return ret;
 350}
 351
 352static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
 353					bool promisc)
 354{
 355	struct am65_cpsw_common *common = port->common;
 356
 357	if (promisc && !common->is_emac_mode) {
 358		dev_dbg(common->dev, "promisc mode requested in switch mode");
 359		return;
 360	}
 361
 362	if (promisc) {
 363		/* Enable promiscuous mode */
 364		cpsw_ale_control_set(common->ale, port->port_id,
 365				     ALE_PORT_MACONLY_CAF, 1);
 366		dev_dbg(common->dev, "promisc enabled\n");
 367	} else {
 368		/* Disable promiscuous mode */
 369		cpsw_ale_control_set(common->ale, port->port_id,
 370				     ALE_PORT_MACONLY_CAF, 0);
 371		dev_dbg(common->dev, "promisc disabled\n");
 372	}
 373}
 374
 375static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
 376{
 377	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 378	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 379	u32 port_mask;
 380	bool promisc;
 381
 382	promisc = !!(ndev->flags & IFF_PROMISC);
 383	am65_cpsw_slave_set_promisc(port, promisc);
 384
 385	if (promisc)
 386		return;
 387
 388	/* Restore allmulti on vlans if necessary */
 389	cpsw_ale_set_allmulti(common->ale,
 390			      ndev->flags & IFF_ALLMULTI, port->port_id);
 391
 392	port_mask = ALE_PORT_HOST;
 393	/* Clear all mcast from ALE */
 394	cpsw_ale_flush_multicast(common->ale, port_mask, -1);
 395
 396	if (!netdev_mc_empty(ndev)) {
 397		struct netdev_hw_addr *ha;
 398
 399		/* program multicast address list into ALE register */
 400		netdev_for_each_mc_addr(ha, ndev) {
 401			cpsw_ale_add_mcast(common->ale, ha->addr,
 402					   port_mask, 0, 0, 0);
 403		}
 404	}
 405}
 406
 407static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
 408					       unsigned int txqueue)
 409{
 410	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 411	struct am65_cpsw_tx_chn *tx_chn;
 412	struct netdev_queue *netif_txq;
 413	unsigned long trans_start;
 414
 415	netif_txq = netdev_get_tx_queue(ndev, txqueue);
 416	tx_chn = &common->tx_chns[txqueue];
 417	trans_start = READ_ONCE(netif_txq->trans_start);
 418
 419	netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
 420		   txqueue,
 421		   netif_tx_queue_stopped(netif_txq),
 422		   jiffies_to_msecs(jiffies - trans_start),
 423		   netdev_queue_dql_avail(netif_txq),
 424		   k3_cppi_desc_pool_avail(tx_chn->desc_pool));
 425
 426	if (netif_tx_queue_stopped(netif_txq)) {
 427		/* try recover if stopped by us */
 428		txq_trans_update(netif_txq);
 429		netif_tx_wake_queue(netif_txq);
 430	}
 431}
 432
 433static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
 434				  struct page *page, u32 flow_idx)
 435{
 436	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 437	struct cppi5_host_desc_t *desc_rx;
 438	struct device *dev = common->dev;
 439	struct am65_cpsw_swdata *swdata;
 440	dma_addr_t desc_dma;
 441	dma_addr_t buf_dma;
 442
 443	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
 444	if (!desc_rx) {
 445		dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
 446		return -ENOMEM;
 447	}
 448	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
 449
 450	buf_dma = dma_map_single(rx_chn->dma_dev,
 451				 page_address(page) + AM65_CPSW_HEADROOM,
 452				 AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE);
 453	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
 454		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 455		dev_err(dev, "Failed to map rx buffer\n");
 456		return -EINVAL;
 457	}
 458
 459	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
 460			 AM65_CPSW_NAV_PS_DATA_SIZE);
 461	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
 462	cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
 463			       buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
 464	swdata = cppi5_hdesc_get_swdata(desc_rx);
 465	swdata->page = page;
 466	swdata->flow_id = flow_idx;
 467
 468	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
 469					desc_rx, desc_dma);
 470}
 471
 472void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
 473{
 474	struct am65_cpsw_host *host_p = am65_common_get_host(common);
 475	u32 val, pri_map;
 476
 477	/* P0 set Receive Priority Type */
 478	val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
 479
 480	if (common->pf_p0_rx_ptype_rrobin) {
 481		val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
  482		/* Enet Port FIFOs work in fixed priority mode only, so
  483		 * reset P0_Rx_Pri_Map so that all packets go to Enet FIFO 0
  484		 */
 485		pri_map = 0x0;
 486	} else {
 487		val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
 488		/* restore P0_Rx_Pri_Map */
 489		pri_map = 0x76543210;
 490	}
 491
 492	writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
 493	writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
 494}
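     /* For reference on the values above: 0x76543210 is the identity mapping
      * (one nibble per packet priority, priority N -> FIFO N), while 0x0
      * steers every priority to FIFO 0 for the round-robin case.
      */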
 495
 496static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
 497static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
 498static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
 499static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
 500
 501static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
 502{
 503	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 504	struct am65_cpsw_rx_flow *flow;
 505	struct xdp_rxq_info *rxq;
 506	int id, port;
 507
 508	for (id = 0; id < common->rx_ch_num_flows; id++) {
 509		flow = &rx_chn->flows[id];
 510
 511		for (port = 0; port < common->port_num; port++) {
 512			if (!common->ports[port].ndev)
 513				continue;
 514
 515			rxq = &common->ports[port].xdp_rxq[id];
 516
 517			if (xdp_rxq_info_is_reg(rxq))
 518				xdp_rxq_info_unreg(rxq);
 519		}
 520
 521		if (flow->page_pool) {
 522			page_pool_destroy(flow->page_pool);
 523			flow->page_pool = NULL;
 524		}
 525	}
 526}
 527
 528static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
 529{
 530	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 531	struct page_pool_params pp_params = {
 532		.flags = PP_FLAG_DMA_MAP,
 533		.order = 0,
 534		.pool_size = AM65_CPSW_MAX_RX_DESC,
 535		.nid = dev_to_node(common->dev),
 536		.dev = common->dev,
 537		.dma_dir = DMA_BIDIRECTIONAL,
 538		/* .napi set dynamically */
 539	};
 540	struct am65_cpsw_rx_flow *flow;
 541	struct xdp_rxq_info *rxq;
 542	struct page_pool *pool;
 543	int id, port, ret;
 544
 545	for (id = 0; id < common->rx_ch_num_flows; id++) {
 546		flow = &rx_chn->flows[id];
 547		pp_params.napi = &flow->napi_rx;
 548		pool = page_pool_create(&pp_params);
 549		if (IS_ERR(pool)) {
 550			ret = PTR_ERR(pool);
 551			goto err;
 552		}
 553
 554		flow->page_pool = pool;
 555
  556		/* sharing the same page pool is allowed because the rx handlers
  557		 * of both ndevs never run simultaneously
  558		 */
 559		for (port = 0; port < common->port_num; port++) {
 560			if (!common->ports[port].ndev)
 561				continue;
 562
 563			rxq = &common->ports[port].xdp_rxq[id];
 564
 565			ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
 566					       id, flow->napi_rx.napi_id);
 567			if (ret)
 568				goto err;
 569
 570			ret = xdp_rxq_info_reg_mem_model(rxq,
 571							 MEM_TYPE_PAGE_POOL,
 572							 pool);
 573			if (ret)
 574				goto err;
 575		}
 576	}
 577
 578	return 0;
 579
 580err:
 581	am65_cpsw_destroy_xdp_rxqs(common);
 582	return ret;
 583}
 584
 585static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
 586				   void *desc,
 587				   unsigned char dsize_log2)
 588{
 589	void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool);
 590
 591	return (desc - pool_addr) >> dsize_log2;
 592}
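     /* Illustration of the index math above, assuming hypothetical 128-byte
      * descriptors (dsize_log2 = 7): a descriptor located 0x500 bytes into
      * the pool maps to index 0x500 >> 7 = 10.
      */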
 593
 594static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn,
 595					struct cppi5_host_desc_t *desc,
 596					enum am65_cpsw_tx_buf_type buf_type)
 597{
 598	int desc_idx;
 599
 600	desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc,
 601					   tx_chn->dsize_log2);
 602	k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx,
 603					(void *)buf_type);
 604}
 605
 606static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn,
 607							  dma_addr_t desc_dma)
 608{
 609	struct cppi5_host_desc_t *desc_tx;
 610	int desc_idx;
 611
 612	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
 613	desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx,
 614					   tx_chn->dsize_log2);
 615
 616	return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool,
 617								       desc_idx);
 618}
 619
 620static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
 621				      struct page *page,
 622				      bool allow_direct)
 623{
 624	page_pool_put_full_page(flow->page_pool, page, allow_direct);
 625}
 626
 627static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
 628{
 629	struct am65_cpsw_rx_chn *rx_chn = data;
 630	struct cppi5_host_desc_t *desc_rx;
 631	struct am65_cpsw_swdata *swdata;
 632	dma_addr_t buf_dma;
 633	struct page *page;
 634	u32 buf_dma_len;
 635	u32 flow_id;
 636
 637	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
 638	swdata = cppi5_hdesc_get_swdata(desc_rx);
 639	page = swdata->page;
 640	flow_id = swdata->flow_id;
 641	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
 642	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
 643	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
 644	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 645
 646	am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
 647}
 648
 649static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
 650				     struct cppi5_host_desc_t *desc)
 651{
 652	struct cppi5_host_desc_t *first_desc, *next_desc;
 653	dma_addr_t buf_dma, next_desc_dma;
 654	u32 buf_dma_len;
 655
 656	first_desc = desc;
 657	next_desc = first_desc;
 658
 659	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
 660	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
 661
 662	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
 663
 664	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
 665	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
 666	while (next_desc_dma) {
 667		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
 668						       next_desc_dma);
 669		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
 670		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
 671
 672		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
 673			       DMA_TO_DEVICE);
 674
 675		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
 676		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
 677
 678		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
 679	}
 680
 681	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
 682}
 683
 684static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
 685{
 686	struct am65_cpsw_tx_chn *tx_chn = data;
 687	enum am65_cpsw_tx_buf_type buf_type;
 688	struct cppi5_host_desc_t *desc_tx;
 689	struct xdp_frame *xdpf;
 690	struct sk_buff *skb;
 691	void **swdata;
 692
 693	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
 694	swdata = cppi5_hdesc_get_swdata(desc_tx);
 695	buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
 696	if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
 697		skb = *(swdata);
 698		dev_kfree_skb_any(skb);
 699	} else {
 700		xdpf = *(swdata);
 701		xdp_return_frame(xdpf);
 702	}
 703
 704	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
 705}
 706
 707static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
 708					   struct net_device *ndev,
 709					   unsigned int len,
 710					   unsigned int headroom)
 711{
 712	struct sk_buff *skb;
 713
 714	len += AM65_CPSW_HEADROOM;
 715
 716	skb = build_skb(page_addr, len);
 717	if (unlikely(!skb))
 718		return NULL;
 719
 720	skb_reserve(skb, headroom);
 721	skb->dev = ndev;
 722
 723	return skb;
 724}
 725
 726static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 727{
 728	struct am65_cpsw_host *host_p = am65_common_get_host(common);
 729	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 730	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
 731	int port_idx, i, ret, tx, flow_idx;
 732	struct am65_cpsw_rx_flow *flow;
 733	u32 val, port_mask;
 734	struct page *page;
 735
 736	if (common->usage_count)
 737		return 0;
 738
 739	/* Control register */
 740	writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
 741	       AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
 742	       common->cpsw_base + AM65_CPSW_REG_CTL);
 743	/* Max length register */
 744	writel(AM65_CPSW_MAX_PACKET_SIZE,
 745	       host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
 746	/* set base flow_id */
 747	writel(common->rx_flow_id_base,
 748	       host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
 749	writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN | AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN,
 750	       host_p->port_base + AM65_CPSW_P0_REG_CTL);
 751
 752	am65_cpsw_nuss_set_p0_ptype(common);
 753
 754	/* enable statistic */
 755	val = BIT(HOST_PORT_NUM);
 756	for (port_idx = 0; port_idx < common->port_num; port_idx++) {
 757		struct am65_cpsw_port *port = &common->ports[port_idx];
 758
 759		if (!port->disabled)
 760			val |=  BIT(port->port_id);
 761	}
 762	writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
 763
 764	/* disable priority elevation */
 765	writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);
 766
 767	cpsw_ale_start(common->ale);
 768
 769	/* limit to one RX flow only */
 770	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
 771			     ALE_DEFAULT_THREAD_ID, 0);
 772	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
 773			     ALE_DEFAULT_THREAD_ENABLE, 1);
  774	/* switch to vlan aware mode */
 775	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
 776	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
 777			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
 778
 779	/* default vlan cfg: create mask based on enabled ports */
 780	port_mask = GENMASK(common->port_num, 0) &
 781		    ~common->disabled_ports_mask;
 782
 783	cpsw_ale_add_vlan(common->ale, 0, port_mask,
 784			  port_mask, port_mask,
 785			  port_mask & ~ALE_PORT_HOST);
 786
 787	if (common->is_emac_mode)
 788		am65_cpsw_init_host_port_emac(common);
 789	else
 790		am65_cpsw_init_host_port_switch(common);
 791
 792	am65_cpsw_qos_tx_p0_rate_init(common);
 793
 794	ret = am65_cpsw_create_xdp_rxqs(common);
 795	if (ret) {
 796		dev_err(common->dev, "Failed to create XDP rx queues\n");
 797		return ret;
 798	}
 799
 800	for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
 801		flow = &rx_chn->flows[flow_idx];
 802		for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
 803			page = page_pool_dev_alloc_pages(flow->page_pool);
 804			if (!page) {
 805				dev_err(common->dev, "cannot allocate page in flow %d\n",
 806					flow_idx);
 807				ret = -ENOMEM;
 808				goto fail_rx;
 809			}
 810
 811			ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
 812			if (ret < 0) {
 813				dev_err(common->dev,
 814					"cannot submit page to rx channel flow %d, error %d\n",
 815					flow_idx, ret);
 816				am65_cpsw_put_page(flow, page, false);
 817				goto fail_rx;
 818			}
 819		}
 820	}
 821
 822	ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
 823	if (ret) {
 824		dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
 825		goto fail_rx;
 826	}
 827
  828	for (i = 0; i < common->rx_ch_num_flows; i++) {
 829		napi_enable(&rx_chn->flows[i].napi_rx);
 830		if (rx_chn->flows[i].irq_disabled) {
 831			rx_chn->flows[i].irq_disabled = false;
 832			enable_irq(rx_chn->flows[i].irq);
 833		}
 834	}
 835
 836	for (tx = 0; tx < common->tx_ch_num; tx++) {
 837		ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
 838		if (ret) {
 839			dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
 840				tx, ret);
 841			tx--;
 842			goto fail_tx;
 843		}
 844		napi_enable(&tx_chn[tx].napi_tx);
 845	}
 846
 847	dev_dbg(common->dev, "cpsw_nuss started\n");
 848	return 0;
 849
 850fail_tx:
 851	while (tx >= 0) {
 852		napi_disable(&tx_chn[tx].napi_tx);
 853		k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
 854		tx--;
 855	}
 856
  857	for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
 858		flow = &rx_chn->flows[flow_idx];
 859		if (!flow->irq_disabled) {
 860			disable_irq(flow->irq);
 861			flow->irq_disabled = true;
 862		}
 863		napi_disable(&flow->napi_rx);
 864	}
 865
 866	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
 867
 868fail_rx:
 869	for (i = 0; i < common->rx_ch_num_flows; i++)
 870		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
 871					  am65_cpsw_nuss_rx_cleanup, !!i);
 872
 873	am65_cpsw_destroy_xdp_rxqs(common);
 874
 875	return ret;
 876}
 877
 878static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
 879{
 880	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 881	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
 882	int i;
 883
 884	if (common->usage_count != 1)
 885		return 0;
 886
 887	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
 888			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
 889
 890	/* shutdown tx channels */
 891	atomic_set(&common->tdown_cnt, common->tx_ch_num);
 892	/* ensure new tdown_cnt value is visible */
 893	smp_mb__after_atomic();
 894	reinit_completion(&common->tdown_complete);
 895
 896	for (i = 0; i < common->tx_ch_num; i++)
 897		k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);
 898
 899	i = wait_for_completion_timeout(&common->tdown_complete,
 900					msecs_to_jiffies(1000));
 901	if (!i)
 902		dev_err(common->dev, "tx timeout\n");
 903	for (i = 0; i < common->tx_ch_num; i++) {
 904		napi_disable(&tx_chn[i].napi_tx);
 905		hrtimer_cancel(&tx_chn[i].tx_hrtimer);
 906	}
 907
 908	for (i = 0; i < common->tx_ch_num; i++) {
 909		k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
 910					  am65_cpsw_nuss_tx_cleanup);
 911		k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
 912	}
 913
 914	reinit_completion(&common->tdown_complete);
 915	k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
 916
 917	if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
 918		i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
 919		if (!i)
 920			dev_err(common->dev, "rx teardown timeout\n");
 921	}
 922
 923	for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
 924		napi_disable(&rx_chn->flows[i].napi_rx);
 925		hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
 926		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
 927					  am65_cpsw_nuss_rx_cleanup, !!i);
 928	}
 929
 930	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
 931
 932	cpsw_ale_stop(common->ale);
 933
 934	writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
 935	writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
 936
 937	am65_cpsw_destroy_xdp_rxqs(common);
 938
 939	dev_dbg(common->dev, "cpsw_nuss stopped\n");
 940	return 0;
 941}
 942
 943static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
 944{
 945	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 946	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 947	int ret;
 948
 949	phylink_stop(port->slave.phylink);
 950
 951	netif_tx_stop_all_queues(ndev);
 952
 953	phylink_disconnect_phy(port->slave.phylink);
 954
 955	ret = am65_cpsw_nuss_common_stop(common);
 956	if (ret)
 957		return ret;
 958
 959	common->usage_count--;
 960	pm_runtime_put(common->dev);
 961	return 0;
 962}
 963
 964static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
 965{
 966	struct am65_cpsw_port *port = arg;
 967
 968	if (!vdev)
 969		return 0;
 970
 971	return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
 972}
 973
 974static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
 975{
 976	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 977	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 978	int ret, i;
 979	u32 reg;
 980
 981	ret = pm_runtime_resume_and_get(common->dev);
 982	if (ret < 0)
 983		return ret;
 984
 985	/* Idle MAC port */
 986	cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
 987	cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
 988	cpsw_sl_ctl_reset(port->slave.mac_sl);
 989
 990	/* soft reset MAC */
 991	cpsw_sl_reg_write(port->slave.mac_sl, CPSW_SL_SOFT_RESET, 1);
 992	mdelay(1);
 993	reg = cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_SOFT_RESET);
 994	if (reg) {
 995		dev_err(common->dev, "soft RESET didn't complete\n");
 996		ret = -ETIMEDOUT;
 997		goto runtime_put;
 998	}
 999
1000	/* Notify the stack of the actual queue counts. */
1001	ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
1002	if (ret) {
1003		dev_err(common->dev, "cannot set real number of tx queues\n");
1004		goto runtime_put;
1005	}
1006
1007	ret = netif_set_real_num_rx_queues(ndev, common->rx_ch_num_flows);
1008	if (ret) {
1009		dev_err(common->dev, "cannot set real number of rx queues\n");
1010		goto runtime_put;
1011	}
1012
1013	for (i = 0; i < common->tx_ch_num; i++) {
1014		struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);
1015
1016		netdev_tx_reset_queue(txq);
1017		txq->tx_maxrate =  common->tx_chns[i].rate_mbps;
1018	}
1019
1020	ret = am65_cpsw_nuss_common_open(common);
1021	if (ret)
1022		goto runtime_put;
1023
1024	common->usage_count++;
1025
1026	am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
1027	am65_cpsw_port_enable_dscp_map(port);
1028
1029	if (common->is_emac_mode)
1030		am65_cpsw_init_port_emac_ale(port);
1031	else
1032		am65_cpsw_init_port_switch_ale(port);
1033
 1034	/* mac_sl should be configured via the phylink interface */
1035	am65_cpsw_sl_ctl_reset(port);
1036
1037	ret = phylink_of_phy_connect(port->slave.phylink, port->slave.port_np, 0);
1038	if (ret)
1039		goto error_cleanup;
1040
1041	/* restore vlan configurations */
1042	vlan_for_each(ndev, cpsw_restore_vlans, port);
1043
1044	phylink_start(port->slave.phylink);
1045
1046	return 0;
1047
1048error_cleanup:
1049	am65_cpsw_nuss_ndo_slave_stop(ndev);
1050	return ret;
1051
1052runtime_put:
1053	pm_runtime_put(common->dev);
1054	return ret;
1055}
1056
1057static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
1058				  struct am65_cpsw_tx_chn *tx_chn,
1059				  struct xdp_frame *xdpf,
1060				  enum am65_cpsw_tx_buf_type buf_type)
1061{
1062	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1063	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1064	struct cppi5_host_desc_t *host_desc;
1065	struct netdev_queue *netif_txq;
1066	dma_addr_t dma_desc, dma_buf;
1067	u32 pkt_len = xdpf->len;
1068	void **swdata;
1069	int ret;
1070
1071	host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1072	if (unlikely(!host_desc)) {
1073		ndev->stats.tx_dropped++;
1074		return AM65_CPSW_XDP_CONSUMED;	/* drop */
1075	}
1076
1077	am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);
1078
1079	dma_buf = dma_map_single(tx_chn->dma_dev, xdpf->data,
1080				 pkt_len, DMA_TO_DEVICE);
1081	if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
1082		ndev->stats.tx_dropped++;
1083		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
1084		goto pool_free;
1085	}
1086
1087	cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
1088			 AM65_CPSW_NAV_PS_DATA_SIZE);
1089	cppi5_hdesc_set_pkttype(host_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
1090	cppi5_hdesc_set_pktlen(host_desc, pkt_len);
1091	cppi5_desc_set_pktids(&host_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
1092	cppi5_desc_set_tags_ids(&host_desc->hdr, 0, port->port_id);
1093
1094	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
1095	cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
1096
1097	swdata = cppi5_hdesc_get_swdata(host_desc);
1098	*(swdata) = xdpf;
1099
1100	/* Report BQL before sending the packet */
1101	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
1102	netdev_tx_sent_queue(netif_txq, pkt_len);
1103
1104	dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, host_desc);
1105	if (AM65_CPSW_IS_CPSW2G(common)) {
1106		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
1107					       dma_desc);
1108	} else {
1109		spin_lock_bh(&tx_chn->lock);
1110		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
1111					       dma_desc);
1112		spin_unlock_bh(&tx_chn->lock);
1113	}
1114	if (ret) {
1115		/* Inform BQL */
1116		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
1117		ndev->stats.tx_errors++;
1118		ret = AM65_CPSW_XDP_CONSUMED; /* drop */
1119		goto dma_unmap;
1120	}
1121
1122	return 0;
1123
1124dma_unmap:
1125	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &dma_buf);
1126	dma_unmap_single(tx_chn->dma_dev, dma_buf, pkt_len, DMA_TO_DEVICE);
1127pool_free:
1128	k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
1129	return ret;
1130}
1131
1132static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
1133			     struct am65_cpsw_port *port,
1134			     struct xdp_buff *xdp,
1135			     int cpu, int *len)
1136{
1137	struct am65_cpsw_common *common = flow->common;
1138	struct net_device *ndev = port->ndev;
1139	int ret = AM65_CPSW_XDP_CONSUMED;
1140	struct am65_cpsw_tx_chn *tx_chn;
1141	struct netdev_queue *netif_txq;
1142	struct xdp_frame *xdpf;
1143	struct bpf_prog *prog;
1144	struct page *page;
1145	int pkt_len;
1146	u32 act;
1147	int err;
1148
1149	pkt_len = *len;
1150	prog = READ_ONCE(port->xdp_prog);
1151	if (!prog)
1152		return AM65_CPSW_XDP_PASS;
1153
1154	act = bpf_prog_run_xdp(prog, xdp);
1155	/* XDP prog might have changed packet data and boundaries */
1156	*len = xdp->data_end - xdp->data;
1157
1158	switch (act) {
1159	case XDP_PASS:
1160		ret = AM65_CPSW_XDP_PASS;
1161		goto out;
1162	case XDP_TX:
1163		tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
1164		netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
1165
1166		xdpf = xdp_convert_buff_to_frame(xdp);
1167		if (unlikely(!xdpf)) {
1168			ndev->stats.tx_dropped++;
1169			goto drop;
1170		}
1171
1172		__netif_tx_lock(netif_txq, cpu);
1173		err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
1174					     AM65_CPSW_TX_BUF_TYPE_XDP_TX);
1175		__netif_tx_unlock(netif_txq);
1176		if (err)
1177			goto drop;
1178
1179		dev_sw_netstats_rx_add(ndev, pkt_len);
1180		ret = AM65_CPSW_XDP_CONSUMED;
1181		goto out;
1182	case XDP_REDIRECT:
1183		if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
1184			goto drop;
1185
1186		dev_sw_netstats_rx_add(ndev, pkt_len);
1187		ret = AM65_CPSW_XDP_REDIRECT;
1188		goto out;
1189	default:
1190		bpf_warn_invalid_xdp_action(ndev, prog, act);
1191		fallthrough;
1192	case XDP_ABORTED:
1193drop:
1194		trace_xdp_exception(ndev, prog, act);
1195		fallthrough;
1196	case XDP_DROP:
1197		ndev->stats.rx_dropped++;
1198	}
1199
1200	page = virt_to_head_page(xdp->data);
1201	am65_cpsw_put_page(flow, page, true);
1202
1203out:
1204	return ret;
1205}
1206
1207/* RX psdata[2] word format - checksum information */
1208#define AM65_CPSW_RX_PSD_CSUM_ADD	GENMASK(15, 0)
1209#define AM65_CPSW_RX_PSD_CSUM_ERR	BIT(16)
1210#define AM65_CPSW_RX_PSD_IS_FRAGMENT	BIT(17)
1211#define AM65_CPSW_RX_PSD_IS_TCP		BIT(18)
1212#define AM65_CPSW_RX_PSD_IPV6_VALID	BIT(19)
1213#define AM65_CPSW_RX_PSD_IPV4_VALID	BIT(20)
1214
1215static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
1216{
 1217	/* HW can verify the checksum of IPv4/IPv6 TCP/UDP packets.
 1218	 * The csum information is provided in the psdata[2] word:
 1219	 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates a csum error
 1220	 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
 1221	 * bits - indicate an IPv4/IPv6 packet
 1222	 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates a fragmented packet
 1223	 * AM65_CPSW_RX_PSD_CSUM_ADD holds 0xFFFF for non-fragmented packets,
 1224	 * or the csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
 1225	 */
1226	skb_checksum_none_assert(skb);
1227
1228	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
1229		return;
1230
1231	if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
1232			  AM65_CPSW_RX_PSD_IPV4_VALID)) &&
1233			  !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
1234		/* csum for fragmented packets is unsupported */
1235		if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
1236			skb->ip_summed = CHECKSUM_UNNECESSARY;
1237	}
1238}
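     /* Example decode of the psdata[2] layout above, using a made-up value:
      * csum_info = 0x0014ffff has IPV4_VALID (bit 20) and IS_TCP (bit 18) set,
      * CSUM_ERR and IS_FRAGMENT clear and CSUM_ADD = 0xffff, so the skb gets
      * CHECKSUM_UNNECESSARY when NETIF_F_RXCSUM is enabled.
      */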
1239
1240static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
1241				     int cpu, int *xdp_state)
1242{
1243	struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
1244	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
1245	struct am65_cpsw_common *common = flow->common;
1246	struct am65_cpsw_ndev_priv *ndev_priv;
1247	struct cppi5_host_desc_t *desc_rx;
1248	struct device *dev = common->dev;
1249	struct am65_cpsw_swdata *swdata;
1250	struct page *page, *new_page;
1251	dma_addr_t desc_dma, buf_dma;
1252	struct am65_cpsw_port *port;
1253	struct net_device *ndev;
1254	u32 flow_idx = flow->id;
1255	struct sk_buff *skb;
1256	struct xdp_buff	xdp;
1257	int headroom, ret;
1258	void *page_addr;
1259	u32 *psdata;
1260
1261	*xdp_state = AM65_CPSW_XDP_PASS;
1262	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
1263	if (ret) {
1264		if (ret != -ENODATA)
1265			dev_err(dev, "RX: pop chn fail %d\n", ret);
1266		return ret;
1267	}
1268
1269	if (cppi5_desc_is_tdcm(desc_dma)) {
1270		dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
1271		if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
1272			complete(&common->tdown_complete);
1273		return 0;
1274	}
1275
1276	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
1277	dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
1278		__func__, flow_idx, &desc_dma);
1279
1280	swdata = cppi5_hdesc_get_swdata(desc_rx);
1281	page = swdata->page;
1282	page_addr = page_address(page);
1283	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
1284	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
1285	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
1286	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
1287	dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
1288	port = am65_common_get_port(common, port_id);
1289	ndev = port->ndev;
1290	psdata = cppi5_hdesc_get_psdata(desc_rx);
1291	csum_info = psdata[2];
1292	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
1293
1294	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
1295	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
1296
1297	if (port->xdp_prog) {
1298		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
1299		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
1300				 pkt_len, false);
1301		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
1302					       cpu, &pkt_len);
1303		if (*xdp_state != AM65_CPSW_XDP_PASS)
1304			goto allocate;
1305
1306		headroom = xdp.data - xdp.data_hard_start;
1307	} else {
1308		headroom = AM65_CPSW_HEADROOM;
1309	}
1310
1311	skb = am65_cpsw_build_skb(page_addr, ndev,
1312				  AM65_CPSW_MAX_PACKET_SIZE, headroom);
1313	if (unlikely(!skb)) {
1314		new_page = page;
1315		goto requeue;
1316	}
1317
1318	ndev_priv = netdev_priv(ndev);
1319	am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
1320	skb_put(skb, pkt_len);
1321	if (port->rx_ts_enabled)
1322		am65_cpts_rx_timestamp(common->cpts, skb);
1323	skb_mark_for_recycle(skb);
1324	skb->protocol = eth_type_trans(skb, ndev);
1325	am65_cpsw_nuss_rx_csum(skb, csum_info);
1326	napi_gro_receive(&flow->napi_rx, skb);
1327
1328	dev_sw_netstats_rx_add(ndev, pkt_len);
1329
1330allocate:
1331	new_page = page_pool_dev_alloc_pages(flow->page_pool);
1332	if (unlikely(!new_page)) {
1333		dev_err(dev, "page alloc failed\n");
1334		return -ENOMEM;
1335	}
1336
1337	if (netif_dormant(ndev)) {
1338		am65_cpsw_put_page(flow, new_page, true);
1339		ndev->stats.rx_dropped++;
1340		return 0;
1341	}
1342
1343requeue:
1344	ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
1345	if (WARN_ON(ret < 0)) {
1346		am65_cpsw_put_page(flow, new_page, true);
1347		ndev->stats.rx_errors++;
1348		ndev->stats.rx_dropped++;
1349	}
1350
1351	return ret;
1352}
1353
1354static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
1355{
1356	struct am65_cpsw_rx_flow *flow = container_of(timer,
1357						      struct am65_cpsw_rx_flow,
1358						      rx_hrtimer);
1359
1360	enable_irq(flow->irq);
1361	return HRTIMER_NORESTART;
1362}
1363
1364static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
1365{
1366	struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
1367	struct am65_cpsw_common *common = flow->common;
1368	int cpu = smp_processor_id();
1369	int xdp_state_or = 0;
1370	int cur_budget, ret;
1371	int xdp_state;
1372	int num_rx = 0;
1373
1374	/* process only this flow */
1375	cur_budget = budget;
1376	while (cur_budget--) {
1377		ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
1378		xdp_state_or |= xdp_state;
1379		if (ret)
1380			break;
1381		num_rx++;
1382	}
1383
1384	if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
1385		xdp_do_flush();
1386
1387	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
1388
1389	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
1390		if (flow->irq_disabled) {
1391			flow->irq_disabled = false;
1392			if (unlikely(flow->rx_pace_timeout)) {
1393				hrtimer_start(&flow->rx_hrtimer,
1394					      ns_to_ktime(flow->rx_pace_timeout),
1395					      HRTIMER_MODE_REL_PINNED);
1396			} else {
1397				enable_irq(flow->irq);
1398			}
1399		}
1400	}
1401
1402	return num_rx;
1403}
1404
1405static struct sk_buff *
1406am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
1407				   dma_addr_t desc_dma)
1408{
1409	struct cppi5_host_desc_t *desc_tx;
1410	struct sk_buff *skb;
1411	void **swdata;
1412
1413	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
1414					     desc_dma);
1415	swdata = cppi5_hdesc_get_swdata(desc_tx);
1416	skb = *(swdata);
1417	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
1418
1419	am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
1420
1421	dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
1422
1423	return skb;
1424}
1425
1426static struct xdp_frame *
1427am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
1428				   struct am65_cpsw_tx_chn *tx_chn,
1429				   dma_addr_t desc_dma,
1430				   struct net_device **ndev)
1431{
1432	struct cppi5_host_desc_t *desc_tx;
1433	struct am65_cpsw_port *port;
1434	struct xdp_frame *xdpf;
1435	u32 port_id = 0;
1436	void **swdata;
1437
1438	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
1439	cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
1440	swdata = cppi5_hdesc_get_swdata(desc_tx);
1441	xdpf = *(swdata);
1442	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
1443
1444	port = am65_common_get_port(common, port_id);
1445	dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len);
1446	*ndev = port->ndev;
1447
1448	return xdpf;
1449}
1450
1451static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
1452				   struct netdev_queue *netif_txq)
1453{
1454	if (netif_tx_queue_stopped(netif_txq)) {
 1455		/* Check whether the queue is stopped due to stalled
 1456		 * TX DMA; if so, wake the queue, since we now have
 1457		 * free descriptors for TX
 1458		 */
1459		__netif_tx_lock(netif_txq, smp_processor_id());
1460		if (netif_running(ndev) &&
1461		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
1462			netif_tx_wake_queue(netif_txq);
1463
1464		__netif_tx_unlock(netif_txq);
1465	}
1466}
1467
1468static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
1469					   int chn, unsigned int budget, bool *tdown)
1470{
1471	enum am65_cpsw_tx_buf_type buf_type;
1472	struct device *dev = common->dev;
1473	struct am65_cpsw_tx_chn *tx_chn;
1474	struct netdev_queue *netif_txq;
1475	unsigned int total_bytes = 0;
1476	struct net_device *ndev;
1477	struct xdp_frame *xdpf;
1478	struct sk_buff *skb;
1479	dma_addr_t desc_dma;
1480	int res, num_tx = 0;
1481
1482	tx_chn = &common->tx_chns[chn];
1483
1484	while (true) {
1485		spin_lock(&tx_chn->lock);
1486		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
1487		spin_unlock(&tx_chn->lock);
1488		if (res == -ENODATA)
1489			break;
1490
1491		if (cppi5_desc_is_tdcm(desc_dma)) {
1492			if (atomic_dec_and_test(&common->tdown_cnt))
1493				complete(&common->tdown_complete);
1494			*tdown = true;
1495			break;
1496		}
1497
1498		buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
1499		if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
1500			skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
1501			ndev = skb->dev;
1502			total_bytes = skb->len;
1503			napi_consume_skb(skb, budget);
1504		} else {
1505			xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
1506								  desc_dma, &ndev);
1507			total_bytes = xdpf->len;
1508			if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
1509				xdp_return_frame_rx_napi(xdpf);
1510			else
1511				xdp_return_frame(xdpf);
1512		}
1513		num_tx++;
1514
1515		netif_txq = netdev_get_tx_queue(ndev, chn);
1516
1517		netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
1518
1519		am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
1520	}
1521
1522	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
1523
1524	return num_tx;
1525}
1526
1527static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
1528					      int chn, unsigned int budget, bool *tdown)
1529{
1530	enum am65_cpsw_tx_buf_type buf_type;
1531	struct device *dev = common->dev;
1532	struct am65_cpsw_tx_chn *tx_chn;
1533	struct netdev_queue *netif_txq;
1534	unsigned int total_bytes = 0;
1535	struct net_device *ndev;
1536	struct xdp_frame *xdpf;
1537	struct sk_buff *skb;
1538	dma_addr_t desc_dma;
1539	int res, num_tx = 0;
1540
1541	tx_chn = &common->tx_chns[chn];
1542
1543	while (true) {
1544		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
1545		if (res == -ENODATA)
1546			break;
1547
1548		if (cppi5_desc_is_tdcm(desc_dma)) {
1549			if (atomic_dec_and_test(&common->tdown_cnt))
1550				complete(&common->tdown_complete);
1551			*tdown = true;
1552			break;
1553		}
1554
1555		buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
1556		if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
1557			skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
1558			ndev = skb->dev;
1559			total_bytes += skb->len;
1560			napi_consume_skb(skb, budget);
1561		} else {
1562			xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
1563								  desc_dma, &ndev);
1564			total_bytes += xdpf->len;
1565			if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
1566				xdp_return_frame_rx_napi(xdpf);
1567			else
1568				xdp_return_frame(xdpf);
1569		}
1570		num_tx++;
1571	}
1572
1573	if (!num_tx)
1574		return 0;
1575
1576	netif_txq = netdev_get_tx_queue(ndev, chn);
1577
1578	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
1579
1580	am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
1581
1582	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
1583
1584	return num_tx;
1585}
1586
1587static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
1588{
1589	struct am65_cpsw_tx_chn *tx_chns =
1590			container_of(timer, struct am65_cpsw_tx_chn, tx_hrtimer);
1591
1592	enable_irq(tx_chns->irq);
1593	return HRTIMER_NORESTART;
1594}
1595
1596static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
1597{
1598	struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
1599	bool tdown = false;
1600	int num_tx;
1601
1602	if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
1603		num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
1604							    budget, &tdown);
1605	else
1606		num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
1607							 tx_chn->id, budget, &tdown);
1608
1609	if (num_tx >= budget)
1610		return budget;
1611
1612	if (napi_complete_done(napi_tx, num_tx)) {
1613		if (unlikely(tx_chn->tx_pace_timeout && !tdown)) {
1614			hrtimer_start(&tx_chn->tx_hrtimer,
1615				      ns_to_ktime(tx_chn->tx_pace_timeout),
1616				      HRTIMER_MODE_REL_PINNED);
1617		} else {
1618			enable_irq(tx_chn->irq);
1619		}
1620	}
1621
1622	return 0;
1623}
1624
1625static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
1626{
1627	struct am65_cpsw_rx_flow *flow = dev_id;
1628
1629	flow->irq_disabled = true;
1630	disable_irq_nosync(irq);
1631	napi_schedule(&flow->napi_rx);
1632
1633	return IRQ_HANDLED;
1634}
1635
1636static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
1637{
1638	struct am65_cpsw_tx_chn *tx_chn = dev_id;
1639
1640	disable_irq_nosync(irq);
1641	napi_schedule(&tx_chn->napi_tx);
1642
1643	return IRQ_HANDLED;
1644}
1645
1646static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
1647						 struct net_device *ndev)
1648{
1649	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1650	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
1651	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1652	struct device *dev = common->dev;
1653	struct am65_cpsw_tx_chn *tx_chn;
1654	struct netdev_queue *netif_txq;
1655	dma_addr_t desc_dma, buf_dma;
1656	int ret, q_idx, i;
1657	void **swdata;
1658	u32 *psdata;
1659	u32 pkt_len;
1660
1661	/* padding enabled in hw */
1662	pkt_len = skb_headlen(skb);
1663
1664	/* SKB TX timestamp */
1665	if (port->tx_ts_enabled)
1666		am65_cpts_prep_tx_timestamp(common->cpts, skb);
1667
1668	q_idx = skb_get_queue_mapping(skb);
1669	dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
1670
1671	tx_chn = &common->tx_chns[q_idx];
1672	netif_txq = netdev_get_tx_queue(ndev, q_idx);
1673
1674	/* Map the linear buffer */
1675	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
1676				 DMA_TO_DEVICE);
1677	if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
1678		dev_err(dev, "Failed to map tx skb buffer\n");
1679		ndev->stats.tx_errors++;
1680		goto err_free_skb;
1681	}
1682
1683	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1684	if (!first_desc) {
1685		dev_dbg(dev, "Failed to allocate descriptor\n");
1686		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
1687				 DMA_TO_DEVICE);
1688		goto busy_stop_q;
1689	}
1690
1691	am65_cpsw_nuss_set_buf_type(tx_chn, first_desc,
1692				    AM65_CPSW_TX_BUF_TYPE_SKB);
1693
1694	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
1695			 AM65_CPSW_NAV_PS_DATA_SIZE);
1696	cppi5_desc_set_pktids(&first_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
1697	cppi5_hdesc_set_pkttype(first_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
1698	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
1699
1700	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
1701	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
1702	swdata = cppi5_hdesc_get_swdata(first_desc);
1703	*(swdata) = skb;
1704	psdata = cppi5_hdesc_get_psdata(first_desc);
1705
1706	/* HW csum offload if enabled */
1707	psdata[2] = 0;
1708	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1709		unsigned int cs_start, cs_offset;
1710
1711		cs_start = skb_transport_offset(skb);
1712		cs_offset = cs_start + skb->csum_offset;
 1713		/* HW numbers bytes starting from 1 */
1714		psdata[2] = ((cs_offset + 1) << 24) |
1715			    ((cs_start + 1) << 16) | (skb->len - cs_start);
1716		dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
1717	}
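     	/* Worked example of the encoding above for an illustrative TCP/IPv4
     	 * skb with skb_transport_offset() = 34, csum_offset = 16 and
     	 * skb->len = 100: cs_start = 34, cs_offset = 50, so
     	 * psdata[2] = (51 << 24) | (35 << 16) | 66 = 0x33230042.
     	 */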
1718
1719	if (!skb_is_nonlinear(skb))
1720		goto done_tx;
1721
1722	dev_dbg(dev, "fragmented SKB\n");
1723
1724	/* Handle the case where skb is fragmented in pages */
1725	cur_desc = first_desc;
1726	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1727		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1728		u32 frag_size = skb_frag_size(frag);
1729
1730		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1731		if (!next_desc) {
1732			dev_err(dev, "Failed to allocate descriptor\n");
1733			goto busy_free_descs;
1734		}
1735
1736		am65_cpsw_nuss_set_buf_type(tx_chn, next_desc,
1737					    AM65_CPSW_TX_BUF_TYPE_SKB);
1738
1739		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
1740					   DMA_TO_DEVICE);
1741		if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
1742			dev_err(dev, "Failed to map tx skb page\n");
1743			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
1744			ndev->stats.tx_errors++;
1745			goto err_free_descs;
1746		}
1747
1748		cppi5_hdesc_reset_hbdesc(next_desc);
1749		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
1750		cppi5_hdesc_attach_buf(next_desc,
1751				       buf_dma, frag_size, buf_dma, frag_size);
1752
1753		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
1754						      next_desc);
1755		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
1756		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
1757
1758		pkt_len += frag_size;
1759		cur_desc = next_desc;
1760	}
1761	WARN_ON(pkt_len != skb->len);
1762
1763done_tx:
1764	skb_tx_timestamp(skb);
1765
1766	/* report bql before sending packet */
1767	netdev_tx_sent_queue(netif_txq, pkt_len);
1768
1769	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
1770	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
1771	if (AM65_CPSW_IS_CPSW2G(common)) {
1772		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1773	} else {
1774		spin_lock_bh(&tx_chn->lock);
1775		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1776		spin_unlock_bh(&tx_chn->lock);
1777	}
1778	if (ret) {
1779		dev_err(dev, "can't push desc %d\n", ret);
1780		/* inform bql */
1781		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
1782		ndev->stats.tx_errors++;
1783		goto err_free_descs;
1784	}
1785
1786	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
1787		netif_tx_stop_queue(netif_txq);
1788		/* Barrier, so that stop_queue is visible to other CPUs */
1789		smp_mb__after_atomic();
1790		dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);
1791
1792		/* re-check for smp */
1793		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
1794		    MAX_SKB_FRAGS) {
1795			netif_tx_wake_queue(netif_txq);
1796			dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
1797		}
1798	}
1799
1800	return NETDEV_TX_OK;
1801
1802err_free_descs:
1803	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
1804err_free_skb:
1805	ndev->stats.tx_dropped++;
1806	dev_kfree_skb_any(skb);
1807	return NETDEV_TX_OK;
1808
1809busy_free_descs:
1810	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
1811busy_stop_q:
1812	netif_tx_stop_queue(netif_txq);
1813	return NETDEV_TX_BUSY;
1814}
1815
1816static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
1817						    void *addr)
1818{
1819	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1820	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1821	struct sockaddr *sockaddr = (struct sockaddr *)addr;
1822	int ret;
1823
1824	ret = eth_prepare_mac_addr_change(ndev, addr);
1825	if (ret < 0)
1826		return ret;
1827
1828	ret = pm_runtime_resume_and_get(common->dev);
1829	if (ret < 0)
1830		return ret;
1831
1832	cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
1833			   HOST_PORT_NUM, 0, 0);
1834	cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
1835			   HOST_PORT_NUM, ALE_SECURE, 0);
1836
1837	am65_cpsw_port_set_sl_mac(port, addr);
1838	eth_commit_mac_addr_change(ndev, sockaddr);
1839
1840	pm_runtime_put(common->dev);
1841
1842	return 0;
1843}
1844
1845static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
1846				       struct ifreq *ifr)
1847{
1848	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1849	u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
1850	struct hwtstamp_config cfg;
1851
1852	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
1853		return -EOPNOTSUPP;
1854
1855	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1856		return -EFAULT;
1857
1858	/* TX HW timestamp */
1859	switch (cfg.tx_type) {
1860	case HWTSTAMP_TX_OFF:
1861	case HWTSTAMP_TX_ON:
1862		break;
1863	default:
1864		return -ERANGE;
1865	}
1866
1867	switch (cfg.rx_filter) {
1868	case HWTSTAMP_FILTER_NONE:
1869		port->rx_ts_enabled = false;
1870		break;
1871	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1872	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1873	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1874	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1875	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1876	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1877	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1878	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1879	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1880		port->rx_ts_enabled = true;
1881		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
1882		break;
1883	case HWTSTAMP_FILTER_ALL:
1884	case HWTSTAMP_FILTER_SOME:
1885	case HWTSTAMP_FILTER_NTP_ALL:
1886		return -EOPNOTSUPP;
1887	default:
1888		return -ERANGE;
1889	}
1890
1891	port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
1892
1893	/* cfg TX timestamp */
1894	seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
1895		  AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;
1896
1897	ts_vlan_ltype = ETH_P_8021Q;
1898
1899	ts_ctrl_ltype2 = ETH_P_1588 |
1900			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
1901			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
1902			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
1903			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
1904			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
1905			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
1906			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
1907			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;
1908
1909	ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
1910		  AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;
1911
1912	if (port->tx_ts_enabled)
1913		ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
1914			   AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
1915
1916	if (port->rx_ts_enabled)
1917		ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN |
1918			   AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN;
1919
1920	writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
1921	writel(ts_vlan_ltype, port->port_base +
1922	       AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
1923	writel(ts_ctrl_ltype2, port->port_base +
1924	       AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
1925	writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
1926
1927	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1928}
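/* Illustrative userspace sketch (assumptions: interface name "eth0" and an
 * already-open UDP socket fd) for exercising the SIOCSHWTSTAMP path above:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */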
1929
1930static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
1931				       struct ifreq *ifr)
1932{
1933	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1934	struct hwtstamp_config cfg;
1935
1936	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
1937		return -EOPNOTSUPP;
1938
1939	cfg.flags = 0;
1940	cfg.tx_type = port->tx_ts_enabled ?
1941		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1942	cfg.rx_filter = port->rx_ts_enabled ?
1943			HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE;
1944
1945	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1946}
1947
1948static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
1949					  struct ifreq *req, int cmd)
1950{
1951	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1952
1953	if (!netif_running(ndev))
1954		return -EINVAL;
1955
1956	switch (cmd) {
1957	case SIOCSHWTSTAMP:
1958		return am65_cpsw_nuss_hwtstamp_set(ndev, req);
1959	case SIOCGHWTSTAMP:
1960		return am65_cpsw_nuss_hwtstamp_get(ndev, req);
1961	}
1962
1963	return phylink_mii_ioctl(port->slave.phylink, req, cmd);
1964}
1965
1966static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
1967					 struct rtnl_link_stats64 *stats)
1968{
1969	dev_fetch_sw_netstats(stats, dev->tstats);
1970
1971	stats->rx_errors	= dev->stats.rx_errors;
1972	stats->rx_dropped	= dev->stats.rx_dropped;
1973	stats->tx_dropped	= dev->stats.tx_dropped;
1974}
1975
1976static int am65_cpsw_xdp_prog_setup(struct net_device *ndev,
1977				    struct bpf_prog *prog)
1978{
1979	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1980	bool running = netif_running(ndev);
1981	struct bpf_prog *old_prog;
1982
1983	if (running)
1984		am65_cpsw_nuss_ndo_slave_stop(ndev);
1985
1986	old_prog = xchg(&port->xdp_prog, prog);
1987	if (old_prog)
1988		bpf_prog_put(old_prog);
1989
1990	if (running)
1991		return am65_cpsw_nuss_ndo_slave_open(ndev);
1992
1993	return 0;
1994}
1995
1996static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
1997{
1998	switch (bpf->command) {
1999	case XDP_SETUP_PROG:
2000		return am65_cpsw_xdp_prog_setup(ndev, bpf->prog);
2001	default:
2002		return -EINVAL;
2003	}
2004}
2005
2006static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
2007				  struct xdp_frame **frames, u32 flags)
2008{
2009	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2010	struct am65_cpsw_tx_chn *tx_chn;
2011	struct netdev_queue *netif_txq;
2012	int cpu = smp_processor_id();
2013	int i, nxmit = 0;
2014
2015	tx_chn = &common->tx_chns[cpu % common->tx_ch_num];
2016	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
2017
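	/* One TX channel is picked per calling CPU (cpu % tx_ch_num); taking
	 * that queue's tx lock below presumably serializes concurrent
	 * ndo_xdp_xmit callers that map to the same channel.
	 */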
2018	__netif_tx_lock(netif_txq, cpu);
2019	for (i = 0; i < n; i++) {
2020		if (am65_cpsw_xdp_tx_frame(ndev, tx_chn, frames[i],
2021					   AM65_CPSW_TX_BUF_TYPE_XDP_NDO))
2022			break;
2023		nxmit++;
2024	}
2025	__netif_tx_unlock(netif_txq);
2026
2027	return nxmit;
2028}
2029
2030static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
2031	.ndo_open		= am65_cpsw_nuss_ndo_slave_open,
2032	.ndo_stop		= am65_cpsw_nuss_ndo_slave_stop,
2033	.ndo_start_xmit		= am65_cpsw_nuss_ndo_slave_xmit,
2034	.ndo_set_rx_mode	= am65_cpsw_nuss_ndo_slave_set_rx_mode,
2035	.ndo_get_stats64        = am65_cpsw_nuss_ndo_get_stats,
2036	.ndo_validate_addr	= eth_validate_addr,
2037	.ndo_set_mac_address	= am65_cpsw_nuss_ndo_slave_set_mac_address,
2038	.ndo_tx_timeout		= am65_cpsw_nuss_ndo_host_tx_timeout,
2039	.ndo_vlan_rx_add_vid	= am65_cpsw_nuss_ndo_slave_add_vid,
2040	.ndo_vlan_rx_kill_vid	= am65_cpsw_nuss_ndo_slave_kill_vid,
2041	.ndo_eth_ioctl		= am65_cpsw_nuss_ndo_slave_ioctl,
2042	.ndo_setup_tc           = am65_cpsw_qos_ndo_setup_tc,
2043	.ndo_set_tx_maxrate	= am65_cpsw_qos_ndo_tx_p0_set_maxrate,
2044	.ndo_bpf		= am65_cpsw_ndo_bpf,
2045	.ndo_xdp_xmit		= am65_cpsw_ndo_xdp_xmit,
2046};
2047
2048static void am65_cpsw_disable_phy(struct phy *phy)
2049{
2050	phy_power_off(phy);
2051	phy_exit(phy);
2052}
2053
2054static int am65_cpsw_enable_phy(struct phy *phy)
2055{
2056	int ret;
2057
2058	ret = phy_init(phy);
2059	if (ret < 0)
2060		return ret;
2061
2062	ret = phy_power_on(phy);
2063	if (ret < 0) {
2064		phy_exit(phy);
2065		return ret;
2066	}
2067
2068	return 0;
2069}
2070
2071static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common *common)
2072{
2073	struct am65_cpsw_port *port;
2074	struct phy *phy;
2075	int i;
2076
2077	for (i = 0; i < common->port_num; i++) {
2078		port = &common->ports[i];
2079		phy = port->slave.serdes_phy;
2080		if (phy)
2081			am65_cpsw_disable_phy(phy);
2082	}
2083}
2084
2085static int am65_cpsw_init_serdes_phy(struct device *dev, struct device_node *port_np,
2086				     struct am65_cpsw_port *port)
2087{
2088	const char *name = "serdes";
2089	struct phy *phy;
2090	int ret;
2091
2092	phy = devm_of_phy_optional_get(dev, port_np, name);
2093	if (IS_ERR_OR_NULL(phy))
2094		return PTR_ERR_OR_ZERO(phy);
2095
2096	/* Serdes PHY exists. Store it. */
2097	port->slave.serdes_phy = phy;
2098
2099	ret = am65_cpsw_enable_phy(phy);
2100	if (ret < 0)
2101		goto err_phy;
2102
2103	return 0;
2104
2105err_phy:
2106	devm_phy_put(dev, phy);
2107	return ret;
2108}
2109
2110static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
2111				      const struct phylink_link_state *state)
2112{
2113	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
2114							  phylink_config);
2115	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
2116	struct am65_cpsw_common *common = port->common;
2117
2118	if (common->pdata.extra_modes & BIT(state->interface)) {
2119		if (state->interface == PHY_INTERFACE_MODE_SGMII) {
2120			writel(ADVERTISE_SGMII,
2121			       port->sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
2122			cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN);
2123		} else {
2124			cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN);
2125		}
2126
2127		if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
2128			cpsw_sl_ctl_set(port->slave.mac_sl,
2129					CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN);
2130		} else {
2131			cpsw_sl_ctl_clr(port->slave.mac_sl,
2132					CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN);
2133		}
2134
2135		writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
2136		       port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
2137	}
2138}
2139
2140static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
2141					 phy_interface_t interface)
2142{
2143	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
2144							  phylink_config);
2145	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
2146	struct am65_cpsw_common *common = port->common;
2147	struct net_device *ndev = port->ndev;
2148	u32 mac_control;
2149	int tmo;
2150
2151	/* disable forwarding */
2152	cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2153
2154	cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
2155
2156	tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
2157	dev_dbg(common->dev, "down mac_sl %08x tmo %d\n",
2158		cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);
2159
2160	/* All the bits that am65_cpsw_nuss_mac_link_up() can possibly set */
2161	mac_control = CPSW_SL_CTL_GMII_EN | CPSW_SL_CTL_GIG | CPSW_SL_CTL_IFCTL_A |
2162		      CPSW_SL_CTL_FULLDUPLEX | CPSW_SL_CTL_RX_FLOW_EN | CPSW_SL_CTL_TX_FLOW_EN;
2163	/* If interface mode is RGMII, CPSW_SL_CTL_EXT_EN might have been set for 10 Mbps */
2164	if (phy_interface_mode_is_rgmii(interface))
2165		mac_control |= CPSW_SL_CTL_EXT_EN;
2166	/* Only clear those bits that can be set by am65_cpsw_nuss_mac_link_up() */
2167	cpsw_sl_ctl_clr(port->slave.mac_sl, mac_control);
2168
2169	am65_cpsw_qos_link_down(ndev);
2170	netif_tx_stop_all_queues(ndev);
2171}
2172
2173static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
2174				       unsigned int mode, phy_interface_t interface, int speed,
2175				       int duplex, bool tx_pause, bool rx_pause)
2176{
2177	struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
2178							  phylink_config);
2179	struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
2180	struct am65_cpsw_common *common = port->common;
2181	u32 mac_control = CPSW_SL_CTL_GMII_EN;
2182	struct net_device *ndev = port->ndev;
2183
2184	/* Bring the port out of idle state */
2185	cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
2186
2187	if (speed == SPEED_1000)
2188		mac_control |= CPSW_SL_CTL_GIG;
2189	/* TODO: Verify whether in-band is necessary for 10 Mbps RGMII */
2190	if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface))
2191		/* Can be used with in band mode only */
2192		mac_control |= CPSW_SL_CTL_EXT_EN;
2193	if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
2194		mac_control |= CPSW_SL_CTL_IFCTL_A;
2195	if (duplex)
2196		mac_control |= CPSW_SL_CTL_FULLDUPLEX;
2197
2198	/* rx_pause/tx_pause */
2199	if (rx_pause)
2200		mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
2201
2202	if (tx_pause)
2203		mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
2204
2205	cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
2206
2207	/* enable forwarding */
2208	cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2209
2210	am65_cpsw_qos_link_up(ndev, speed);
2211	netif_tx_wake_all_queues(ndev);
2212}
2213
2214static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
2215	.mac_config = am65_cpsw_nuss_mac_config,
2216	.mac_link_down = am65_cpsw_nuss_mac_link_down,
2217	.mac_link_up = am65_cpsw_nuss_mac_link_up,
2218};
2219
2220static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
2221{
2222	struct am65_cpsw_common *common = port->common;
2223
2224	if (!port->disabled)
2225		return;
2226
2227	cpsw_ale_control_set(common->ale, port->port_id,
2228			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2229
2230	cpsw_sl_reset(port->slave.mac_sl, 100);
2231	cpsw_sl_ctl_reset(port->slave.mac_sl);
2232}
2233
2234static void am65_cpsw_nuss_free_tx_chns(void *data)
2235{
2236	struct am65_cpsw_common *common = data;
2237	int i;
2238
2239	for (i = 0; i < common->tx_ch_num; i++) {
2240		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2241
2242		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
2243			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
2244
2245		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
2246			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
2247
2248		memset(tx_chn, 0, sizeof(*tx_chn));
2249	}
2250}
2251
2252static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
2253{
2254	struct device *dev = common->dev;
2255	int i;
2256
2257	common->tx_ch_rate_msk = 0;
2258	for (i = 0; i < common->tx_ch_num; i++) {
2259		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2260
2261		if (tx_chn->irq > 0)
2262			devm_free_irq(dev, tx_chn->irq, tx_chn);
2263
2264		netif_napi_del(&tx_chn->napi_tx);
2265	}
2266
2267	am65_cpsw_nuss_free_tx_chns(common);
2268}
2269
2270static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
2271{
2272	struct device *dev = common->dev;
2273	int i, ret = 0;
2274
2275	for (i = 0; i < common->tx_ch_num; i++) {
2276		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2277
2278		hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
2279		tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
2280
2281		ret = devm_request_irq(dev, tx_chn->irq,
2282				       am65_cpsw_nuss_tx_irq,
2283				       IRQF_TRIGGER_HIGH,
2284				       tx_chn->tx_chn_name, tx_chn);
2285		if (ret) {
2286			dev_err(dev, "failure requesting tx%u irq %u, %d\n",
2287				tx_chn->id, tx_chn->irq, ret);
2288			goto err;
2289		}
2290
2291		netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
2292				  am65_cpsw_nuss_tx_poll);
2293	}
2294
2295	return 0;
2296
2297err:
2298	for (--i; i >= 0; i--) {
2299		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2300
2301		netif_napi_del(&tx_chn->napi_tx);
2302		devm_free_irq(dev, tx_chn->irq, tx_chn);
2303	}
2304
2305	return ret;
2306}
2307
2308static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
2309{
2310	u32  max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
2311	struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
2312	struct device *dev = common->dev;
2313	struct k3_ring_cfg ring_cfg = {
2314		.elm_size = K3_RINGACC_RING_ELSIZE_8,
2315		.mode = K3_RINGACC_RING_MODE_RING,
2316		.flags = 0
2317	};
2318	u32 hdesc_size, hdesc_size_out;
2319	int i, ret = 0;
2320
2321	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
2322					   AM65_CPSW_NAV_SW_DATA_SIZE);
2323
2324	tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
2325	tx_cfg.tx_cfg = ring_cfg;
2326	tx_cfg.txcq_cfg = ring_cfg;
2327	tx_cfg.tx_cfg.size = max_desc_num;
2328	tx_cfg.txcq_cfg.size = max_desc_num;
2329
2330	for (i = 0; i < common->tx_ch_num; i++) {
2331		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2332
2333		snprintf(tx_chn->tx_chn_name,
2334			 sizeof(tx_chn->tx_chn_name), "tx%d", i);
2335
2336		spin_lock_init(&tx_chn->lock);
2337		tx_chn->common = common;
2338		tx_chn->id = i;
2339		tx_chn->descs_num = max_desc_num;
2340
2341		tx_chn->tx_chn =
2342			k3_udma_glue_request_tx_chn(dev,
2343						    tx_chn->tx_chn_name,
2344						    &tx_cfg);
2345		if (IS_ERR(tx_chn->tx_chn)) {
2346			ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
2347					    "Failed to request tx dma channel\n");
2348			goto err;
2349		}
2350		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
2351
2352		tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
2353								  tx_chn->descs_num,
2354								  hdesc_size,
2355								  tx_chn->tx_chn_name);
2356		if (IS_ERR(tx_chn->desc_pool)) {
2357			ret = PTR_ERR(tx_chn->desc_pool);
2358			dev_err(dev, "Failed to create pool %d\n", ret);
2359			goto err;
2360		}
2361
2362		hdesc_size_out = k3_cppi_desc_pool_desc_size(tx_chn->desc_pool);
2363		tx_chn->dsize_log2 = __fls(hdesc_size_out);
2364		WARN_ON(hdesc_size_out != (1 << tx_chn->dsize_log2));
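		/* dsize_log2 is presumably used elsewhere in the driver to
		 * turn descriptor addresses into pool indices with a shift,
		 * which only works when the pool element size is a power of
		 * two - hence the WARN_ON above.
		 */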
2365
2366		tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
2367		if (tx_chn->irq < 0) {
2368			dev_err(dev, "Failed to get tx dma irq %d\n",
2369				tx_chn->irq);
2370			ret = tx_chn->irq;
2371			goto err;
2372		}
2373
2374		snprintf(tx_chn->tx_chn_name,
2375			 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
2376			 dev_name(dev), tx_chn->id);
2377	}
2378
2379	ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
2380	if (ret) {
2381		dev_err(dev, "Failed to add tx NAPI %d\n", ret);
2382		goto err;
2383	}
2384
2385	return 0;
2386
2387err:
2388	am65_cpsw_nuss_free_tx_chns(common);
2389
2390	return ret;
2391}
2392
2393static void am65_cpsw_nuss_free_rx_chns(void *data)
2394{
2395	struct am65_cpsw_common *common = data;
2396	struct am65_cpsw_rx_chn *rx_chn;
2397
2398	rx_chn = &common->rx_chns;
2399
2400	if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
2401		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
2402
2403	if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
2404		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
2405}
2406
2407static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
2408{
2409	struct device *dev = common->dev;
2410	struct am65_cpsw_rx_chn *rx_chn;
2411	struct am65_cpsw_rx_flow *flows;
2412	int i;
2413
2414	rx_chn = &common->rx_chns;
2415	flows = rx_chn->flows;
2416
2417	for (i = 0; i < common->rx_ch_num_flows; i++) {
2418		if (flows[i].irq >= 0)
2419			devm_free_irq(dev, flows[i].irq, &flows[i]);
2420		netif_napi_del(&flows[i].napi_rx);
2421	}
2422
2423	am65_cpsw_nuss_free_rx_chns(common);
2424
2425	common->rx_flow_id_base = -1;
2426}
2427
2428static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
2429{
2430	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
2431	struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
2432	u32  max_desc_num = AM65_CPSW_MAX_RX_DESC;
2433	struct device *dev = common->dev;
2434	struct am65_cpsw_rx_flow *flow;
2435	u32 hdesc_size, hdesc_size_out;
2436	u32 fdqring_id;
2437	int i, ret = 0;
2438
2439	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
2440					   AM65_CPSW_NAV_SW_DATA_SIZE);
2441
2442	rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
2443	rx_cfg.flow_id_num = common->rx_ch_num_flows;
2444	rx_cfg.flow_id_base = common->rx_flow_id_base;
2445
2446	/* init all flows */
2447	rx_chn->dev = dev;
2448	rx_chn->descs_num = max_desc_num * rx_cfg.flow_id_num;
2449
2450	for (i = 0; i < common->rx_ch_num_flows; i++) {
2451		flow = &rx_chn->flows[i];
2452		flow->page_pool = NULL;
2453	}
2454
2455	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
2456	if (IS_ERR(rx_chn->rx_chn)) {
2457		ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
2458				    "Failed to request rx dma channel\n");
2459		goto err;
2460	}
2461	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
2462
2463	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
2464							  rx_chn->descs_num,
2465							  hdesc_size, "rx");
2466	if (IS_ERR(rx_chn->desc_pool)) {
2467		ret = PTR_ERR(rx_chn->desc_pool);
2468		dev_err(dev, "Failed to create rx pool %d\n", ret);
2469		goto err;
2470	}
2471
2472	hdesc_size_out = k3_cppi_desc_pool_desc_size(rx_chn->desc_pool);
2473	rx_chn->dsize_log2 = __fls(hdesc_size_out);
2474	WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));
2475
2476	common->rx_flow_id_base =
2477			k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
2478	dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
2479
2480	fdqring_id = K3_RINGACC_RING_ID_ANY;
2481	for (i = 0; i < rx_cfg.flow_id_num; i++) {
2482		struct k3_ring_cfg rxring_cfg = {
2483			.elm_size = K3_RINGACC_RING_ELSIZE_8,
2484			.mode = K3_RINGACC_RING_MODE_RING,
2485			.flags = 0,
2486		};
2487		struct k3_ring_cfg fdqring_cfg = {
2488			.elm_size = K3_RINGACC_RING_ELSIZE_8,
2489			.flags = K3_RINGACC_RING_SHARED,
2490		};
2491		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
2492			.rx_cfg = rxring_cfg,
2493			.rxfdq_cfg = fdqring_cfg,
2494			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
2495			.src_tag_lo_sel =
2496				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
2497		};
2498
2499		flow = &rx_chn->flows[i];
2500		flow->id = i;
2501		flow->common = common;
2502		flow->irq = -EINVAL;
2503
2504		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
2505		rx_flow_cfg.rx_cfg.size = max_desc_num;
2506		/* share same FDQ for all flows */
2507		rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num;
2508		rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
2509
2510		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
2511						i, &rx_flow_cfg);
2512		if (ret) {
2513			dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
2514			goto err_flow;
2515		}
2516		if (!i)
2517			fdqring_id =
2518				k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
2519								i);
2520
2521		flow->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
2522		if (flow->irq <= 0) {
2523			dev_err(dev, "Failed to get rx dma irq %d\n",
2524				flow->irq);
2525			ret = flow->irq;
2526			goto err_flow;
2527		}
2528
2529		snprintf(flow->name,
2530			 sizeof(flow->name), "%s-rx%d",
2531			 dev_name(dev), i);
2532		hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
2533			     HRTIMER_MODE_REL_PINNED);
2534		flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
2535
2536		ret = devm_request_irq(dev, flow->irq,
2537				       am65_cpsw_nuss_rx_irq,
2538				       IRQF_TRIGGER_HIGH,
2539				       flow->name, flow);
2540		if (ret) {
2541			dev_err(dev, "failure requesting rx %d irq %u, %d\n",
2542				i, flow->irq, ret);
2543			flow->irq = -EINVAL;
2544			goto err_flow;
2545		}
2546
2547		netif_napi_add(common->dma_ndev, &flow->napi_rx,
2548			       am65_cpsw_nuss_rx_poll);
2549	}
2550
2551	/* setup classifier to route priorities to flows */
2552	cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
2553
2554	return 0;
2555
2556err_flow:
2557	for (--i; i >= 0; i--) {
2558		flow = &rx_chn->flows[i];
2559		netif_napi_del(&flow->napi_rx);
2560		devm_free_irq(dev, flow->irq, flow);
2561	}
2562
2563err:
2564	am65_cpsw_nuss_free_rx_chns(common);
2565
2566	return ret;
2567}
2568
2569static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common)
2570{
2571	struct am65_cpsw_host *host_p = am65_common_get_host(common);
2572
2573	host_p->common = common;
2574	host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE;
2575	host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE;
2576
2577	return 0;
2578}
2579
2580static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
2581					   int slave, u8 *mac_addr)
2582{
2583	u32 mac_lo, mac_hi, offset;
2584	struct regmap *syscon;
2585	int ret;
2586
2587	syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
2588	if (IS_ERR(syscon)) {
2589		if (PTR_ERR(syscon) == -ENODEV)
2590			return 0;
2591		return PTR_ERR(syscon);
2592	}
2593
2594	ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
2595					 &offset);
2596	if (ret)
2597		return ret;
2598
2599	regmap_read(syscon, offset, &mac_lo);
2600	regmap_read(syscon, offset + 4, &mac_hi);
2601
2602	mac_addr[0] = (mac_hi >> 8) & 0xff;
2603	mac_addr[1] = mac_hi & 0xff;
2604	mac_addr[2] = (mac_lo >> 24) & 0xff;
2605	mac_addr[3] = (mac_lo >> 16) & 0xff;
2606	mac_addr[4] = (mac_lo >> 8) & 0xff;
2607	mac_addr[5] = mac_lo & 0xff;
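	/* Illustration of the byte mapping above: mac_hi = 0x0000aabb and
	 * mac_lo = 0xccddeeff decode to the address aa:bb:cc:dd:ee:ff.
	 */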
2608
2609	return 0;
2610}
2611
2612static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
2613{
2614	struct device *dev = common->dev;
2615	struct device_node *node;
2616	struct am65_cpts *cpts;
2617	void __iomem *reg_base;
2618
2619	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
2620		return 0;
2621
2622	node = of_get_child_by_name(dev->of_node, "cpts");
2623	if (!node) {
2624		dev_err(dev, "%s cpts not found\n", __func__);
2625		return -ENOENT;
2626	}
2627
2628	reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
2629	cpts = am65_cpts_create(dev, reg_base, node);
2630	if (IS_ERR(cpts)) {
2631		int ret = PTR_ERR(cpts);
2632
2633		of_node_put(node);
2634		dev_err(dev, "cpts create err %d\n", ret);
2635		return ret;
2636	}
2637	common->cpts = cpts;
2638	/* Forbid PM runtime if CPTS is running.
2639	 * K3 CPSWxG modules may completely lose context during ON->OFF
2640	 * transitions depending on integration.
2641	 * AM65x/J721E MCU CPSW2G: context is not lost (false)
2642	 * J721E MAIN_CPSW9G: context is lost (true)
2643	 */
2644	pm_runtime_forbid(dev);
2645
2646	return 0;
2647}
2648
2649static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
2650{
2651	struct device_node *node, *port_np;
2652	struct device *dev = common->dev;
2653	int ret;
2654
2655	node = of_get_child_by_name(dev->of_node, "ethernet-ports");
2656	if (!node)
2657		return -ENOENT;
2658
2659	for_each_child_of_node(node, port_np) {
2660		struct am65_cpsw_port *port;
2661		u32 port_id;
2662
2663		/* it is not a slave port node, continue */
2664		if (strcmp(port_np->name, "port"))
2665			continue;
2666
2667		ret = of_property_read_u32(port_np, "reg", &port_id);
2668		if (ret < 0) {
2669			dev_err(dev, "%pOF error reading port_id %d\n",
2670				port_np, ret);
2671			goto of_node_put;
2672		}
2673
2674		if (!port_id || port_id > common->port_num) {
2675			dev_err(dev, "%pOF has invalid port_id %u %s\n",
2676				port_np, port_id, port_np->name);
2677			ret = -EINVAL;
2678			goto of_node_put;
2679		}
2680
2681		port = am65_common_get_port(common, port_id);
2682		port->port_id = port_id;
2683		port->common = common;
2684		port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
2685				  AM65_CPSW_NU_PORTS_OFFSET * (port_id);
2686		if (common->pdata.extra_modes)
2687			port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id);
2688		port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
2689				  (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
2690		port->name = of_get_property(port_np, "label", NULL);
2691		port->fetch_ram_base =
2692				common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
2693				(AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
2694
2695		port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
2696		if (IS_ERR(port->slave.mac_sl)) {
2697			ret = PTR_ERR(port->slave.mac_sl);
2698			goto of_node_put;
2699		}
2700
2701		port->disabled = !of_device_is_available(port_np);
2702		if (port->disabled) {
2703			common->disabled_ports_mask |= BIT(port->port_id);
2704			continue;
2705		}
2706
2707		port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
2708		if (IS_ERR(port->slave.ifphy)) {
2709			ret = PTR_ERR(port->slave.ifphy);
2710			dev_err(dev, "%pOF error retrieving port phy: %d\n",
2711				port_np, ret);
2712			goto of_node_put;
2713		}
2714
2715		/* Initialize the Serdes PHY for the port */
2716		ret = am65_cpsw_init_serdes_phy(dev, port_np, port);
2717		if (ret)
2718			goto of_node_put;
2719
2720		port->slave.mac_only =
2721				of_property_read_bool(port_np, "ti,mac-only");
2722
2723		/* get phy/link info */
2724		port->slave.port_np = port_np;
2725		ret = of_get_phy_mode(port_np, &port->slave.phy_if);
2726		if (ret) {
2727			dev_err(dev, "%pOF read phy-mode err %d\n",
2728				port_np, ret);
2729			goto of_node_put;
2730		}
2731
2732		ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
2733		if (ret)
2734			goto of_node_put;
2735
2736		ret = of_get_mac_address(port_np, port->slave.mac_addr);
2737		if (ret) {
2738			am65_cpsw_am654_get_efuse_macid(port_np,
2739							port->port_id,
2740							port->slave.mac_addr);
2741			if (!is_valid_ether_addr(port->slave.mac_addr)) {
2742				eth_random_addr(port->slave.mac_addr);
2743				dev_err(dev, "Using random MAC address\n");
2744			}
2745		}
2746
2747		/* Reset all Queue priorities to 0 */
2748		writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
2749	}
2750	of_node_put(node);
2751
2752	/* at least one external port must be available */
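	/* Example (illustrative): with port_num == 2 and both external ports
	 * disabled, disabled_ports_mask == 0x6 and GENMASK(2, 1) == 0x6, so
	 * the check below fails with -ENODEV.
	 */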
2753	if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
2754		dev_err(dev, "No external ports are available\n");
2755		return -ENODEV;
2756	}
2757
2758	return 0;
2759
2760of_node_put:
2761	of_node_put(port_np);
2762	of_node_put(node);
2763	return ret;
2764}
2765
2766static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
2767{
2768	struct am65_cpsw_port *port;
2769	int i;
2770
2771	for (i = 0; i < common->port_num; i++) {
2772		port = &common->ports[i];
2773		if (port->slave.phylink)
2774			phylink_destroy(port->slave.phylink);
2775	}
2776}
2777
2778static int
2779am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
2780{
2781	struct am65_cpsw_ndev_priv *ndev_priv;
2782	struct device *dev = common->dev;
2783	struct am65_cpsw_port *port;
2784	struct phylink *phylink;
2785
2786	port = &common->ports[port_idx];
2787
2788	if (port->disabled)
2789		return 0;
2790
2791	/* alloc netdev */
2792	port->ndev = alloc_etherdev_mqs(sizeof(struct am65_cpsw_ndev_priv),
2793					AM65_CPSW_MAX_QUEUES,
2794					AM65_CPSW_MAX_QUEUES);
2795	if (!port->ndev) {
2796		dev_err(dev, "error allocating slave net_device %u\n",
2797			port->port_id);
2798		return -ENOMEM;
2799	}
2800
2801	ndev_priv = netdev_priv(port->ndev);
2802	ndev_priv->port = port;
2803	ndev_priv->msg_enable = AM65_CPSW_DEBUG;
2804	mutex_init(&ndev_priv->mm_lock);
2805	port->qos.link_speed = SPEED_UNKNOWN;
2806	SET_NETDEV_DEV(port->ndev, dev);
2807	port->ndev->dev.of_node = port->slave.port_np;
2808
2809	eth_hw_addr_set(port->ndev, port->slave.mac_addr);
2810
2811	port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
2812	port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE -
2813			      (VLAN_ETH_HLEN + ETH_FCS_LEN);
2814	port->ndev->hw_features = NETIF_F_SG |
2815				  NETIF_F_RXCSUM |
2816				  NETIF_F_HW_CSUM |
2817				  NETIF_F_HW_TC;
2818	port->ndev->features = port->ndev->hw_features |
2819			       NETIF_F_HW_VLAN_CTAG_FILTER;
2820	port->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
2821				   NETDEV_XDP_ACT_REDIRECT |
2822				   NETDEV_XDP_ACT_NDO_XMIT;
2823	port->ndev->vlan_features |= NETIF_F_SG;
2824	port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
2825	port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
2826
2827	/* Configuring Phylink */
2828	port->slave.phylink_config.dev = &port->ndev->dev;
2829	port->slave.phylink_config.type = PHYLINK_NETDEV;
2830	port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
2831						      MAC_1000FD | MAC_5000FD;
2832	port->slave.phylink_config.mac_managed_pm = true; /* MAC does PM */
2833
2834	switch (port->slave.phy_if) {
2835	case PHY_INTERFACE_MODE_RGMII:
2836	case PHY_INTERFACE_MODE_RGMII_ID:
2837	case PHY_INTERFACE_MODE_RGMII_RXID:
2838	case PHY_INTERFACE_MODE_RGMII_TXID:
2839		phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
2840		break;
2841
2842	case PHY_INTERFACE_MODE_RMII:
2843		__set_bit(PHY_INTERFACE_MODE_RMII,
2844			  port->slave.phylink_config.supported_interfaces);
2845		break;
2846
2847	case PHY_INTERFACE_MODE_QSGMII:
2848	case PHY_INTERFACE_MODE_SGMII:
2849	case PHY_INTERFACE_MODE_USXGMII:
2850		if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
2851			__set_bit(port->slave.phy_if,
2852				  port->slave.phylink_config.supported_interfaces);
2853		} else {
2854			dev_err(dev, "selected phy-mode is not supported\n");
2855			return -EOPNOTSUPP;
2856		}
2857		break;
2858
2859	default:
2860		dev_err(dev, "selected phy-mode is not supported\n");
2861		return -EOPNOTSUPP;
2862	}
2863
2864	phylink = phylink_create(&port->slave.phylink_config,
2865				 of_fwnode_handle(port->slave.port_np),
2866				 port->slave.phy_if,
2867				 &am65_cpsw_phylink_mac_ops);
2868	if (IS_ERR(phylink))
2869		return PTR_ERR(phylink);
2870
2871	port->slave.phylink = phylink;
2872
2873	/* Disable TX checksum offload by default due to HW bug */
2874	if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
2875		port->ndev->features &= ~NETIF_F_HW_CSUM;
2876
2877	port->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
2878	port->xdp_prog = NULL;
2879
2880	if (!common->dma_ndev)
2881		common->dma_ndev = port->ndev;
2882
2883	return 0;
2884}
2885
2886static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
2887{
2888	int ret;
2889	int i;
2890
2891	for (i = 0; i < common->port_num; i++) {
2892		ret = am65_cpsw_nuss_init_port_ndev(common, i);
2893		if (ret)
2894			return ret;
2895	}
2896
2897	return 0;
2898}
2899
2900static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
2901{
2902	struct am65_cpsw_port *port;
2903	int i;
2904
2905	for (i = 0; i < common->port_num; i++) {
2906		port = &common->ports[i];
2907		if (!port->ndev)
2908			continue;
2909		if (port->ndev->reg_state == NETREG_REGISTERED)
2910			unregister_netdev(port->ndev);
2911		free_netdev(port->ndev);
2912		port->ndev = NULL;
2913	}
2914}
2915
2916static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common)
2917{
2918	int set_val = 0;
2919	int i;
2920
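	/* set_val becomes 1 only when every enabled external port is a
	 * member of the bridge, i.e. the whole switch is presumably bridged.
	 */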
2921	if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask))
2922		set_val = 1;
2923
2924	dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val);
2925
2926	for (i = 1; i <= common->port_num; i++) {
2927		struct am65_cpsw_port *port = am65_common_get_port(common, i);
2928		struct am65_cpsw_ndev_priv *priv;
2929
2930		if (!port->ndev)
2931			continue;
2932
2933		priv = am65_ndev_to_priv(port->ndev);
2934		priv->offload_fwd_mark = set_val;
2935	}
2936}
2937
2938bool am65_cpsw_port_dev_check(const struct net_device *ndev)
2939{
2940	if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) {
2941		struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2942
2943		return !common->is_emac_mode;
2944	}
2945
2946	return false;
2947}
2948
2949static int am65_cpsw_netdevice_port_link(struct net_device *ndev,
2950					 struct net_device *br_ndev,
2951					 struct netlink_ext_ack *extack)
2952{
2953	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2954	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
2955	int err;
2956
2957	if (!common->br_members) {
2958		common->hw_bridge_dev = br_ndev;
2959	} else {
2960		/* This is adding the port to a second bridge, which is
2961		 * not supported
2962		 */
2963		if (common->hw_bridge_dev != br_ndev)
2964			return -EOPNOTSUPP;
2965	}
2966
2967	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
2968					    false, extack);
2969	if (err)
2970		return err;
2971
2972	common->br_members |= BIT(priv->port->port_id);
2973
2974	am65_cpsw_port_offload_fwd_mark_update(common);
2975
2976	return NOTIFY_DONE;
2977}
2978
2979static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
2980{
2981	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2982	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
2983
2984	switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
2985
2986	common->br_members &= ~BIT(priv->port->port_id);
2987
2988	am65_cpsw_port_offload_fwd_mark_update(common);
2989
2990	if (!common->br_members)
2991		common->hw_bridge_dev = NULL;
2992}
2993
2994/* netdev notifier */
2995static int am65_cpsw_netdevice_event(struct notifier_block *unused,
2996				     unsigned long event, void *ptr)
2997{
2998	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
2999	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
3000	struct netdev_notifier_changeupper_info *info;
3001	int ret = NOTIFY_DONE;
3002
3003	if (!am65_cpsw_port_dev_check(ndev))
3004		return NOTIFY_DONE;
3005
3006	switch (event) {
3007	case NETDEV_CHANGEUPPER:
3008		info = ptr;
3009
3010		if (netif_is_bridge_master(info->upper_dev)) {
3011			if (info->linking)
3012				ret = am65_cpsw_netdevice_port_link(ndev,
3013								    info->upper_dev,
3014								    extack);
3015			else
3016				am65_cpsw_netdevice_port_unlink(ndev);
3017		}
3018		break;
3019	default:
3020		return NOTIFY_DONE;
3021	}
3022
3023	return notifier_from_errno(ret);
3024}
3025
3026static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw)
3027{
3028	int ret = 0;
3029
3030	if (AM65_CPSW_IS_CPSW2G(cpsw) ||
3031	    !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
3032		return 0;
3033
3034	cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event;
3035	ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
3036	if (ret) {
3037		dev_err(cpsw->dev, "can't register netdevice notifier\n");
3038		return ret;
3039	}
3040
3041	ret = am65_cpsw_switchdev_register_notifiers(cpsw);
3042	if (ret)
3043		unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
3044
3045	return ret;
3046}
3047
3048static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw)
3049{
3050	if (AM65_CPSW_IS_CPSW2G(cpsw) ||
3051	    !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
3052		return;
3053
3054	am65_cpsw_switchdev_unregister_notifiers(cpsw);
3055	unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
3056}
3057
3058static const struct devlink_ops am65_cpsw_devlink_ops = {};
3059
3060static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw)
3061{
3062	cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0,
3063			   ALE_MCAST_BLOCK_LEARN_FWD);
3064}
3065
3066static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common)
3067{
3068	struct am65_cpsw_host *host = am65_common_get_host(common);
3069
3070	writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3071
3072	am65_cpsw_init_stp_ale_entry(common);
3073
3074	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
3075	dev_dbg(common->dev, "Set P0_UNI_FLOOD\n");
3076	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
3077}
3078
3079static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
3080{
3081	struct am65_cpsw_host *host = am65_common_get_host(common);
3082
3083	writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3084
3085	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
3086	dev_dbg(common->dev, "unset P0_UNI_FLOOD\n");
3087
3088	/* learning makes no sense in multi-mac mode */
3089	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
3090}
3091
3092static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
3093					struct devlink_param_gset_ctx *ctx)
3094{
3095	struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
3096	struct am65_cpsw_common *common = dl_priv->common;
3097
3098	dev_dbg(common->dev, "%s id:%u\n", __func__, id);
3099
3100	if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
3101		return -EOPNOTSUPP;
3102
3103	ctx->val.vbool = !common->is_emac_mode;
3104
3105	return 0;
3106}
3107
3108static void am65_cpsw_init_port_emac_ale(struct  am65_cpsw_port *port)
3109{
3110	struct am65_cpsw_slave_data *slave = &port->slave;
3111	struct am65_cpsw_common *common = port->common;
3112	u32 port_mask;
3113
3114	writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3115
3116	if (slave->mac_only)
3117		/* enable mac-only mode on port */
3118		cpsw_ale_control_set(common->ale, port->port_id,
3119				     ALE_PORT_MACONLY, 1);
3120
3121	cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1);
3122
3123	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
3124
3125	cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr,
3126			   HOST_PORT_NUM, ALE_SECURE, slave->port_vlan);
3127	cpsw_ale_add_mcast(common->ale, port->ndev->broadcast,
3128			   port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2);
3129}
3130
3131static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
3132{
3133	struct am65_cpsw_slave_data *slave = &port->slave;
3134	struct am65_cpsw_common *cpsw = port->common;
3135	u32 port_mask;
3136
3137	cpsw_ale_control_set(cpsw->ale, port->port_id,
3138			     ALE_PORT_NOLEARN, 0);
3139
3140	cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr,
3141			   HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN,
3142			   slave->port_vlan);
3143
3144	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
3145
3146	cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast,
3147			   port_mask, ALE_VLAN, slave->port_vlan,
3148			   ALE_MCAST_FWD_2);
3149
3150	writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3151
3152	cpsw_ale_control_set(cpsw->ale, port->port_id,
3153			     ALE_PORT_MACONLY, 0);
3154}
3155
3156static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
3157					struct devlink_param_gset_ctx *ctx,
3158					struct netlink_ext_ack *extack)
3159{
3160	struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
3161	struct am65_cpsw_common *cpsw = dl_priv->common;
3162	bool switch_en = ctx->val.vbool;
3163	bool if_running = false;
3164	int i;
3165
3166	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
3167
3168	if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
3169		return -EOPNOTSUPP;
3170
3171	if (switch_en == !cpsw->is_emac_mode)
3172		return 0;
3173
3174	if (!switch_en && cpsw->br_members) {
3175		dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n");
3176		return -EINVAL;
3177	}
3178
3179	rtnl_lock();
3180
3181	cpsw->is_emac_mode = !switch_en;
3182
3183	for (i = 0; i < cpsw->port_num; i++) {
3184		struct net_device *sl_ndev = cpsw->ports[i].ndev;
3185
3186		if (!sl_ndev || !netif_running(sl_ndev))
3187			continue;
3188
3189		if_running = true;
3190	}
3191
3192	if (!if_running) {
3193		/* all ndevs are down */
3194		for (i = 0; i < cpsw->port_num; i++) {
3195			struct net_device *sl_ndev = cpsw->ports[i].ndev;
3196			struct am65_cpsw_slave_data *slave;
3197
3198			if (!sl_ndev)
3199				continue;
3200
3201			slave = am65_ndev_to_slave(sl_ndev);
3202			if (switch_en)
3203				slave->port_vlan = cpsw->default_vlan;
3204			else
3205				slave->port_vlan = 0;
3206		}
3207
3208		goto exit;
3209	}
3210
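	/* Presumably bypass the ALE while its table is cleared and rebuilt
	 * for the new mode; bypass is lifted again at the end below.
	 */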
3211	cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
3212	/* clean up ALE table */
3213	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1);
3214	cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT);
3215
3216	if (switch_en) {
3217		dev_info(cpsw->dev, "Enable switch mode\n");
3218
3219		am65_cpsw_init_host_port_switch(cpsw);
3220
3221		for (i = 0; i < cpsw->port_num; i++) {
3222			struct net_device *sl_ndev = cpsw->ports[i].ndev;
3223			struct am65_cpsw_slave_data *slave;
3224			struct am65_cpsw_port *port;
3225
3226			if (!sl_ndev)
3227				continue;
3228
3229			port = am65_ndev_to_port(sl_ndev);
3230			slave = am65_ndev_to_slave(sl_ndev);
3231			slave->port_vlan = cpsw->default_vlan;
3232
3233			if (netif_running(sl_ndev))
3234				am65_cpsw_init_port_switch_ale(port);
3235		}
3236
3237	} else {
3238		dev_info(cpsw->dev, "Disable switch mode\n");
3239
3240		am65_cpsw_init_host_port_emac(cpsw);
3241
3242		for (i = 0; i < cpsw->port_num; i++) {
3243			struct net_device *sl_ndev = cpsw->ports[i].ndev;
3244			struct am65_cpsw_port *port;
3245
3246			if (!sl_ndev)
3247				continue;
3248
3249			port = am65_ndev_to_port(sl_ndev);
3250			port->slave.port_vlan = 0;
3251			if (netif_running(sl_ndev))
3252				am65_cpsw_init_port_emac_ale(port);
3253		}
3254	}
3255	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
3256exit:
3257	rtnl_unlock();
3258
3259	return 0;
3260}
3261
3262static const struct devlink_param am65_cpsw_devlink_params[] = {
3263	DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
3264			     DEVLINK_PARAM_TYPE_BOOL,
3265			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
3266			     am65_cpsw_dl_switch_mode_get,
3267			     am65_cpsw_dl_switch_mode_set, NULL),
3268};
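/* Illustrative usage (the devlink handle below is an assumption and depends
 * on the platform device address):
 *
 *	devlink dev param set platform/8000000.ethernet \
 *		name switch_mode value true cmode runtime
 */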
3269
3270static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
3271{
3272	struct devlink_port_attrs attrs = {};
3273	struct am65_cpsw_devlink *dl_priv;
3274	struct device *dev = common->dev;
3275	struct devlink_port *dl_port;
3276	struct am65_cpsw_port *port;
3277	int ret = 0;
3278	int i;
3279
3280	common->devlink =
3281		devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev);
3282	if (!common->devlink)
3283		return -ENOMEM;
3284
3285	dl_priv = devlink_priv(common->devlink);
3286	dl_priv->common = common;
3287
3288	/* Provide a devlink hook to switch mode when multiple external ports
3289	 * are present and the NUSS switchdev driver is enabled.
3290	 */
3291	if (!AM65_CPSW_IS_CPSW2G(common) &&
3292	    IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
3293		ret = devlink_params_register(common->devlink,
3294					      am65_cpsw_devlink_params,
3295					      ARRAY_SIZE(am65_cpsw_devlink_params));
3296		if (ret) {
3297			dev_err(dev, "devlink params reg fail ret:%d\n", ret);
3298			goto dl_unreg;
3299		}
3300	}
3301
3302	for (i = 1; i <= common->port_num; i++) {
3303		port = am65_common_get_port(common, i);
3304		dl_port = &port->devlink_port;
3305
3306		if (port->ndev)
3307			attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
3308		else
3309			attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
3310		attrs.phys.port_number = port->port_id;
3311		attrs.switch_id.id_len = sizeof(resource_size_t);
3312		memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
3313		devlink_port_attrs_set(dl_port, &attrs);
3314
3315		ret = devlink_port_register(common->devlink, dl_port, port->port_id);
3316		if (ret) {
3317			dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n",
3318				port->port_id, ret);
3319			goto dl_port_unreg;
3320		}
3321	}
3322	devlink_register(common->devlink);
3323	return ret;
3324
3325dl_port_unreg:
3326	for (i = i - 1; i >= 1; i--) {
3327		port = am65_common_get_port(common, i);
3328		dl_port = &port->devlink_port;
3329
3330		devlink_port_unregister(dl_port);
3331	}
3332dl_unreg:
3333	devlink_free(common->devlink);
3334	return ret;
3335}
3336
3337static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
3338{
3339	struct devlink_port *dl_port;
3340	struct am65_cpsw_port *port;
3341	int i;
3342
3343	devlink_unregister(common->devlink);
3344
3345	for (i = 1; i <= common->port_num; i++) {
3346		port = am65_common_get_port(common, i);
3347		dl_port = &port->devlink_port;
3348
3349		devlink_port_unregister(dl_port);
3350	}
3351
3352	if (!AM65_CPSW_IS_CPSW2G(common) &&
3353	    IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
3354		devlink_params_unregister(common->devlink,
3355					  am65_cpsw_devlink_params,
3356					  ARRAY_SIZE(am65_cpsw_devlink_params));
3357
3358	devlink_free(common->devlink);
3359}
3360
3361static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
3362{
3363	struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
3364	struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
3365	struct device *dev = common->dev;
3366	struct am65_cpsw_port *port;
3367	int ret = 0, i;
3368
3369	/* init tx channels */
3370	ret = am65_cpsw_nuss_init_tx_chns(common);
3371	if (ret)
3372		return ret;
3373	ret = am65_cpsw_nuss_init_rx_chns(common);
3374	if (ret)
3375		goto err_remove_tx;
3376
3377	/* The DMA Channels are not guaranteed to be in a clean state.
3378	 * Reset and disable them to ensure that they are back to the
3379	 * clean state and ready to be used.
3380	 */
3381	for (i = 0; i < common->tx_ch_num; i++) {
3382		k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
3383					  am65_cpsw_nuss_tx_cleanup);
3384		k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
3385	}
3386
3387	for (i = 0; i < common->rx_ch_num_flows; i++)
3388		k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
3389					  rx_chan,
3390					  am65_cpsw_nuss_rx_cleanup, !!i);
3391
3392	k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
3393
3394	ret = am65_cpsw_nuss_register_devlink(common);
3395	if (ret)
3396		goto err_remove_rx;
3397
3398	for (i = 0; i < common->port_num; i++) {
3399		port = &common->ports[i];
3400
3401		if (!port->ndev)
3402			continue;
3403
3404		SET_NETDEV_DEVLINK_PORT(port->ndev, &port->devlink_port);
3405
3406		ret = register_netdev(port->ndev);
3407		if (ret) {
3408			dev_err(dev, "error registering slave net device %i %d\n",
3409				i, ret);
3410			goto err_cleanup_ndev;
3411		}
3412	}
3413
3414	ret = am65_cpsw_register_notifiers(common);
3415	if (ret)
3416		goto err_cleanup_ndev;
3417
3418	/* can't auto-unregister ndevs using devm_add_action() due to the
3419	 * devres release sequence in the driver core for DMA
3420	 */
3421
3422	return 0;
3423
3424err_cleanup_ndev:
3425	am65_cpsw_nuss_cleanup_ndev(common);
3426	am65_cpsw_unregister_devlink(common);
3427err_remove_rx:
3428	am65_cpsw_nuss_remove_rx_chns(common);
3429err_remove_tx:
3430	am65_cpsw_nuss_remove_tx_chns(common);
3431
3432	return ret;
3433}
3434
3435int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
3436				     int num_tx, int num_rx)
3437{
3438	int ret;
3439
3440	am65_cpsw_nuss_remove_tx_chns(common);
3441	am65_cpsw_nuss_remove_rx_chns(common);
3442
3443	common->tx_ch_num = num_tx;
3444	common->rx_ch_num_flows = num_rx;
3445	ret = am65_cpsw_nuss_init_tx_chns(common);
3446	if (ret)
3447		return ret;
3448
3449	ret = am65_cpsw_nuss_init_rx_chns(common);
3450	if (ret)
3451		am65_cpsw_nuss_remove_tx_chns(common);
3452
3453	return ret;
3454}
3455
3456struct am65_cpsw_soc_pdata {
3457	u32	quirks_dis;
3458};
3459
3460static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = {
3461	.quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
3462};
3463
3464static const struct soc_device_attribute am65_cpsw_socinfo[] = {
3465	{ .family = "AM65X",
3466	  .revision = "SR2.0",
3467	  .data = &am65x_soc_sr2_0
3468	},
3469	{/* sentinel */}
3470};
3471
3472static const struct am65_cpsw_pdata am65x_sr1_0 = {
3473	.quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
3474	.ale_dev_id = "am65x-cpsw2g",
3475	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
3476};
3477
3478static const struct am65_cpsw_pdata j721e_pdata = {
3479	.quirks = 0,
3480	.ale_dev_id = "am65x-cpsw2g",
3481	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
3482};
3483
3484static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
3485	.quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ,
3486	.ale_dev_id = "am64-cpswxg",
3487	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
3488};
3489
3490static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
3491	.quirks = 0,
3492	.ale_dev_id = "am64-cpswxg",
3493	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
3494	.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
3495		       BIT(PHY_INTERFACE_MODE_USXGMII),
3496};
3497
3498static const struct am65_cpsw_pdata j721e_cpswxg_pdata = {
3499	.quirks = 0,
3500	.ale_dev_id = "am64-cpswxg",
3501	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
3502	.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
3503};
3504
3505static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = {
3506	.quirks = 0,
3507	.ale_dev_id = "am64-cpswxg",
3508	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
3509	.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
3510		       BIT(PHY_INTERFACE_MODE_USXGMII),
3511};
3512
3513static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
3514	{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
3515	{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
3516	{ .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
3517	{ .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata},
3518	{ .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata},
3519	{ .compatible = "ti,j784s4-cpswxg-nuss", .data = &j784s4_cpswxg_pdata},
3520	{ /* sentinel */ },
3521};
3522MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
3523
3524static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common)
3525{
3526	const struct soc_device_attribute *soc;
3527
3528	soc = soc_device_match(am65_cpsw_socinfo);
3529	if (soc && soc->data) {
3530		const struct am65_cpsw_soc_pdata *socdata = soc->data;
3531
3532		/* disable quirks */
3533		common->pdata.quirks &= ~socdata->quirks_dis;
3534	}
3535}
3536
3537static int am65_cpsw_nuss_probe(struct platform_device *pdev)
3538{
3539	struct cpsw_ale_params ale_params = { 0 };
3540	const struct of_device_id *of_id;
3541	struct device *dev = &pdev->dev;
3542	struct am65_cpsw_common *common;
3543	struct device_node *node;
3544	struct resource *res;
3545	struct clk *clk;
3546	int ale_entries;
3547	__be64 id_temp;
3548	int ret, i;
3549
3550	common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
3551	if (!common)
3552		return -ENOMEM;
3553	common->dev = dev;
3554
3555	of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev);
3556	if (!of_id)
3557		return -EINVAL;
3558	common->pdata = *(const struct am65_cpsw_pdata *)of_id->data;
3559
3560	am65_cpsw_nuss_apply_socinfo(common);
3561
3562	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss");
3563	common->ss_base = devm_ioremap_resource(&pdev->dev, res);
3564	if (IS_ERR(common->ss_base))
3565		return PTR_ERR(common->ss_base);
3566	common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;
3567	/* Use device's physical base address as switch id */
3568	id_temp = cpu_to_be64(res->start);
3569	memcpy(common->switch_id, &id_temp, sizeof(res->start));
3570
3571	node = of_get_child_by_name(dev->of_node, "ethernet-ports");
3572	if (!node)
3573		return -ENOENT;
3574	common->port_num = of_get_child_count(node);
3575	of_node_put(node);
3576	if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
3577		return -ENOENT;
3578
3579	common->rx_flow_id_base = -1;
3580	init_completion(&common->tdown_complete);
3581	common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
3582	common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
3583	common->pf_p0_rx_ptype_rrobin = true;
3584	common->default_vlan = 1;
3585
3586	common->ports = devm_kcalloc(dev, common->port_num,
3587				     sizeof(*common->ports),
3588				     GFP_KERNEL);
3589	if (!common->ports)
3590		return -ENOMEM;
3591
3592	clk = devm_clk_get(dev, "fck");
3593	if (IS_ERR(clk))
3594		return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n");
3595	common->bus_freq = clk_get_rate(clk);
3596
3597	pm_runtime_enable(dev);
3598	ret = pm_runtime_resume_and_get(dev);
3599	if (ret < 0) {
3600		pm_runtime_disable(dev);
3601		return ret;
3602	}
3603
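	/* The MDIO controller is modelled as a child node: create a platform
	 * device for it so the MDIO bus driver can bind, and remember it so it
	 * can be destroyed on errors and on remove.
	 */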
3604	node = of_get_child_by_name(dev->of_node, "mdio");
3605	if (!node) {
3606		dev_warn(dev, "MDIO node not found\n");
3607	} else if (of_device_is_available(node)) {
3608		struct platform_device *mdio_pdev;
3609
3610		mdio_pdev = of_platform_device_create(node, NULL, dev);
3611		if (!mdio_pdev) {
3612			ret = -ENODEV;
3613			goto err_pm_clear;
3614		}
3615
3616		common->mdio_dev = &mdio_pdev->dev;
3617	}
3618	of_node_put(node);
3619
3620	am65_cpsw_nuss_get_ver(common);
3621
3622	ret = am65_cpsw_nuss_init_host_p(common);
3623	if (ret)
3624		goto err_of_clear;
3625
3626	ret = am65_cpsw_nuss_init_slave_ports(common);
3627	if (ret)
3628		goto err_of_clear;
3629
3630	/* init common data */
3631	ale_params.dev = dev;
3632	ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
3633	ale_params.ale_ports = common->port_num + 1;
3634	ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
3635	ale_params.dev_id = common->pdata.ale_dev_id;
3636	ale_params.bus_freq = common->bus_freq;
3637
3638	common->ale = cpsw_ale_create(&ale_params);
3639	if (IS_ERR(common->ale)) {
3640		dev_err(dev, "error initializing ALE engine\n");
3641		ret = PTR_ERR(common->ale);
3642		goto err_of_clear;
3643	}
3644
3645	ale_entries = common->ale->params.ale_entries;
3646	common->ale_context = devm_kzalloc(dev,
3647					   ale_entries * ALE_ENTRY_WORDS * sizeof(u32),
3648					   GFP_KERNEL);
	if (!common->ale_context) {
		ret = -ENOMEM;
		goto err_of_clear;
	}
3649	ret = am65_cpsw_init_cpts(common);
3650	if (ret)
3651		goto err_of_clear;
3652
3653	/* init ports */
3654	for (i = 0; i < common->port_num; i++)
3655		am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);
3656
3657	dev_set_drvdata(dev, common);
3658
3659	common->is_emac_mode = true;
3660
3661	ret = am65_cpsw_nuss_init_ndevs(common);
3662	if (ret)
3663		goto err_ndevs_clear;
3664
3665	ret = am65_cpsw_nuss_register_ndevs(common);
3666	if (ret)
3667		goto err_ndevs_clear;
3668
3669	pm_runtime_put(dev);
3670	return 0;
3671
3672err_ndevs_clear:
3673	am65_cpsw_nuss_cleanup_ndev(common);
3674	am65_cpsw_nuss_phylink_cleanup(common);
3675	am65_cpts_release(common->cpts);
3676err_of_clear:
3677	if (common->mdio_dev)
3678		of_platform_device_destroy(common->mdio_dev, NULL);
3679err_pm_clear:
3680	pm_runtime_put_sync(dev);
3681	pm_runtime_disable(dev);
3682	return ret;
3683}
3684
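/* Remove: runtime-resume the device so teardown register accesses are valid,
 * then unwind notifiers, net_devices, devlink, DMA channels, phylink, CPTS and
 * the SerDes PHYs in roughly the reverse order of probe.
 */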
3685static void am65_cpsw_nuss_remove(struct platform_device *pdev)
3686{
3687	struct device *dev = &pdev->dev;
3688	struct am65_cpsw_common *common;
3689	int ret;
3690
3691	common = dev_get_drvdata(dev);
3692
3693	ret = pm_runtime_resume_and_get(&pdev->dev);
3694	if (ret < 0) {
3695		/* Note: if this error path is taken, we are leaking the
3696		 * resources that would normally be freed below.
3697		 */
3698		dev_err(&pdev->dev, "Failed to resume device (%pe)\n",
3699			ERR_PTR(ret));
3700		return;
3701	}
3702
3703	am65_cpsw_unregister_notifiers(common);
3704
3705	/* must unregister ndevs here because the driver core's release_driver
3706	 * routine calls dma_deconfigure(dev) before devres_release_all(dev)
3707	 */
3708	am65_cpsw_nuss_cleanup_ndev(common);
3709	am65_cpsw_unregister_devlink(common);
3710	am65_cpsw_nuss_remove_rx_chns(common);
3711	am65_cpsw_nuss_remove_tx_chns(common);
3712	am65_cpsw_nuss_phylink_cleanup(common);
3713	am65_cpts_release(common->cpts);
3714	am65_cpsw_disable_serdes_phy(common);
3715
3716	if (common->mdio_dev)
3717		of_platform_device_destroy(common->mdio_dev, NULL);
3718
3719	pm_runtime_put_sync(&pdev->dev);
3720	pm_runtime_disable(&pdev->dev);
3721}
3722
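/* System suspend: save the ALE table and the host/slave port VLAN registers,
 * stop any running net_devices, suspend CPTS and release the DMA channels;
 * everything is rebuilt in am65_cpsw_nuss_resume().
 */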
3723static int am65_cpsw_nuss_suspend(struct device *dev)
3724{
3725	struct am65_cpsw_common *common = dev_get_drvdata(dev);
3726	struct am65_cpsw_host *host_p = am65_common_get_host(common);
3727	struct am65_cpsw_port *port;
3728	struct net_device *ndev;
3729	int i, ret;
3730
3731	cpsw_ale_dump(common->ale, common->ale_context);
3732	host_p->vid_context = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3733	for (i = 0; i < common->port_num; i++) {
3734		port = &common->ports[i];
3735		ndev = port->ndev;
3736
3737		if (!ndev)
3738			continue;
3739
3740		port->vid_context = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3741		netif_device_detach(ndev);
3742		if (netif_running(ndev)) {
3743			rtnl_lock();
3744			ret = am65_cpsw_nuss_ndo_slave_stop(ndev);
3745			rtnl_unlock();
3746			if (ret < 0) {
3747				netdev_err(ndev, "failed to stop: %d\n", ret);
3748				return ret;
3749			}
3750		}
3751	}
3752
3753	am65_cpts_suspend(common->cpts);
3754
3755	am65_cpsw_nuss_remove_rx_chns(common);
3756	am65_cpsw_nuss_remove_tx_chns(common);
3757
3758	return 0;
3759}
3760
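/* System resume: recreate the TX/RX DMA channels, keep RX IRQs that were
 * disabled before suspend disabled, resume CPTS, reopen previously running
 * net_devices and restore the saved VLAN registers and ALE table.
 */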
3761static int am65_cpsw_nuss_resume(struct device *dev)
3762{
3763	struct am65_cpsw_common *common = dev_get_drvdata(dev);
3764	struct am65_cpsw_host *host_p = am65_common_get_host(common);
3765	struct am65_cpsw_port *port;
3766	struct net_device *ndev;
3767	int i, ret;
3768
3769	ret = am65_cpsw_nuss_init_tx_chns(common);
3770	if (ret)
3771		return ret;
3772	ret = am65_cpsw_nuss_init_rx_chns(common);
3773	if (ret) {
3774		am65_cpsw_nuss_remove_tx_chns(common);
3775		return ret;
3776	}
3777
3778	/* If RX IRQ was disabled before suspend, keep it disabled */
3779	for (i = 0; i < common->rx_ch_num_flows; i++) {
3780		if (common->rx_chns.flows[i].irq_disabled)
3781			disable_irq(common->rx_chns.flows[i].irq);
3782	}
3783
3784	am65_cpts_resume(common->cpts);
3785
3786	for (i = 0; i < common->port_num; i++) {
3787		port = &common->ports[i];
3788		ndev = port->ndev;
3789
3790		if (!ndev)
3791			continue;
3792
3793		if (netif_running(ndev)) {
3794			rtnl_lock();
3795			ret = am65_cpsw_nuss_ndo_slave_open(ndev);
3796			rtnl_unlock();
3797			if (ret < 0) {
3798				netdev_err(ndev, "failed to start: %d\n", ret);
3799				return ret;
3800			}
3801		}
3802
3803		netif_device_attach(ndev);
3804		writel(port->vid_context, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3805	}
3806
3807	writel(host_p->vid_context, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3808	cpsw_ale_restore(common->ale, common->ale_context);
3809
3810	return 0;
3811}
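/* Only system sleep PM ops are provided; runtime PM is handled with explicit
 * pm_runtime_*() calls elsewhere in the driver.
 */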
3812
3813static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops = {
3814	SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume)
3815};
3816
3817static struct platform_driver am65_cpsw_nuss_driver = {
3818	.driver = {
3819		.name	 = AM65_CPSW_DRV_NAME,
3820		.of_match_table = am65_cpsw_nuss_of_mtable,
3821		.pm = &am65_cpsw_nuss_dev_pm_ops,
3822	},
3823	.probe = am65_cpsw_nuss_probe,
3824	.remove = am65_cpsw_nuss_remove,
3825};
3826
3827module_platform_driver(am65_cpsw_nuss_driver);
3828
3829MODULE_LICENSE("GPL v2");
3830MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
3831MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");