// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 */
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
#include <net/dsa.h>
#include <net/page_pool/helpers.h>
#include <uapi/linux/ppp_defs.h>

#define AIROHA_MAX_NUM_GDM_PORTS	1
#define AIROHA_MAX_NUM_QDMA		2
#define AIROHA_MAX_NUM_RSTS		3
#define AIROHA_MAX_NUM_XSI_RSTS		5
#define AIROHA_MAX_MTU			2000
#define AIROHA_MAX_PACKET_SIZE		2048
#define AIROHA_NUM_TX_RING		32
#define AIROHA_NUM_RX_RING		32
#define AIROHA_FE_MC_MAX_VLAN_TABLE	64
#define AIROHA_FE_MC_MAX_VLAN_PORT	16
#define AIROHA_NUM_TX_IRQ		2
#define HW_DSCP_NUM			2048
#define IRQ_QUEUE_LEN(_n)		((_n) ? 1024 : 2048)
#define TX_DSCP_NUM			1024
#define RX_DSCP_NUM(_n)			\
	((_n) ==  2 ? 128 :		\
	 (_n) == 11 ? 128 :		\
	 (_n) == 15 ? 128 :		\
	 (_n) ==  0 ? 1024 : 16)
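
/* Exposition added to this listing (not part of the original driver):
 * compile-time checks of the RX_DSCP_NUM() mapping above -- ring 0 gets
 * 1024 descriptors, rings 2/11/15 get 128 and every other ring gets 16.
 * Assumes static_assert() from <linux/build_bug.h>, pulled in via
 * linux/kernel.h.
 */
static_assert(RX_DSCP_NUM(0) == 1024);
static_assert(RX_DSCP_NUM(2) == 128);
static_assert(RX_DSCP_NUM(15) == 128);
static_assert(RX_DSCP_NUM(5) == 16);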

#define PSE_RSV_PAGES			128
#define PSE_QUEUE_RSV_PAGES		64

/* FE */
#define PSE_BASE			0x0100
#define CSR_IFC_BASE			0x0200
#define CDM1_BASE			0x0400
#define GDM1_BASE			0x0500
#define PPE1_BASE			0x0c00

#define CDM2_BASE			0x1400
#define GDM2_BASE			0x1500

#define GDM3_BASE			0x1100
#define GDM4_BASE			0x2500

#define GDM_BASE(_n)			\
	((_n) == 4 ? GDM4_BASE :	\
	 (_n) == 3 ? GDM3_BASE :	\
	 (_n) == 2 ? GDM2_BASE : GDM1_BASE)
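
/* Note added for exposition (not in the original sources): the GDM block
 * bases are not monotonic -- GDM3 (0x1100) sits below GDM2 (0x1500) while
 * GDM4 lives at 0x2500 -- so GDM_BASE() has to dispatch explicitly rather
 * than scale the port index:
 *
 *   GDM_BASE(1) == 0x0500, GDM_BASE(2) == 0x1500,
 *   GDM_BASE(3) == 0x1100, GDM_BASE(4) == 0x2500
 */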

#define REG_FE_DMA_GLO_CFG		0x0000
#define FE_DMA_GLO_L2_SPACE_MASK	GENMASK(7, 4)
#define FE_DMA_GLO_PG_SZ_MASK		BIT(3)

#define REG_FE_RST_GLO_CFG		0x0004
#define FE_RST_GDM4_MBI_ARB_MASK	BIT(3)
#define FE_RST_GDM3_MBI_ARB_MASK	BIT(2)
#define FE_RST_CORE_MASK		BIT(0)

#define REG_FE_WAN_MAC_H		0x0030
#define REG_FE_LAN_MAC_H		0x0040

#define REG_FE_MAC_LMIN(_n)		((_n) + 0x04)
#define REG_FE_MAC_LMAX(_n)		((_n) + 0x08)

#define REG_FE_CDM1_OQ_MAP0		0x0050
#define REG_FE_CDM1_OQ_MAP1		0x0054
#define REG_FE_CDM1_OQ_MAP2		0x0058
#define REG_FE_CDM1_OQ_MAP3		0x005c

#define REG_FE_PCE_CFG			0x0070
#define PCE_DPI_EN_MASK			BIT(2)
#define PCE_KA_EN_MASK			BIT(1)
#define PCE_MC_EN_MASK			BIT(0)

#define REG_FE_PSE_QUEUE_CFG_WR		0x0080
#define PSE_CFG_PORT_ID_MASK		GENMASK(27, 24)
#define PSE_CFG_QUEUE_ID_MASK		GENMASK(20, 16)
#define PSE_CFG_WR_EN_MASK		BIT(8)
#define PSE_CFG_OQRSV_SEL_MASK		BIT(0)

#define REG_FE_PSE_QUEUE_CFG_VAL	0x0084
#define PSE_CFG_OQ_RSV_MASK		GENMASK(13, 0)

#define PSE_FQ_CFG			0x008c
#define PSE_FQ_LIMIT_MASK		GENMASK(14, 0)

#define REG_FE_PSE_BUF_SET		0x0090
#define PSE_SHARE_USED_LTHD_MASK	GENMASK(31, 16)
#define PSE_ALLRSV_MASK			GENMASK(14, 0)

#define REG_PSE_SHARE_USED_THD		0x0094
#define PSE_SHARE_USED_MTHD_MASK	GENMASK(31, 16)
#define PSE_SHARE_USED_HTHD_MASK	GENMASK(15, 0)

#define REG_GDM_MISC_CFG		0x0148
#define GDM2_RDM_ACK_WAIT_PREF_MASK	BIT(9)
#define GDM2_CHN_VLD_MODE_MASK		BIT(5)

#define REG_FE_CSR_IFC_CFG		CSR_IFC_BASE
#define FE_IFC_EN_MASK			BIT(0)

#define REG_FE_VIP_PORT_EN		0x01f0
#define REG_FE_IFC_PORT_EN		0x01f4

#define REG_PSE_IQ_REV1			(PSE_BASE + 0x08)
#define PSE_IQ_RES1_P2_MASK		GENMASK(23, 16)

#define REG_PSE_IQ_REV2			(PSE_BASE + 0x0c)
#define PSE_IQ_RES2_P5_MASK		GENMASK(15, 8)
#define PSE_IQ_RES2_P4_MASK		GENMASK(7, 0)

#define REG_FE_VIP_EN(_n)		(0x0300 + ((_n) << 3))
#define PATN_FCPU_EN_MASK		BIT(7)
#define PATN_SWP_EN_MASK		BIT(6)
#define PATN_DP_EN_MASK			BIT(5)
#define PATN_SP_EN_MASK			BIT(4)
#define PATN_TYPE_MASK			GENMASK(3, 1)
#define PATN_EN_MASK			BIT(0)

#define REG_FE_VIP_PATN(_n)		(0x0304 + ((_n) << 3))
#define PATN_DP_MASK			GENMASK(31, 16)
#define PATN_SP_MASK			GENMASK(15, 0)

#define REG_CDM1_VLAN_CTRL		CDM1_BASE
#define CDM1_VLAN_MASK			GENMASK(31, 16)

#define REG_CDM1_FWD_CFG		(CDM1_BASE + 0x08)
#define CDM1_VIP_QSEL_MASK		GENMASK(24, 20)

#define REG_CDM1_CRSN_QSEL(_n)		(CDM1_BASE + 0x10 + ((_n) << 2))
#define CDM1_CRSN_QSEL_REASON_MASK(_n)	\
	GENMASK(4 + (((_n) % 4) << 3),	(((_n) % 4) << 3))
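
/* Exposition added to this listing: each 32-bit CRSN_QSEL register packs
 * four 5-bit queue selectors, one per byte lane, so CPU reason code
 * <crsn> lives in register (crsn >> 2) at bit offset (crsn % 4) * 8.
 * Worked example for CRSN_22 (0x16): register index 5, byte lane 2, i.e.
 * GENMASK(20, 16). Assumes static_assert() from <linux/build_bug.h>.
 */
static_assert(CDM1_CRSN_QSEL_REASON_MASK(0x16) == GENMASK(20, 16));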

#define REG_CDM2_FWD_CFG		(CDM2_BASE + 0x08)
#define CDM2_OAM_QSEL_MASK		GENMASK(31, 27)
#define CDM2_VIP_QSEL_MASK		GENMASK(24, 20)

#define REG_CDM2_CRSN_QSEL(_n)		(CDM2_BASE + 0x10 + ((_n) << 2))
#define CDM2_CRSN_QSEL_REASON_MASK(_n)	\
	GENMASK(4 + (((_n) % 4) << 3),	(((_n) % 4) << 3))

#define REG_GDM_FWD_CFG(_n)		GDM_BASE(_n)
#define GDM_DROP_CRC_ERR		BIT(23)
#define GDM_IP4_CKSUM			BIT(22)
#define GDM_TCP_CKSUM			BIT(21)
#define GDM_UDP_CKSUM			BIT(20)
#define GDM_UCFQ_MASK			GENMASK(15, 12)
#define GDM_BCFQ_MASK			GENMASK(11, 8)
#define GDM_MCFQ_MASK			GENMASK(7, 4)
#define GDM_OCFQ_MASK			GENMASK(3, 0)

#define REG_GDM_INGRESS_CFG(_n)		(GDM_BASE(_n) + 0x10)
#define GDM_INGRESS_FC_EN_MASK		BIT(1)
#define GDM_STAG_EN_MASK		BIT(0)

#define REG_GDM_LEN_CFG(_n)		(GDM_BASE(_n) + 0x14)
#define GDM_SHORT_LEN_MASK		GENMASK(13, 0)
#define GDM_LONG_LEN_MASK		GENMASK(29, 16)

#define REG_FE_CPORT_CFG		(GDM1_BASE + 0x40)
#define FE_CPORT_PAD			BIT(26)
#define FE_CPORT_PORT_XFC_MASK		BIT(25)
#define FE_CPORT_QUEUE_XFC_MASK		BIT(24)

#define REG_FE_GDM_MIB_CLEAR(_n)	(GDM_BASE(_n) + 0xf0)
#define FE_GDM_MIB_RX_CLEAR_MASK	BIT(1)
#define FE_GDM_MIB_TX_CLEAR_MASK	BIT(0)

#define REG_FE_GDM1_MIB_CFG		(GDM1_BASE + 0xf4)
#define FE_STRICT_RFC2819_MODE_MASK	BIT(31)
#define FE_GDM1_TX_MIB_SPLIT_EN_MASK	BIT(17)
#define FE_GDM1_RX_MIB_SPLIT_EN_MASK	BIT(16)
#define FE_TX_MIB_ID_MASK		GENMASK(15, 8)
#define FE_RX_MIB_ID_MASK		GENMASK(7, 0)

#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n)		(GDM_BASE(_n) + 0x104)
#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n)		(GDM_BASE(_n) + 0x10c)
#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n)		(GDM_BASE(_n) + 0x110)
#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n)	(GDM_BASE(_n) + 0x114)
#define REG_FE_GDM_TX_ETH_DROP_CNT(_n)		(GDM_BASE(_n) + 0x118)
#define REG_FE_GDM_TX_ETH_BC_CNT(_n)		(GDM_BASE(_n) + 0x11c)
#define REG_FE_GDM_TX_ETH_MC_CNT(_n)		(GDM_BASE(_n) + 0x120)
#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n)		(GDM_BASE(_n) + 0x124)
#define REG_FE_GDM_TX_ETH_LONG_CNT(_n)		(GDM_BASE(_n) + 0x128)
#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n)		(GDM_BASE(_n) + 0x12c)
#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n)		(GDM_BASE(_n) + 0x130)
#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n)	(GDM_BASE(_n) + 0x134)
#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n)	(GDM_BASE(_n) + 0x138)
#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n)	(GDM_BASE(_n) + 0x13c)
#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n)	(GDM_BASE(_n) + 0x140)

#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n)		(GDM_BASE(_n) + 0x148)
#define REG_FE_GDM_RX_FC_DROP_CNT(_n)		(GDM_BASE(_n) + 0x14c)
#define REG_FE_GDM_RX_RC_DROP_CNT(_n)		(GDM_BASE(_n) + 0x150)
#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n)	(GDM_BASE(_n) + 0x154)
#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n)	(GDM_BASE(_n) + 0x158)
#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n)		(GDM_BASE(_n) + 0x15c)
#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n)		(GDM_BASE(_n) + 0x160)
#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n)	(GDM_BASE(_n) + 0x164)
#define REG_FE_GDM_RX_ETH_DROP_CNT(_n)		(GDM_BASE(_n) + 0x168)
#define REG_FE_GDM_RX_ETH_BC_CNT(_n)		(GDM_BASE(_n) + 0x16c)
#define REG_FE_GDM_RX_ETH_MC_CNT(_n)		(GDM_BASE(_n) + 0x170)
#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n)	(GDM_BASE(_n) + 0x174)
#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n)		(GDM_BASE(_n) + 0x178)
#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n)	(GDM_BASE(_n) + 0x17c)
#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n)		(GDM_BASE(_n) + 0x180)
#define REG_FE_GDM_RX_ETH_LONG_CNT(_n)		(GDM_BASE(_n) + 0x184)
#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n)		(GDM_BASE(_n) + 0x188)
#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n)		(GDM_BASE(_n) + 0x18c)
#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n)	(GDM_BASE(_n) + 0x190)
#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n)	(GDM_BASE(_n) + 0x194)
#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n)	(GDM_BASE(_n) + 0x198)
#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n)	(GDM_BASE(_n) + 0x19c)

#define REG_PPE1_TB_HASH_CFG		(PPE1_BASE + 0x250)
#define PPE1_SRAM_TABLE_EN_MASK		BIT(0)
#define PPE1_SRAM_HASH1_EN_MASK		BIT(8)
#define PPE1_DRAM_TABLE_EN_MASK		BIT(16)
#define PPE1_DRAM_HASH1_EN_MASK		BIT(24)

#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n)		(GDM_BASE(_n) + 0x280)
#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n)		(GDM_BASE(_n) + 0x284)
#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n)		(GDM_BASE(_n) + 0x288)
#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n)	(GDM_BASE(_n) + 0x28c)

#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n)		(GDM_BASE(_n) + 0x290)
#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n)		(GDM_BASE(_n) + 0x294)
#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n)		(GDM_BASE(_n) + 0x298)
#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n)	(GDM_BASE(_n) + 0x29c)
#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n)		(GDM_BASE(_n) + 0x2b8)
#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n)		(GDM_BASE(_n) + 0x2bc)
#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n)	(GDM_BASE(_n) + 0x2c0)
#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n)	(GDM_BASE(_n) + 0x2c4)
#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n)	(GDM_BASE(_n) + 0x2c8)
#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n)	(GDM_BASE(_n) + 0x2cc)
#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n)		(GDM_BASE(_n) + 0x2e8)
#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n)		(GDM_BASE(_n) + 0x2ec)
#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n)	(GDM_BASE(_n) + 0x2f0)
#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n)	(GDM_BASE(_n) + 0x2f4)
#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n)	(GDM_BASE(_n) + 0x2f8)
#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n)	(GDM_BASE(_n) + 0x2fc)

#define REG_GDM2_CHN_RLS		(GDM2_BASE + 0x20)
#define MBI_RX_AGE_SEL_MASK		GENMASK(26, 25)
#define MBI_TX_AGE_SEL_MASK		GENMASK(18, 17)

#define REG_GDM3_FWD_CFG		GDM3_BASE
#define GDM3_PAD_EN_MASK		BIT(28)

#define REG_GDM4_FWD_CFG		GDM4_BASE
#define GDM4_PAD_EN_MASK		BIT(28)
#define GDM4_SPORT_OFFSET0_MASK		GENMASK(11, 8)

#define REG_GDM4_SRC_PORT_SET		(GDM4_BASE + 0x23c)
#define GDM4_SPORT_OFF2_MASK		GENMASK(19, 16)
#define GDM4_SPORT_OFF1_MASK		GENMASK(15, 12)
#define GDM4_SPORT_OFF0_MASK		GENMASK(11, 8)

#define REG_IP_FRAG_FP			0x2010
#define IP_ASSEMBLE_PORT_MASK		GENMASK(24, 21)
#define IP_ASSEMBLE_NBQ_MASK		GENMASK(20, 16)
#define IP_FRAGMENT_PORT_MASK		GENMASK(8, 5)
#define IP_FRAGMENT_NBQ_MASK		GENMASK(4, 0)

#define REG_MC_VLAN_EN			0x2100
#define MC_VLAN_EN_MASK			BIT(0)

#define REG_MC_VLAN_CFG			0x2104
#define MC_VLAN_CFG_CMD_DONE_MASK	BIT(31)
#define MC_VLAN_CFG_TABLE_ID_MASK	GENMASK(21, 16)
#define MC_VLAN_CFG_PORT_ID_MASK	GENMASK(11, 8)
#define MC_VLAN_CFG_TABLE_SEL_MASK	BIT(4)
#define MC_VLAN_CFG_RW_MASK		BIT(0)

#define REG_MC_VLAN_DATA		0x2108

#define REG_CDM5_RX_OQ1_DROP_CNT	0x29d4

/* QDMA */
#define REG_QDMA_GLOBAL_CFG			0x0004
#define GLOBAL_CFG_RX_2B_OFFSET_MASK		BIT(31)
#define GLOBAL_CFG_DMA_PREFERENCE_MASK		GENMASK(30, 29)
#define GLOBAL_CFG_CPU_TXR_RR_MASK		BIT(28)
#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK		BIT(27)
#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK	BIT(26)
#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK	BIT(25)
#define GLOBAL_CFG_OAM_MODIFY_MASK		BIT(24)
#define GLOBAL_CFG_RESET_MASK			BIT(23)
#define GLOBAL_CFG_RESET_DONE_MASK		BIT(22)
#define GLOBAL_CFG_MULTICAST_EN_MASK		BIT(21)
#define GLOBAL_CFG_IRQ1_EN_MASK			BIT(20)
#define GLOBAL_CFG_IRQ0_EN_MASK			BIT(19)
#define GLOBAL_CFG_LOOPCNT_EN_MASK		BIT(18)
#define GLOBAL_CFG_RD_BYPASS_WR_MASK		BIT(17)
#define GLOBAL_CFG_QDMA_LOOPBACK_MASK		BIT(16)
#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK		GENMASK(13, 8)
#define GLOBAL_CFG_CHECK_DONE_MASK		BIT(7)
#define GLOBAL_CFG_TX_WB_DONE_MASK		BIT(6)
#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK		GENMASK(5, 4)
#define GLOBAL_CFG_RX_DMA_BUSY_MASK		BIT(3)
#define GLOBAL_CFG_RX_DMA_EN_MASK		BIT(2)
#define GLOBAL_CFG_TX_DMA_BUSY_MASK		BIT(1)
#define GLOBAL_CFG_TX_DMA_EN_MASK		BIT(0)

#define REG_FWD_DSCP_BASE			0x0010
#define REG_FWD_BUF_BASE			0x0014

#define REG_HW_FWD_DSCP_CFG			0x0018
#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK		GENMASK(29, 28)
#define HW_FWD_DSCP_SCATTER_LEN_MASK		GENMASK(17, 16)
#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK	GENMASK(15, 0)

#define REG_INT_STATUS(_n)		\
	(((_n) == 4) ? 0x0730 :		\
	 ((_n) == 3) ? 0x0724 :		\
	 ((_n) == 2) ? 0x0720 :		\
	 ((_n) == 1) ? 0x0024 : 0x0020)

#define REG_INT_ENABLE(_n)		\
	(((_n) == 4) ? 0x0750 :		\
	 ((_n) == 3) ? 0x0744 :		\
	 ((_n) == 2) ? 0x0740 :		\
	 ((_n) == 1) ? 0x002c : 0x0028)

/* QDMA_CSR_INT_ENABLE1 */
#define RX15_COHERENT_INT_MASK		BIT(31)
#define RX14_COHERENT_INT_MASK		BIT(30)
#define RX13_COHERENT_INT_MASK		BIT(29)
#define RX12_COHERENT_INT_MASK		BIT(28)
#define RX11_COHERENT_INT_MASK		BIT(27)
#define RX10_COHERENT_INT_MASK		BIT(26)
#define RX9_COHERENT_INT_MASK		BIT(25)
#define RX8_COHERENT_INT_MASK		BIT(24)
#define RX7_COHERENT_INT_MASK		BIT(23)
#define RX6_COHERENT_INT_MASK		BIT(22)
#define RX5_COHERENT_INT_MASK		BIT(21)
#define RX4_COHERENT_INT_MASK		BIT(20)
#define RX3_COHERENT_INT_MASK		BIT(19)
#define RX2_COHERENT_INT_MASK		BIT(18)
#define RX1_COHERENT_INT_MASK		BIT(17)
#define RX0_COHERENT_INT_MASK		BIT(16)
#define TX7_COHERENT_INT_MASK		BIT(15)
#define TX6_COHERENT_INT_MASK		BIT(14)
#define TX5_COHERENT_INT_MASK		BIT(13)
#define TX4_COHERENT_INT_MASK		BIT(12)
#define TX3_COHERENT_INT_MASK		BIT(11)
#define TX2_COHERENT_INT_MASK		BIT(10)
#define TX1_COHERENT_INT_MASK		BIT(9)
#define TX0_COHERENT_INT_MASK		BIT(8)
#define CNT_OVER_FLOW_INT_MASK		BIT(7)
#define IRQ1_FULL_INT_MASK		BIT(5)
#define IRQ1_INT_MASK			BIT(4)
#define HWFWD_DSCP_LOW_INT_MASK		BIT(3)
#define HWFWD_DSCP_EMPTY_INT_MASK	BIT(2)
#define IRQ0_FULL_INT_MASK		BIT(1)
#define IRQ0_INT_MASK			BIT(0)

#define TX_DONE_INT_MASK(_n)					\
	((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK		\
	      : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)

#define INT_TX_MASK						\
	(IRQ1_INT_MASK | IRQ1_FULL_INT_MASK |			\
	 IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)

#define INT_IDX0_MASK						\
	(TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK |	\
	 TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK |	\
	 TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK |	\
	 TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK |	\
	 RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK |	\
	 RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK |	\
	 RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK |	\
	 RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK |	\
	 RX15_COHERENT_INT_MASK | INT_TX_MASK)

/* QDMA_CSR_INT_ENABLE2 */
#define RX15_NO_CPU_DSCP_INT_MASK	BIT(31)
#define RX14_NO_CPU_DSCP_INT_MASK	BIT(30)
#define RX13_NO_CPU_DSCP_INT_MASK	BIT(29)
#define RX12_NO_CPU_DSCP_INT_MASK	BIT(28)
#define RX11_NO_CPU_DSCP_INT_MASK	BIT(27)
#define RX10_NO_CPU_DSCP_INT_MASK	BIT(26)
#define RX9_NO_CPU_DSCP_INT_MASK	BIT(25)
#define RX8_NO_CPU_DSCP_INT_MASK	BIT(24)
#define RX7_NO_CPU_DSCP_INT_MASK	BIT(23)
#define RX6_NO_CPU_DSCP_INT_MASK	BIT(22)
#define RX5_NO_CPU_DSCP_INT_MASK	BIT(21)
#define RX4_NO_CPU_DSCP_INT_MASK	BIT(20)
#define RX3_NO_CPU_DSCP_INT_MASK	BIT(19)
#define RX2_NO_CPU_DSCP_INT_MASK	BIT(18)
#define RX1_NO_CPU_DSCP_INT_MASK	BIT(17)
#define RX0_NO_CPU_DSCP_INT_MASK	BIT(16)
#define RX15_DONE_INT_MASK		BIT(15)
#define RX14_DONE_INT_MASK		BIT(14)
#define RX13_DONE_INT_MASK		BIT(13)
#define RX12_DONE_INT_MASK		BIT(12)
#define RX11_DONE_INT_MASK		BIT(11)
#define RX10_DONE_INT_MASK		BIT(10)
#define RX9_DONE_INT_MASK		BIT(9)
#define RX8_DONE_INT_MASK		BIT(8)
#define RX7_DONE_INT_MASK		BIT(7)
#define RX6_DONE_INT_MASK		BIT(6)
#define RX5_DONE_INT_MASK		BIT(5)
#define RX4_DONE_INT_MASK		BIT(4)
#define RX3_DONE_INT_MASK		BIT(3)
#define RX2_DONE_INT_MASK		BIT(2)
#define RX1_DONE_INT_MASK		BIT(1)
#define RX0_DONE_INT_MASK		BIT(0)

#define RX_DONE_INT_MASK					\
	(RX0_DONE_INT_MASK | RX1_DONE_INT_MASK |		\
	 RX2_DONE_INT_MASK | RX3_DONE_INT_MASK |		\
	 RX4_DONE_INT_MASK | RX7_DONE_INT_MASK |		\
	 RX8_DONE_INT_MASK | RX9_DONE_INT_MASK |		\
	 RX15_DONE_INT_MASK)
#define INT_IDX1_MASK						\
	(RX_DONE_INT_MASK |					\
	 RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK |	\
	 RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK |	\
	 RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK |	\
	 RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK |	\
	 RX15_NO_CPU_DSCP_INT_MASK)

/* QDMA_CSR_INT_ENABLE5 */
#define TX31_COHERENT_INT_MASK		BIT(31)
#define TX30_COHERENT_INT_MASK		BIT(30)
#define TX29_COHERENT_INT_MASK		BIT(29)
#define TX28_COHERENT_INT_MASK		BIT(28)
#define TX27_COHERENT_INT_MASK		BIT(27)
#define TX26_COHERENT_INT_MASK		BIT(26)
#define TX25_COHERENT_INT_MASK		BIT(25)
#define TX24_COHERENT_INT_MASK		BIT(24)
#define TX23_COHERENT_INT_MASK		BIT(23)
#define TX22_COHERENT_INT_MASK		BIT(22)
#define TX21_COHERENT_INT_MASK		BIT(21)
#define TX20_COHERENT_INT_MASK		BIT(20)
#define TX19_COHERENT_INT_MASK		BIT(19)
#define TX18_COHERENT_INT_MASK		BIT(18)
#define TX17_COHERENT_INT_MASK		BIT(17)
#define TX16_COHERENT_INT_MASK		BIT(16)
#define TX15_COHERENT_INT_MASK		BIT(15)
#define TX14_COHERENT_INT_MASK		BIT(14)
#define TX13_COHERENT_INT_MASK		BIT(13)
#define TX12_COHERENT_INT_MASK		BIT(12)
#define TX11_COHERENT_INT_MASK		BIT(11)
#define TX10_COHERENT_INT_MASK		BIT(10)
#define TX9_COHERENT_INT_MASK		BIT(9)
#define TX8_COHERENT_INT_MASK		BIT(8)

#define INT_IDX4_MASK						\
	(TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK |	\
	 TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK |	\
	 TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK |	\
	 TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK |	\
	 TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK |	\
	 TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK |	\
	 TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK |	\
	 TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK |	\
	 TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK |	\
	 TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK |	\
	 TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK |	\
	 TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)

#define REG_TX_IRQ_BASE(_n)		((_n) ? 0x0048 : 0x0050)

#define REG_TX_IRQ_CFG(_n)		((_n) ? 0x004c : 0x0054)
#define TX_IRQ_THR_MASK			GENMASK(27, 16)
#define TX_IRQ_DEPTH_MASK		GENMASK(11, 0)

#define REG_IRQ_CLEAR_LEN(_n)		((_n) ? 0x0064 : 0x0058)
#define IRQ_CLEAR_LEN_MASK		GENMASK(7, 0)

#define REG_IRQ_STATUS(_n)		((_n) ? 0x0068 : 0x005c)
#define IRQ_ENTRY_LEN_MASK		GENMASK(27, 16)
#define IRQ_HEAD_IDX_MASK		GENMASK(11, 0)

#define REG_TX_RING_BASE(_n)	\
	(((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))

#define REG_TX_RING_BLOCKING(_n)	\
	(((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))

#define TX_RING_IRQ_BLOCKING_MAP_MASK			BIT(6)
#define TX_RING_IRQ_BLOCKING_CFG_MASK			BIT(4)
#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK		BIT(2)
#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK	BIT(1)
#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK	BIT(0)

#define REG_TX_CPU_IDX(_n)	\
	(((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))

#define TX_RING_CPU_IDX_MASK		GENMASK(15, 0)

#define REG_TX_DMA_IDX(_n)	\
	(((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))

#define TX_RING_DMA_IDX_MASK		GENMASK(15, 0)

#define IRQ_RING_IDX_MASK		GENMASK(20, 16)
#define IRQ_DESC_IDX_MASK		GENMASK(15, 0)

#define REG_RX_RING_BASE(_n)	\
	(((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))

#define REG_RX_RING_SIZE(_n)	\
	(((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))

#define RX_RING_THR_MASK		GENMASK(31, 16)
#define RX_RING_SIZE_MASK		GENMASK(15, 0)

#define REG_RX_CPU_IDX(_n)	\
	(((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))

#define RX_RING_CPU_IDX_MASK		GENMASK(15, 0)

#define REG_RX_DMA_IDX(_n)	\
	(((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))

#define REG_RX_DELAY_INT_IDX(_n)	\
	(((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))

#define RX_DELAY_INT_MASK		GENMASK(15, 0)

#define RX_RING_DMA_IDX_MASK		GENMASK(15, 0)

#define REG_INGRESS_TRTCM_CFG		0x0070
#define INGRESS_TRTCM_EN_MASK		BIT(31)
#define INGRESS_TRTCM_MODE_MASK		BIT(30)
#define INGRESS_SLOW_TICK_RATIO_MASK	GENMASK(29, 16)
#define INGRESS_FAST_TICK_MASK		GENMASK(15, 0)

#define REG_TXQ_DIS_CFG_BASE(_n)	((_n) ? 0x20a0 : 0x00a0)
#define REG_TXQ_DIS_CFG(_n, _m)		(REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))

#define REG_LMGR_INIT_CFG		0x1000
#define LMGR_INIT_START			BIT(31)
#define LMGR_SRAM_MODE_MASK		BIT(30)
#define HW_FWD_PKTSIZE_OVERHEAD_MASK	GENMASK(27, 20)
#define HW_FWD_DESC_NUM_MASK		GENMASK(16, 0)

#define REG_FWD_DSCP_LOW_THR		0x1004
#define FWD_DSCP_LOW_THR_MASK		GENMASK(17, 0)

#define REG_EGRESS_RATE_METER_CFG		0x100c
#define EGRESS_RATE_METER_EN_MASK		BIT(31)
#define EGRESS_RATE_METER_EQ_RATE_EN_MASK	BIT(17)
#define EGRESS_RATE_METER_WINDOW_SZ_MASK	GENMASK(16, 12)
#define EGRESS_RATE_METER_TIMESLICE_MASK	GENMASK(10, 0)

#define REG_EGRESS_TRTCM_CFG		0x1010
#define EGRESS_TRTCM_EN_MASK		BIT(31)
#define EGRESS_TRTCM_MODE_MASK		BIT(30)
#define EGRESS_SLOW_TICK_RATIO_MASK	GENMASK(29, 16)
#define EGRESS_FAST_TICK_MASK		GENMASK(15, 0)

#define REG_TXWRR_MODE_CFG		0x1020
#define TWRR_WEIGHT_SCALE_MASK		BIT(31)
#define TWRR_WEIGHT_BASE_MASK		BIT(3)

#define REG_PSE_BUF_USAGE_CFG		0x1028
#define PSE_BUF_ESTIMATE_EN_MASK	BIT(29)

#define REG_GLB_TRTCM_CFG		0x1080
#define GLB_TRTCM_EN_MASK		BIT(31)
#define GLB_TRTCM_MODE_MASK		BIT(30)
#define GLB_SLOW_TICK_RATIO_MASK	GENMASK(29, 16)
#define GLB_FAST_TICK_MASK		GENMASK(15, 0)

#define REG_TXQ_CNGST_CFG		0x10a0
#define TXQ_CNGST_DROP_EN		BIT(31)
#define TXQ_CNGST_DEI_DROP_EN		BIT(30)

#define REG_SLA_TRTCM_CFG		0x1150
#define SLA_TRTCM_EN_MASK		BIT(31)
#define SLA_TRTCM_MODE_MASK		BIT(30)
#define SLA_SLOW_TICK_RATIO_MASK	GENMASK(29, 16)
#define SLA_FAST_TICK_MASK		GENMASK(15, 0)

/* CTRL */
#define QDMA_DESC_DONE_MASK		BIT(31)
#define QDMA_DESC_DROP_MASK		BIT(30) /* tx: drop - rx: overflow */
#define QDMA_DESC_MORE_MASK		BIT(29) /* more SG elements */
#define QDMA_DESC_DEI_MASK		BIT(25)
#define QDMA_DESC_NO_DROP_MASK		BIT(24)
#define QDMA_DESC_LEN_MASK		GENMASK(15, 0)
/* DATA */
#define QDMA_DESC_NEXT_ID_MASK		GENMASK(15, 0)
/* TX MSG0 */
#define QDMA_ETH_TXMSG_MIC_IDX_MASK	BIT(30)
#define QDMA_ETH_TXMSG_SP_TAG_MASK	GENMASK(29, 14)
#define QDMA_ETH_TXMSG_ICO_MASK		BIT(13)
#define QDMA_ETH_TXMSG_UCO_MASK		BIT(12)
#define QDMA_ETH_TXMSG_TCO_MASK		BIT(11)
#define QDMA_ETH_TXMSG_TSO_MASK		BIT(10)
#define QDMA_ETH_TXMSG_FAST_MASK	BIT(9)
#define QDMA_ETH_TXMSG_OAM_MASK		BIT(8)
#define QDMA_ETH_TXMSG_CHAN_MASK	GENMASK(7, 3)
#define QDMA_ETH_TXMSG_QUEUE_MASK	GENMASK(2, 0)
/* TX MSG1 */
#define QDMA_ETH_TXMSG_NO_DROP		BIT(31)
#define QDMA_ETH_TXMSG_METER_MASK	GENMASK(30, 24)	/* 0x7f no meters */
#define QDMA_ETH_TXMSG_FPORT_MASK	GENMASK(23, 20)
#define QDMA_ETH_TXMSG_NBOQ_MASK	GENMASK(19, 15)
#define QDMA_ETH_TXMSG_HWF_MASK		BIT(14)
#define QDMA_ETH_TXMSG_HOP_MASK		BIT(13)
#define QDMA_ETH_TXMSG_PTP_MASK		BIT(12)
#define QDMA_ETH_TXMSG_ACNT_G1_MASK	GENMASK(10, 6)	/* 0x1f do not count */
#define QDMA_ETH_TXMSG_ACNT_G0_MASK	GENMASK(5, 0)	/* 0x3f do not count */

/* RX MSG1 */
#define QDMA_ETH_RXMSG_DEI_MASK		BIT(31)
#define QDMA_ETH_RXMSG_IP6_MASK		BIT(30)
#define QDMA_ETH_RXMSG_IP4_MASK		BIT(29)
#define QDMA_ETH_RXMSG_IP4F_MASK	BIT(28)
#define QDMA_ETH_RXMSG_L4_VALID_MASK	BIT(27)
#define QDMA_ETH_RXMSG_L4F_MASK		BIT(26)
#define QDMA_ETH_RXMSG_SPORT_MASK	GENMASK(25, 21)
#define QDMA_ETH_RXMSG_CRSN_MASK	GENMASK(20, 16)
#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK	GENMASK(15, 0)

struct airoha_qdma_desc {
	__le32 rsv;
	__le32 ctrl;
	__le32 addr;
	__le32 data;
	__le32 msg0;
	__le32 msg1;
	__le32 msg2;
	__le32 msg3;
};
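
/* Descriptive note added to this listing: airoha_qdma_desc is the
 * in-memory layout the QDMA engine DMAs to/from, all fields little
 * endian. "ctrl" carries the DONE/DROP/MORE flags plus the buffer length
 * (QDMA_DESC_* masks above), "addr" is the DMA buffer address, "data"
 * holds the next-descriptor index and msg0-msg3 carry the per-direction
 * metadata decoded with the TX/RX MSG masks above.
 */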

/* CTRL0 */
#define QDMA_FWD_DESC_CTX_MASK		BIT(31)
#define QDMA_FWD_DESC_RING_MASK		GENMASK(30, 28)
#define QDMA_FWD_DESC_IDX_MASK		GENMASK(27, 16)
#define QDMA_FWD_DESC_LEN_MASK		GENMASK(15, 0)
/* CTRL1 */
#define QDMA_FWD_DESC_FIRST_IDX_MASK	GENMASK(15, 0)
/* CTRL2 */
#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK	GENMASK(2, 0)

struct airoha_qdma_fwd_desc {
	__le32 addr;
	__le32 ctrl0;
	__le32 ctrl1;
	__le32 ctrl2;
	__le32 msg0;
	__le32 msg1;
	__le32 rsv0;
	__le32 rsv1;
};

enum {
	QDMA_INT_REG_IDX0,
	QDMA_INT_REG_IDX1,
	QDMA_INT_REG_IDX2,
	QDMA_INT_REG_IDX3,
	QDMA_INT_REG_IDX4,
	QDMA_INT_REG_MAX
};

enum {
	XSI_PCIE0_PORT,
	XSI_PCIE1_PORT,
	XSI_USB_PORT,
	XSI_AE_PORT,
	XSI_ETH_PORT,
};

enum {
	XSI_PCIE0_VIP_PORT_MASK	= BIT(22),
	XSI_PCIE1_VIP_PORT_MASK	= BIT(23),
	XSI_USB_VIP_PORT_MASK	= BIT(25),
	XSI_ETH_VIP_PORT_MASK	= BIT(24),
};

enum {
	DEV_STATE_INITIALIZED,
};

enum {
	CDM_CRSN_QSEL_Q1 = 1,
	CDM_CRSN_QSEL_Q5 = 5,
	CDM_CRSN_QSEL_Q6 = 6,
	CDM_CRSN_QSEL_Q15 = 15,
};

enum {
	CRSN_08 = 0x8,
	CRSN_21 = 0x15, /* KA */
	CRSN_22 = 0x16, /* hit bind and force route to CPU */
	CRSN_24 = 0x18,
	CRSN_25 = 0x19,
};

enum {
	FE_PSE_PORT_CDM1,
	FE_PSE_PORT_GDM1,
	FE_PSE_PORT_GDM2,
	FE_PSE_PORT_GDM3,
	FE_PSE_PORT_PPE1,
	FE_PSE_PORT_CDM2,
	FE_PSE_PORT_CDM3,
	FE_PSE_PORT_CDM4,
	FE_PSE_PORT_PPE2,
	FE_PSE_PORT_GDM4,
	FE_PSE_PORT_CDM5,
	FE_PSE_PORT_DROP = 0xf,
};

struct airoha_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	dma_addr_t dma_addr;
	u16 dma_len;
};

struct airoha_queue {
	struct airoha_qdma *qdma;

	/* protect concurrent queue accesses */
	spinlock_t lock;
	struct airoha_queue_entry *entry;
	struct airoha_qdma_desc *desc;
	u16 head;
	u16 tail;

	int queued;
	int ndesc;
	int free_thr;
	int buf_size;

	struct napi_struct napi;
	struct page_pool *page_pool;
};

struct airoha_tx_irq_queue {
	struct airoha_qdma *qdma;

	struct napi_struct napi;

	int size;
	u32 *q;
};

struct airoha_hw_stats {
	/* protect concurrent hw_stats accesses */
	spinlock_t lock;
	struct u64_stats_sync syncp;

	/* get_stats64 */
	u64 rx_ok_pkts;
	u64 tx_ok_pkts;
	u64 rx_ok_bytes;
	u64 tx_ok_bytes;
	u64 rx_multicast;
	u64 rx_errors;
	u64 rx_drops;
	u64 tx_drops;
	u64 rx_crc_error;
	u64 rx_over_errors;
	/* ethtool stats */
	u64 tx_broadcast;
	u64 tx_multicast;
	u64 tx_len[7];
	u64 rx_broadcast;
	u64 rx_fragment;
	u64 rx_jabber;
	u64 rx_len[7];
};

struct airoha_qdma {
	struct airoha_eth *eth;
	void __iomem *regs;

	/* protect concurrent irqmask accesses */
	spinlock_t irq_lock;
	u32 irqmask[QDMA_INT_REG_MAX];
	int irq;

	struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];

	struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
	struct airoha_queue q_rx[AIROHA_NUM_RX_RING];

	/* descriptor and packet buffers for qdma hw forward */
	struct {
		void *desc;
		void *q;
	} hfwd;
};

struct airoha_gdm_port {
	struct airoha_qdma *qdma;
	struct net_device *dev;
	int id;

	struct airoha_hw_stats stats;
};

struct airoha_eth {
	struct device *dev;

	unsigned long state;
	void __iomem *fe_regs;

	struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
	struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];

	struct net_device *napi_dev;

	struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
	struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
};

static u32 airoha_rr(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
	writel(val, base + offset);
}

static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
	val |= (airoha_rr(base, offset) & ~mask);
	airoha_wr(base, offset, val);

	return val;
}

#define airoha_fe_rr(eth, offset)				\
	airoha_rr((eth)->fe_regs, (offset))
#define airoha_fe_wr(eth, offset, val)				\
	airoha_wr((eth)->fe_regs, (offset), (val))
#define airoha_fe_rmw(eth, offset, mask, val)			\
	airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
#define airoha_fe_set(eth, offset, val)				\
	airoha_rmw((eth)->fe_regs, (offset), 0, (val))
#define airoha_fe_clear(eth, offset, val)			\
	airoha_rmw((eth)->fe_regs, (offset), (val), 0)

#define airoha_qdma_rr(qdma, offset)				\
	airoha_rr((qdma)->regs, (offset))
#define airoha_qdma_wr(qdma, offset, val)			\
	airoha_wr((qdma)->regs, (offset), (val))
#define airoha_qdma_rmw(qdma, offset, mask, val)		\
	airoha_rmw((qdma)->regs, (offset), (mask), (val))
#define airoha_qdma_set(qdma, offset, val)			\
	airoha_rmw((qdma)->regs, (offset), 0, (val))
#define airoha_qdma_clear(qdma, offset, val)			\
	airoha_rmw((qdma)->regs, (offset), (val), 0)
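
/* Exposition added to this listing (not in the original sources): with
 * airoha_rmw() defined above, the set/clear wrappers reduce to plain
 * OR / AND-NOT updates:
 *
 *   airoha_fe_set(eth, off, bits)   -> write(off, read(off) | bits)
 *   airoha_fe_clear(eth, off, bits) -> write(off, read(off) & ~bits)
 *
 * "set" passes an empty mask so nothing is cleared, while "clear" passes
 * the bits as the mask together with an all-zero value.
 */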

static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
				    u32 clear, u32 set)
{
	unsigned long flags;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
		return;

	spin_lock_irqsave(&qdma->irq_lock, flags);

	qdma->irqmask[index] &= ~clear;
	qdma->irqmask[index] |= set;
	airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
	/* Read irq_enable register in order to guarantee the update above
	 * completes in the spinlock critical section.
	 */
	airoha_qdma_rr(qdma, REG_INT_ENABLE(index));

	spin_unlock_irqrestore(&qdma->irq_lock, flags);
}

static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
				   u32 mask)
{
	airoha_qdma_set_irqmask(qdma, index, 0, mask);
}

static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
				    u32 mask)
{
	airoha_qdma_set_irqmask(qdma, index, mask, 0);
}

static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
{
	/* GDM1 port on the EN7581 SoC is connected to the LAN DSA switch.
	 * GDM{2,3,4} can be used as WAN ports connected to an external
	 * PHY module.
	 */
	return port->id == 1;
}

static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, reg;

	reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
					   : REG_FE_WAN_MAC_H;
	val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
	airoha_fe_wr(eth, reg, val);

	val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
	airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
	airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
}
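
/* Worked example added for exposition: for the (hypothetical) address
 * 00:11:22:33:44:55, airoha_set_macaddr() writes 0x001122 to the high
 * register (REG_FE_LAN_MAC_H for GDM1, REG_FE_WAN_MAC_H otherwise) and
 * 0x334455 to both the LMIN and LMAX low registers derived from it.
 */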

static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
					u32 val)
{
	airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
		      FIELD_PREP(GDM_OCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
		      FIELD_PREP(GDM_MCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
		      FIELD_PREP(GDM_BCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
		      FIELD_PREP(GDM_UCFQ_MASK, val));
}

static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
{
	u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
	u32 vip_port, cfg_addr;

	switch (port) {
	case XSI_PCIE0_PORT:
		vip_port = XSI_PCIE0_VIP_PORT_MASK;
		cfg_addr = REG_GDM_FWD_CFG(3);
		break;
	case XSI_PCIE1_PORT:
		vip_port = XSI_PCIE1_VIP_PORT_MASK;
		cfg_addr = REG_GDM_FWD_CFG(3);
		break;
	case XSI_USB_PORT:
		vip_port = XSI_USB_VIP_PORT_MASK;
		cfg_addr = REG_GDM_FWD_CFG(4);
		break;
	case XSI_ETH_PORT:
		vip_port = XSI_ETH_VIP_PORT_MASK;
		cfg_addr = REG_GDM_FWD_CFG(4);
		break;
	default:
		return -EINVAL;
	}

	if (enable) {
		airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
		airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
	} else {
		airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
		airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
	}

	airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);

	return 0;
}

static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
{
	const int port_list[] = {
		XSI_PCIE0_PORT,
		XSI_PCIE1_PORT,
		XSI_USB_PORT,
		XSI_ETH_PORT
	};
	int i, err;

	for (i = 0; i < ARRAY_SIZE(port_list); i++) {
		err = airoha_set_gdm_port(eth, port_list[i], enable);
		if (err)
			goto error;
	}

	return 0;

error:
	for (i--; i >= 0; i--)
		airoha_set_gdm_port(eth, port_list[i], false);

	return err;
}
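
/* Note added for exposition: on failure airoha_set_gdm_ports() unwinds in
 * reverse, forcing every port configured so far back to FE_PSE_PORT_DROP
 * via airoha_set_gdm_port(..., false) before propagating the error.
 */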

static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
	int p;

	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
		airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
			      GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
			      GDM_DROP_CRC_ERR);
		airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
					    FE_PSE_PORT_CDM1);
		airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
			      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
			      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
			      FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
	}

	airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
		      FIELD_PREP(CDM1_VLAN_MASK, 0x8100));

	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
}

static void airoha_fe_vip_setup(struct airoha_eth *eth)
{
	airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
	airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(4),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(6),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(7),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* BOOTP (0x43) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
	airoha_fe_wr(eth, REG_FE_VIP_EN(8),
		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	/* BOOTP (0x44) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
	airoha_fe_wr(eth, REG_FE_VIP_EN(9),
		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	/* ISAKMP */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
	airoha_fe_wr(eth, REG_FE_VIP_EN(10),
		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(11),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* DHCPv6 */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
	airoha_fe_wr(eth, REG_FE_VIP_EN(12),
		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(19),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* ETH->ETH_P_1905 (0x893a) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
	airoha_fe_wr(eth, REG_FE_VIP_EN(20),
		     PATN_FCPU_EN_MASK | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(21),
		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
}

static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
					     u32 port, u32 queue)
{
	u32 val;

	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
	val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);

	return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
}

static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
					      u32 port, u32 queue, u32 val)
{
	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
		      FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
}

static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
{
	u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);

	return FIELD_GET(PSE_ALLRSV_MASK, val);
}

static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
				    u32 port, u32 queue, u32 val)
{
	u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
	u32 tmp, all_rsv, fq_limit;

	airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);

	/* modify all rsv */
	all_rsv = airoha_fe_get_pse_all_rsv(eth);
	all_rsv += (val - orig_val);
	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
		      FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));

	/* modify hthd */
	tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
	fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
	tmp = fq_limit - all_rsv - 0x20;
	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
		      PSE_SHARE_USED_HTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));

	tmp = fq_limit - all_rsv - 0x100;
	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
		      PSE_SHARE_USED_MTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
	tmp = (3 * tmp) >> 2;
	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
		      PSE_SHARE_USED_LTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));

	return 0;
}
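
/* Exposition added to this listing: airoha_fe_set_pse_oq_rsv() rebuilds
 * the shared-buffer thresholds from the free-queue limit and the new
 * total reservation:
 *
 *   hthd = fq_limit - all_rsv - 0x20
 *   mthd = fq_limit - all_rsv - 0x100
 *   lthd = (3 * mthd) / 4
 *
 * e.g. with hypothetical values fq_limit = 0x1000 and all_rsv = 0x200
 * this gives hthd = 0xde0, mthd = 0xd00 and lthd = 0x9c0.
 */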

static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
{
	const u32 pse_port_num_queues[] = {
		[FE_PSE_PORT_CDM1] = 6,
		[FE_PSE_PORT_GDM1] = 6,
		[FE_PSE_PORT_GDM2] = 32,
		[FE_PSE_PORT_GDM3] = 6,
		[FE_PSE_PORT_PPE1] = 4,
		[FE_PSE_PORT_CDM2] = 6,
		[FE_PSE_PORT_CDM3] = 8,
		[FE_PSE_PORT_CDM4] = 10,
		[FE_PSE_PORT_PPE2] = 4,
		[FE_PSE_PORT_GDM4] = 2,
		[FE_PSE_PORT_CDM5] = 2,
	};
	u32 all_rsv;
	int q;

	all_rsv = airoha_fe_get_pse_all_rsv(eth);
	/* hw misses PPE2 oq rsv */
	all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
	airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);

	/* CDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM2 */
	for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
	/* GDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
					 PSE_QUEUE_RSV_PAGES);
	/* PPE1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
		if (q < pse_port_num_queues[FE_PSE_PORT_PPE1])
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
						 PSE_QUEUE_RSV_PAGES);
		else
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
	}
	/* CDM2 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
	/* CDM4 */
	for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	/* PPE2 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
		if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
						 PSE_QUEUE_RSV_PAGES);
		else
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
	}
	/* GDM4 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM5 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
					 PSE_QUEUE_RSV_PAGES);
}

static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
{
	int i;

	for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
		int err, j;
		u32 val;

		airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

		val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
		      MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
		airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
		err = read_poll_timeout(airoha_fe_rr, val,
					val & MC_VLAN_CFG_CMD_DONE_MASK,
					USEC_PER_MSEC, 5 * USEC_PER_MSEC,
					false, eth, REG_MC_VLAN_CFG);
		if (err)
			return err;

		for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
			airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

			val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
			      FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
			      MC_VLAN_CFG_RW_MASK;
			airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
			err = read_poll_timeout(airoha_fe_rr, val,
						val & MC_VLAN_CFG_CMD_DONE_MASK,
						USEC_PER_MSEC,
						5 * USEC_PER_MSEC, false, eth,
						REG_MC_VLAN_CFG);
			if (err)
				return err;
		}
	}

	return 0;
}
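
/* Note added for exposition: the read_poll_timeout() calls above re-read
 * REG_MC_VLAN_CFG via airoha_fe_rr() roughly every USEC_PER_MSEC (1 ms)
 * until MC_VLAN_CFG_CMD_DONE_MASK is set, giving up with -ETIMEDOUT after
 * 5 ms, so each table/port write is confirmed before the next is issued.
 */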

static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
{
	/* CDM1_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
				 CDM_CRSN_QSEL_Q6));
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
				 CDM_CRSN_QSEL_Q1));
	/* CDM2_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
				 CDM_CRSN_QSEL_Q6));
	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
				 CDM_CRSN_QSEL_Q1));
}

static int airoha_fe_init(struct airoha_eth *eth)
{
	airoha_fe_maccr_init(eth);

	/* PSE IQ reserve */
	airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
		      FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
	airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
		      PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
		      FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
		      FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));

	/* enable FE copy engine for MC/KA/DPI */
	airoha_fe_wr(eth, REG_FE_PCE_CFG,
		     PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
	/* set vip queue selection to ring 1 */
	airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
		      FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
	airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
		      FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
	/* set GDM4 source interface offset to 8 */
	airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
		      GDM4_SPORT_OFF2_MASK |
		      GDM4_SPORT_OFF1_MASK |
		      GDM4_SPORT_OFF0_MASK,
		      FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
		      FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
		      FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));

	/* set PSE Page as 128B */
	airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
		      FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
		      FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
		      FE_DMA_GLO_PG_SZ_MASK);
	airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
		     FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
		     FE_RST_GDM4_MBI_ARB_MASK);
	usleep_range(1000, 2000);

	/* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
	 * connect other rings to PSE Port0 OQ-0
	 */
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));

	airoha_fe_vip_setup(eth);
	airoha_fe_pse_ports_init(eth);

	airoha_fe_set(eth, REG_GDM_MISC_CFG,
		      GDM2_RDM_ACK_WAIT_PREF_MASK |
		      GDM2_CHN_VLD_MODE_MASK);
	airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
		      FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));

	/* init fragment and assemble Force Port */
	/* NPU Core-3, NPU Bridge Channel-3 */
	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
		      IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
		      FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
		      FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
	/* QDMA LAN, RX Ring-22 */
	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
		      IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
		      FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
		      FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));

	airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
	airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);

	airoha_fe_crsn_qsel_init(eth);

	airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);

	/* default aging mode for mbi unlock issue */
	airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
		      MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
		      FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
		      FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));

	/* disable IFC by default */
	airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);

	/* enable 1:N vlan action, init vlan table */
	airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);

	return airoha_fe_mc_vlan_clear(eth);
}

static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
{
	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];
	int nframes = 0;

	while (q->queued < q->ndesc - 1) {
		struct airoha_queue_entry *e = &q->entry[q->head];
		struct airoha_qdma_desc *desc = &q->desc[q->head];
		struct page *page;
		int offset;
		u32 val;

		page = page_pool_dev_alloc_frag(q->page_pool, &offset,
						q->buf_size);
		if (!page)
			break;

		q->head = (q->head + 1) % q->ndesc;
		q->queued++;
		nframes++;

		e->buf = page_address(page) + offset;
		e->dma_addr = page_pool_get_dma_addr(page) + offset;
		e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);

		dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
					   dir);

		val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
		WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
		WRITE_ONCE(desc->data, cpu_to_le32(val));
		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		WRITE_ONCE(desc->msg2, 0);
		WRITE_ONCE(desc->msg3, 0);

		airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
				RX_RING_CPU_IDX_MASK,
				FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
	}

	return nframes;
}
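
/* Note added for exposition: in the refill loop above q->head is advanced
 * *before* the descriptor is written, so the QDMA_DESC_NEXT_ID stored in
 * desc->data and the RX_RING_CPU_IDX written to the hardware both point
 * at the next slot to be filled; the loop condition (q->queued <
 * q->ndesc - 1) keeps one slot unused so the ring is never completely
 * full.
 */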

static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
				    struct airoha_qdma_desc *desc)
{
	u32 port, sport, msg1 = le32_to_cpu(desc->msg1);

	sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
	switch (sport) {
	case 0x10 ... 0x13:
		port = 0;
		break;
	case 0x2 ... 0x4:
		port = sport - 1;
		break;
	default:
		return -EINVAL;
	}

	return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
}
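
/* Mapping note added for exposition: the RX message "sport" field is
 * translated to a GDM port index as 0x10..0x13 -> port 0 and
 * 0x2..0x4 -> ports 1..3; anything else is rejected with -EINVAL, as is
 * any index beyond eth->ports[] (with AIROHA_MAX_NUM_GDM_PORTS == 1,
 * only port 0 currently passes the final bounds check).
 */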

static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
{
	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];
	int done = 0;

	while (done < budget) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct airoha_qdma_desc *desc = &q->desc[q->tail];
		dma_addr_t dma_addr = le32_to_cpu(desc->addr);
		u32 desc_ctrl = le32_to_cpu(desc->ctrl);
		struct sk_buff *skb;
		int len, p;

		if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
			break;

		if (!dma_addr)
			break;

		len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
		if (!len)
			break;

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		dma_sync_single_for_cpu(eth->dev, dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size), dir);

		p = airoha_qdma_get_gdm_port(eth, desc);
		if (p < 0 || !eth->ports[p]) {
			page_pool_put_full_page(q->page_pool,
						virt_to_head_page(e->buf),
						true);
			continue;
		}

		skb = napi_build_skb(e->buf, q->buf_size);
		if (!skb) {
			page_pool_put_full_page(q->page_pool,
						virt_to_head_page(e->buf),
						true);
			break;
		}

		skb_reserve(skb, 2);
		__skb_put(skb, len);
		skb_mark_for_recycle(skb);
		skb->dev = eth->ports[p]->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb_record_rx_queue(skb, qid);
		napi_gro_receive(&q->napi, skb);

		done++;
	}
	airoha_qdma_fill_rx_queue(q);

	return done;
}

static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
{
	struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
	int cur, done = 0;

	do {
		cur = airoha_qdma_rx_process(q, budget - done);
		done += cur;
	} while (cur && done < budget);

	if (done < budget && napi_complete(napi))
		airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
				       RX_DONE_INT_MASK);

	return done;
}
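
/* Note added for exposition: this is the standard NAPI contract --
 * airoha_qdma_rx_napi_poll() keeps calling airoha_qdma_rx_process()
 * until either the budget is exhausted or a pass makes no progress, and
 * only when it finishes under budget (and napi_complete() accepts) does
 * it re-arm RX_DONE_INT_MASK, leaving the interrupt masked while polling.
 */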

static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int ndesc)
{
	const struct page_pool_params pp_params = {
		.order = 0,
		.pool_size = 256,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.dma_dir = DMA_FROM_DEVICE,
		.max_len = PAGE_SIZE,
		.nid = NUMA_NO_NODE,
		.dev = qdma->eth->dev,
		.napi = &q->napi,
	};
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0], thr;
	dma_addr_t dma_addr;

	q->buf_size = PAGE_SIZE / 2;
	q->ndesc = ndesc;
	q->qdma = qdma;

	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);

	airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
			RX_RING_SIZE_MASK,
			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));

	thr = clamp(ndesc >> 3, 1, 32);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
			FIELD_PREP(RX_RING_THR_MASK, thr));
	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));

	airoha_qdma_fill_rx_queue(q);

	return 0;
}
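
/* Note added for exposition: q->buf_size is PAGE_SIZE / 2, so on a
 * typical 4 KiB-page system page_pool_dev_alloc_frag() hands out two RX
 * buffers per page from the 256-entry pool, and PP_FLAG_DMA_MAP |
 * PP_FLAG_DMA_SYNC_DEV lets the pool own DMA mapping and
 * device-direction syncs for recycled pages.
 */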

static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
{
	struct airoha_eth *eth = q->qdma->eth;

	while (q->queued) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct page *page = virt_to_head_page(e->buf);

		dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
					page_pool_get_dma_dir(q->page_pool));
		page_pool_put_full_page(q->page_pool, page, false);
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
}
1632
1633static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
1634{
1635	int i;
1636
1637	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
1638		int err;
1639
1640		if (!(RX_DONE_INT_MASK & BIT(i))) {
1641			/* rx-queue not bound to irq */
1642			continue;
1643		}
1644
1645		err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
1646						RX_DSCP_NUM(i));
1647		if (err)
1648			return err;
1649	}
1650
1651	return 0;
1652}
1653
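/* TX completion NAPI callback: the hw reports completions through a
 * separate irq queue whose u32 entries encode the TX ring id
 * (IRQ_RING_IDX_MASK) and the descriptor index (IRQ_DESC_IDX_MASK).
 * For each entry the buffer is unmapped, BQL accounting is updated and
 * the netdev queue is woken once at least free_thr descriptors are
 * available again.
 */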
1654static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
1655{
1656	struct airoha_tx_irq_queue *irq_q;
1657	int id, done = 0, irq_queued;
1658	struct airoha_qdma *qdma;
1659	struct airoha_eth *eth;
1660	u32 status, head;
1661
1662	irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
1663	qdma = irq_q->qdma;
1664	id = irq_q - &qdma->q_tx_irq[0];
1665	eth = qdma->eth;
1666
1667	status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
1668	head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
1669	head = head % irq_q->size;
1670	irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
1671
1672	while (irq_queued > 0 && done < budget) {
1673		u32 qid, val = irq_q->q[head];
1674		struct airoha_qdma_desc *desc;
1675		struct airoha_queue_entry *e;
1676		struct airoha_queue *q;
1677		u32 index, desc_ctrl;
1678		struct sk_buff *skb;
1679
1680		if (val == 0xff)
1681			break;
1682
1683		irq_q->q[head] = 0xff; /* mark as done */
1684		head = (head + 1) % irq_q->size;
1685		irq_queued--;
1686		done++;
1687
1688		qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
1689		if (qid >= ARRAY_SIZE(qdma->q_tx))
1690			continue;
1691
1692		q = &qdma->q_tx[qid];
1693		if (!q->ndesc)
1694			continue;
1695
1696		index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
1697		if (index >= q->ndesc)
1698			continue;
1699
1700		spin_lock_bh(&q->lock);
1701
1702		if (!q->queued)
1703			goto unlock;
1704
1705		desc = &q->desc[index];
1706		desc_ctrl = le32_to_cpu(desc->ctrl);
1707
1708		if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
1709		    !(desc_ctrl & QDMA_DESC_DROP_MASK))
1710			goto unlock;
1711
1712		e = &q->entry[index];
1713		skb = e->skb;
1714
1715		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
1716				 DMA_TO_DEVICE);
1717		memset(e, 0, sizeof(*e));
1718		WRITE_ONCE(desc->msg0, 0);
1719		WRITE_ONCE(desc->msg1, 0);
1720		q->queued--;
1721
1722		/* the completion ring can report out-of-order indexes if hw QoS
1723		 * is enabled and packets with different priorities are queued
1724		 * to the same DMA ring. Account for possible out-of-order
1725		 * reports when incrementing the DMA ring tail pointer
1726		 */
1727		while (q->tail != q->head && !q->entry[q->tail].dma_addr)
1728			q->tail = (q->tail + 1) % q->ndesc;
1729
1730		if (skb) {
1731			u16 queue = skb_get_queue_mapping(skb);
1732			struct netdev_queue *txq;
1733
1734			txq = netdev_get_tx_queue(skb->dev, queue);
1735			netdev_tx_completed_queue(txq, 1, skb->len);
1736			if (netif_tx_queue_stopped(txq) &&
1737			    q->ndesc - q->queued >= q->free_thr)
1738				netif_tx_wake_queue(txq);
1739
1740			dev_kfree_skb_any(skb);
1741		}
1742unlock:
1743		spin_unlock_bh(&q->lock);
1744	}
1745
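/* Ack the processed entries back to the hw. IRQ_CLEAR_LEN_MASK is
 * presumably too narrow to hold a full budget, so the count is returned
 * in chunks of 128 plus a remainder, e.g. done = 200 -> one write of
 * 0x80 followed by one of 0x48.
 */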
1746	if (done) {
1747		int i, len = done >> 7;
1748
1749		for (i = 0; i < len; i++)
1750			airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
1751					IRQ_CLEAR_LEN_MASK, 0x80);
1752		airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
1753				IRQ_CLEAR_LEN_MASK, (done & 0x7f));
1754	}
1755
1756	if (done < budget && napi_complete(napi))
1757		airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
1758				       TX_DONE_INT_MASK(id));
1759
1760	return done;
1761}
1762
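/* Per-ring TX setup: every descriptor starts with QDMA_DESC_DONE set so
 * the completion path treats the whole ring as free. free_thr leaves
 * room for a maximally fragmented skb (linear head plus MAX_SKB_FRAGS
 * fragments) before the queue is stopped.
 */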
1763static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
1764				     struct airoha_qdma *qdma, int size)
1765{
1766	struct airoha_eth *eth = qdma->eth;
1767	int i, qid = q - &qdma->q_tx[0];
1768	dma_addr_t dma_addr;
1769
1770	spin_lock_init(&q->lock);
1771	q->ndesc = size;
1772	q->qdma = qdma;
1773	q->free_thr = 1 + MAX_SKB_FRAGS;
1774
1775	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
1776				GFP_KERNEL);
1777	if (!q->entry)
1778		return -ENOMEM;
1779
1780	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
1781				      &dma_addr, GFP_KERNEL);
1782	if (!q->desc)
1783		return -ENOMEM;
1784
1785	for (i = 0; i < q->ndesc; i++) {
1786		u32 val;
1787
1788		val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
1789		WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
1790	}
1791
1792	airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
1793	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
1794			FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
1795	airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
1796			FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
1797
1798	return 0;
1799}
1800
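/* Allocate one TX completion (irq) queue. The ring is filled with 0xff
 * bytes so no slot looks like a valid completion before the hw writes
 * it; TX_IRQ_THR_MASK is set to 1 so an interrupt is raised as soon as
 * a single completion is pending.
 */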
1801static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
1802				   struct airoha_qdma *qdma, int size)
1803{
1804	int id = irq_q - &qdma->q_tx_irq[0];
1805	struct airoha_eth *eth = qdma->eth;
1806	dma_addr_t dma_addr;
1807
1808	netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
1809			  airoha_qdma_tx_napi_poll);
1810	irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
1811				       &dma_addr, GFP_KERNEL);
1812	if (!irq_q->q)
1813		return -ENOMEM;
1814
1815	memset(irq_q->q, 0xff, size * sizeof(u32));
1816	irq_q->size = size;
1817	irq_q->qdma = qdma;
1818
1819	airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
1820	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
1821			FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
1822	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
1823			FIELD_PREP(TX_IRQ_THR_MASK, 1));
1824
1825	return 0;
1826}
1827
1828static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
1829{
1830	int i, err;
1831
1832	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
1833		err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
1834					      IRQ_QUEUE_LEN(i));
1835		if (err)
1836			return err;
1837	}
1838
1839	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
1840		err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
1841						TX_DSCP_NUM);
1842		if (err)
1843			return err;
1844	}
1845
1846	return 0;
1847}
1848
1849static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
1850{
1851	struct airoha_eth *eth = q->qdma->eth;
1852
1853	spin_lock_bh(&q->lock);
1854	while (q->queued) {
1855		struct airoha_queue_entry *e = &q->entry[q->tail];
1856
1857		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
1858				 DMA_TO_DEVICE);
1859		dev_kfree_skb_any(e->skb);
1860		e->skb = NULL;
1861
1862		q->tail = (q->tail + 1) % q->ndesc;
1863		q->queued--;
1864	}
1865	spin_unlock_bh(&q->lock);
1866}
1867
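/* Set up the hw forwarding rings: HW_DSCP_NUM forwarding descriptors
 * plus one AIROHA_MAX_PACKET_SIZE buffer per descriptor, used by the hw
 * for traffic it forwards without CPU involvement (per the HW_FWD
 * register names). LMGR_INIT_START is then polled for up to 30ms until
 * the hw clears it to signal that descriptor-manager init completed.
 */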
1868static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
1869{
1870	struct airoha_eth *eth = qdma->eth;
1871	dma_addr_t dma_addr;
1872	u32 status;
1873	int size;
1874
1875	size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
1876	qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
1877					      GFP_KERNEL);
1878	if (!qdma->hfwd.desc)
1879		return -ENOMEM;
1880
1881	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
1882
1883	size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
1884	qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
1885					   GFP_KERNEL);
1886	if (!qdma->hfwd.q)
1887		return -ENOMEM;
1888
1889	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
1890
1891	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
1892			HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
1893			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
1894	airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
1895			FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
1896	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
1897			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
1898			HW_FWD_DESC_NUM_MASK,
1899			FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
1900			LMGR_INIT_START);
1901
1902	return read_poll_timeout(airoha_qdma_rr, status,
1903				 !(status & LMGR_INIT_START), USEC_PER_MSEC,
1904				 30 * USEC_PER_MSEC, true, qdma,
1905				 REG_LMGR_INIT_CFG);
1906}
1907
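/* QoS/rate-meter defaults. The egress meter window is timeslice x
 * window size (2047us x 31, see below). Each trTCM (two-rate
 * three-color marker) block gets a fast tick in microseconds plus a
 * slow-tick ratio; in all four cases fast_tick * ratio comes to 1ms
 * (25us x 40 or 125us x 8), so the slow tick is apparently fixed.
 */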
1908static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
1909{
1910	airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
1911	airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
1912
1913	airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
1914			  PSE_BUF_ESTIMATE_EN_MASK);
1915
1916	airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
1917			EGRESS_RATE_METER_EN_MASK |
1918			EGRESS_RATE_METER_EQ_RATE_EN_MASK);
1919	/* 2047us x 31 = 63.457ms */
1920	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
1921			EGRESS_RATE_METER_WINDOW_SZ_MASK,
1922			FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
1923	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
1924			EGRESS_RATE_METER_TIMESLICE_MASK,
1925			FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
1926
1927	/* ratelimit init */
1928	airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
1929	/* fast-tick 25us */
1930	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
1931			FIELD_PREP(GLB_FAST_TICK_MASK, 25));
1932	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
1933			FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
1934
1935	airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
1936	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
1937			FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
1938	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
1939			EGRESS_SLOW_TICK_RATIO_MASK,
1940			FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
1941
1942	airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
1943	airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
1944			  INGRESS_TRTCM_MODE_MASK);
1945	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
1946			FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
1947	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
1948			INGRESS_SLOW_TICK_RATIO_MASK,
1949			FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
1950
1951	airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
1952	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
1953			FIELD_PREP(SLA_FAST_TICK_MASK, 25));
1954	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
1955			FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
1956}
1957
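/* Global QDMA setup: clear and unmask irqs, select blocking or
 * non-blocking irq reporting per TX ring, and program
 * REG_QDMA_GLOBAL_CFG. Note GLOBAL_CFG_RX_2B_OFFSET: the hw writes RX
 * frames at a 2-byte offset, matching the skb_reserve(skb, 2) in the RX
 * path, presumably to keep the IP header word-aligned.
 */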
1958static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
1959{
1960	int i;
1961
1962	/* clear pending irqs */
1963	for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
1964		airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
1965
1966	/* setup irqs */
1967	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
1968	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
1969	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
1970
1971	/* setup irq binding */
1972	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
1973		if (!qdma->q_tx[i].ndesc)
1974			continue;
1975
1976		if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
1977			airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
1978					TX_RING_IRQ_BLOCKING_CFG_MASK);
1979		else
1980			airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
1981					  TX_RING_IRQ_BLOCKING_CFG_MASK);
1982	}
1983
1984	airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
1985		       GLOBAL_CFG_RX_2B_OFFSET_MASK |
1986		       FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
1987		       GLOBAL_CFG_CPU_TXR_RR_MASK |
1988		       GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
1989		       GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
1990		       GLOBAL_CFG_MULTICAST_EN_MASK |
1991		       GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
1992		       GLOBAL_CFG_TX_WB_DONE_MASK |
1993		       FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
1994
1995	airoha_qdma_init_qos(qdma);
1996
1997	/* disable qdma rx delay interrupt */
1998	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
1999		if (!qdma->q_rx[i].ndesc)
2000			continue;
2001
2002		airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
2003				  RX_DELAY_INT_MASK);
2004	}
2005
2006	airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
2007			TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
2008
2009	return 0;
2010}
2011
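/* Shared hard-irq handler: latch and ack the masked status registers,
 * then disable the RX/TX done sources that fired and kick the matching
 * NAPI instances; the pollers re-enable their source on completion.
 */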
2012static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
2013{
2014	struct airoha_qdma *qdma = dev_instance;
2015	u32 intr[ARRAY_SIZE(qdma->irqmask)];
2016	int i;
2017
2018	for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
2019		intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
2020		intr[i] &= qdma->irqmask[i];
2021		airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
2022	}
2023
2024	if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
2025		return IRQ_NONE;
2026
2027	if (intr[1] & RX_DONE_INT_MASK) {
2028		airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
2029					RX_DONE_INT_MASK);
2030
2031		for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2032			if (!qdma->q_rx[i].ndesc)
2033				continue;
2034
2035			if (intr[1] & BIT(i))
2036				napi_schedule(&qdma->q_rx[i].napi);
2037		}
2038	}
2039
2040	if (intr[0] & INT_TX_MASK) {
2041		for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
2042			if (!(intr[0] & TX_DONE_INT_MASK(i)))
2043				continue;
2044
2045			airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
2046						TX_DONE_INT_MASK(i));
2047			napi_schedule(&qdma->q_tx_irq[i].napi);
2048		}
2049	}
2050
2051	return IRQ_HANDLED;
2052}
2053
2054static int airoha_qdma_init(struct platform_device *pdev,
2055			    struct airoha_eth *eth,
2056			    struct airoha_qdma *qdma)
2057{
2058	int err, id = qdma - &eth->qdma[0];
2059	const char *res;
2060
2061	spin_lock_init(&qdma->irq_lock);
2062	qdma->eth = eth;
2063
2064	res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
2065	if (!res)
2066		return -ENOMEM;
2067
2068	qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
2069	if (IS_ERR(qdma->regs))
2070		return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
2071				     "failed to iomap qdma%d regs\n", id);
2072
2073	qdma->irq = platform_get_irq(pdev, 4 * id);
2074	if (qdma->irq < 0)
2075		return qdma->irq;
2076
2077	err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
2078			       IRQF_SHARED, KBUILD_MODNAME, qdma);
2079	if (err)
2080		return err;
2081
2082	err = airoha_qdma_init_rx(qdma);
2083	if (err)
2084		return err;
2085
2086	err = airoha_qdma_init_tx(qdma);
2087	if (err)
2088		return err;
2089
2090	err = airoha_qdma_init_hfwd_queues(qdma);
2091	if (err)
2092		return err;
2093
2094	return airoha_qdma_hw_init(qdma);
2095}
2096
2097static int airoha_hw_init(struct platform_device *pdev,
2098			  struct airoha_eth *eth)
2099{
2100	int err, i;
2101
2102	/* disable xsi */
2103	err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
2104					eth->xsi_rsts);
2105	if (err)
2106		return err;
2107
2108	err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
2109	if (err)
2110		return err;
2111
2112	msleep(20);
2113	err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
2114	if (err)
2115		return err;
2116
2117	msleep(20);
2118	err = airoha_fe_init(eth);
2119	if (err)
2120		return err;
2121
2122	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
2123		err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
2124		if (err)
2125			return err;
2126	}
2127
2128	set_bit(DEV_STATE_INITIALIZED, &eth->state);
2129
2130	return 0;
2131}
2132
2133static void airoha_hw_cleanup(struct airoha_qdma *qdma)
2134{
2135	int i;
2136
2137	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2138		if (!qdma->q_rx[i].ndesc)
2139			continue;
2140
2141		netif_napi_del(&qdma->q_rx[i].napi);
2142		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
2143		if (qdma->q_rx[i].page_pool)
2144			page_pool_destroy(qdma->q_rx[i].page_pool);
2145	}
2146
2147	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
2148		netif_napi_del(&qdma->q_tx_irq[i].napi);
2149
2150	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
2151		if (!qdma->q_tx[i].ndesc)
2152			continue;
2153
2154		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
2155	}
2156}
2157
2158static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
2159{
2160	int i;
2161
2162	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
2163		napi_enable(&qdma->q_tx_irq[i].napi);
2164
2165	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2166		if (!qdma->q_rx[i].ndesc)
2167			continue;
2168
2169		napi_enable(&qdma->q_rx[i].napi);
2170	}
2171}
2172
2173static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
2174{
2175	int i;
2176
2177	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
2178		napi_disable(&qdma->q_tx_irq[i].napi);
2179
2180	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2181		if (!qdma->q_rx[i].ndesc)
2182			continue;
2183
2184		napi_disable(&qdma->q_rx[i].napi);
2185	}
2186}
2187
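/* Fold the GDM MIB counters into the soft stats: 64-bit counters are
 * assembled from _H/_L register pairs, and the hw counters are cleared
 * after reading so each pass accumulates only the delta. stats.lock
 * serializes writers while the u64_stats syncp lets (32-bit) readers
 * retry instead of blocking.
 */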
2188static void airoha_update_hw_stats(struct airoha_gdm_port *port)
2189{
2190	struct airoha_eth *eth = port->qdma->eth;
2191	u32 val, i = 0;
2192
2193	spin_lock(&port->stats.lock);
2194	u64_stats_update_begin(&port->stats.syncp);
2195
2196	/* TX */
2197	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
2198	port->stats.tx_ok_pkts += ((u64)val << 32);
2199	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
2200	port->stats.tx_ok_pkts += val;
2201
2202	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
2203	port->stats.tx_ok_bytes += ((u64)val << 32);
2204	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
2205	port->stats.tx_ok_bytes += val;
2206
2207	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
2208	port->stats.tx_drops += val;
2209
2210	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
2211	port->stats.tx_broadcast += val;
2212
2213	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
2214	port->stats.tx_multicast += val;
2215
2216	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
2217	port->stats.tx_len[i] += val;
2218
2219	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
2220	port->stats.tx_len[i] += ((u64)val << 32);
2221	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
2222	port->stats.tx_len[i++] += val;
2223
2224	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
2225	port->stats.tx_len[i] += ((u64)val << 32);
2226	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
2227	port->stats.tx_len[i++] += val;
2228
2229	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
2230	port->stats.tx_len[i] += ((u64)val << 32);
2231	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
2232	port->stats.tx_len[i++] += val;
2233
2234	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
2235	port->stats.tx_len[i] += ((u64)val << 32);
2236	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
2237	port->stats.tx_len[i++] += val;
2238
2239	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
2240	port->stats.tx_len[i] += ((u64)val << 32);
2241	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
2242	port->stats.tx_len[i++] += val;
2243
2244	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
2245	port->stats.tx_len[i] += ((u64)val << 32);
2246	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
2247	port->stats.tx_len[i++] += val;
2248
2249	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
2250	port->stats.tx_len[i++] += val;
2251
2252	/* RX */
2253	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
2254	port->stats.rx_ok_pkts += ((u64)val << 32);
2255	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
2256	port->stats.rx_ok_pkts += val;
2257
2258	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
2259	port->stats.rx_ok_bytes += ((u64)val << 32);
2260	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
2261	port->stats.rx_ok_bytes += val;
2262
2263	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
2264	port->stats.rx_drops += val;
2265
2266	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
2267	port->stats.rx_broadcast += val;
2268
2269	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
2270	port->stats.rx_multicast += val;
2271
2272	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
2273	port->stats.rx_errors += val;
2274
2275	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
2276	port->stats.rx_crc_error += val;
2277
2278	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
2279	port->stats.rx_over_errors += val;
2280
2281	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
2282	port->stats.rx_fragment += val;
2283
2284	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
2285	port->stats.rx_jabber += val;
2286
2287	i = 0;
2288	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
2289	port->stats.rx_len[i] += val;
2290
2291	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
2292	port->stats.rx_len[i] += ((u64)val << 32);
2293	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
2294	port->stats.rx_len[i++] += val;
2295
2296	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
2297	port->stats.rx_len[i] += ((u64)val << 32);
2298	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
2299	port->stats.rx_len[i++] += val;
2300
2301	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
2302	port->stats.rx_len[i] += ((u64)val << 32);
2303	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
2304	port->stats.rx_len[i++] += val;
2305
2306	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
2307	port->stats.rx_len[i] += ((u64)val << 32);
2308	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
2309	port->stats.rx_len[i++] += val;
2310
2311	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
2312	port->stats.rx_len[i] += ((u64)val << 32);
2313	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
2314	port->stats.rx_len[i++] += val;
2315
2316	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
2317	port->stats.rx_len[i] += ((u64)val << 32);
2318	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
2319	port->stats.rx_len[i++] += val;
2320
2321	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
2322	port->stats.rx_len[i++] += val;
2323
2324	/* reset mib counters */
2325	airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
2326		      FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
2327
2328	u64_stats_update_end(&port->stats.syncp);
2329	spin_unlock(&port->stats.lock);
2330}
2331
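/* ndo_open/ndo_stop toggle the GDM ports via airoha_set_gdm_ports() and
 * the QDMA TX/RX DMA engines; open additionally enables special-tag
 * (STAG) parsing when the port is used as a DSA conduit.
 */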
2332static int airoha_dev_open(struct net_device *dev)
2333{
2334	struct airoha_gdm_port *port = netdev_priv(dev);
2335	struct airoha_qdma *qdma = port->qdma;
2336	int err;
2337
2338	netif_tx_start_all_queues(dev);
2339	err = airoha_set_gdm_ports(qdma->eth, true);
2340	if (err)
2341		return err;
2342
2343	if (netdev_uses_dsa(dev))
2344		airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
2345			      GDM_STAG_EN_MASK);
2346	else
2347		airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
2348				GDM_STAG_EN_MASK);
2349
2350	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
2351			GLOBAL_CFG_TX_DMA_EN_MASK |
2352			GLOBAL_CFG_RX_DMA_EN_MASK);
2353
2354	return 0;
2355}
2356
2357static int airoha_dev_stop(struct net_device *dev)
2358{
2359	struct airoha_gdm_port *port = netdev_priv(dev);
2360	struct airoha_qdma *qdma = port->qdma;
2361	int i, err;
2362
2363	netif_tx_disable(dev);
2364	err = airoha_set_gdm_ports(qdma->eth, false);
2365	if (err)
2366		return err;
2367
2368	airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
2369			  GLOBAL_CFG_TX_DMA_EN_MASK |
2370			  GLOBAL_CFG_RX_DMA_EN_MASK);
2371
2372	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
2373		if (!qdma->q_tx[i].ndesc)
2374			continue;
2375
2376		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
2377		netdev_tx_reset_subqueue(dev, i);
2378	}
2379
2380	return 0;
2381}
2382
2383static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
2384{
2385	struct airoha_gdm_port *port = netdev_priv(dev);
2386	int err;
2387
2388	err = eth_mac_addr(dev, p);
2389	if (err)
2390		return err;
2391
2392	airoha_set_macaddr(port, dev->dev_addr);
2393
2394	return 0;
2395}
2396
2397static int airoha_dev_init(struct net_device *dev)
2398{
2399	struct airoha_gdm_port *port = netdev_priv(dev);
2400
2401	airoha_set_macaddr(port, dev->dev_addr);
2402
2403	return 0;
2404}
2405
2406static void airoha_dev_get_stats64(struct net_device *dev,
2407				   struct rtnl_link_stats64 *storage)
2408{
2409	struct airoha_gdm_port *port = netdev_priv(dev);
2410	unsigned int start;
2411
2412	airoha_update_hw_stats(port);
2413	do {
2414		start = u64_stats_fetch_begin(&port->stats.syncp);
2415		storage->rx_packets = port->stats.rx_ok_pkts;
2416		storage->tx_packets = port->stats.tx_ok_pkts;
2417		storage->rx_bytes = port->stats.rx_ok_bytes;
2418		storage->tx_bytes = port->stats.tx_ok_bytes;
2419		storage->multicast = port->stats.rx_multicast;
2420		storage->rx_errors = port->stats.rx_errors;
2421		storage->rx_dropped = port->stats.rx_drops;
2422		storage->tx_dropped = port->stats.tx_drops;
2423		storage->rx_crc_errors = port->stats.rx_crc_error;
2424		storage->rx_over_errors = port->stats.rx_over_errors;
2425	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
2426}
2427
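/* .ndo_start_xmit: one descriptor per buffer (linear head plus each
 * frag), chained via QDMA_DESC_NEXT_ID with QDMA_DESC_MORE set on all
 * but the last. Only the first entry keeps the skb pointer, so the
 * completion path frees it exactly once. The doorbell write to
 * REG_TX_CPU_IDX is deferred while netdev_xmit_more() reports further
 * pending packets.
 */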
2428static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
2429				   struct net_device *dev)
2430{
2431	struct skb_shared_info *sinfo = skb_shinfo(skb);
2432	struct airoha_gdm_port *port = netdev_priv(dev);
2433	u32 msg0 = 0, msg1, len = skb_headlen(skb);
2434	int i, qid = skb_get_queue_mapping(skb);
2435	struct airoha_qdma *qdma = port->qdma;
2436	u32 nr_frags = 1 + sinfo->nr_frags;
2437	struct netdev_queue *txq;
2438	struct airoha_queue *q;
2439	void *data = skb->data;
2440	u16 index;
2441	u8 fport;
2442
2443	if (skb->ip_summed == CHECKSUM_PARTIAL)
2444		msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
2445			FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
2446			FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
2447
2448	/* TSO: fill MSS info in tcp checksum field */
2449	if (skb_is_gso(skb)) {
2450		if (skb_cow_head(skb, 0))
2451			goto error;
2452
2453		if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
2454			__be16 csum = cpu_to_be16(sinfo->gso_size);
2455
2456			tcp_hdr(skb)->check = (__force __sum16)csum;
2457			msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
2458		}
2459	}
2460
2461	fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
2462	msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
2463	       FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
2464
2465	q = &qdma->q_tx[qid];
2466	if (WARN_ON_ONCE(!q->ndesc))
2467		goto error;
2468
2469	spin_lock_bh(&q->lock);
2470
2471	txq = netdev_get_tx_queue(dev, qid);
2472	if (q->queued + nr_frags > q->ndesc) {
2473		/* not enough space in the queue */
2474		netif_tx_stop_queue(txq);
2475		spin_unlock_bh(&q->lock);
2476		return NETDEV_TX_BUSY;
2477	}
2478
2479	index = q->head;
2480	for (i = 0; i < nr_frags; i++) {
2481		struct airoha_qdma_desc *desc = &q->desc[index];
2482		struct airoha_queue_entry *e = &q->entry[index];
2483		skb_frag_t *frag = &sinfo->frags[i];
2484		dma_addr_t addr;
2485		u32 val;
2486
2487		addr = dma_map_single(dev->dev.parent, data, len,
2488				      DMA_TO_DEVICE);
2489		if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
2490			goto error_unmap;
2491
2492		index = (index + 1) % q->ndesc;
2493
2494		val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
2495		if (i < nr_frags - 1)
2496			val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
2497		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
2498		WRITE_ONCE(desc->addr, cpu_to_le32(addr));
2499		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
2500		WRITE_ONCE(desc->data, cpu_to_le32(val));
2501		WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
2502		WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
2503		WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
2504
2505		e->skb = i ? NULL : skb;
2506		e->dma_addr = addr;
2507		e->dma_len = len;
2508
2509		data = skb_frag_address(frag);
2510		len = skb_frag_size(frag);
2511	}
2512
2513	q->head = index;
2514	q->queued += i;
2515
2516	skb_tx_timestamp(skb);
2517	netdev_tx_sent_queue(txq, skb->len);
2518
2519	if (netif_xmit_stopped(txq) || !netdev_xmit_more())
2520		airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
2521				TX_RING_CPU_IDX_MASK,
2522				FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
2523
2524	if (q->ndesc - q->queued < q->free_thr)
2525		netif_tx_stop_queue(txq);
2526
2527	spin_unlock_bh(&q->lock);
2528
2529	return NETDEV_TX_OK;
2530
2531error_unmap:
2532	for (i--; i >= 0; i--) {
2533		index = (q->head + i) % q->ndesc;
2534		dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
2535				 q->entry[index].dma_len, DMA_TO_DEVICE);
2536	}
2537
2538	spin_unlock_bh(&q->lock);
2539error:
2540	dev_kfree_skb_any(skb);
2541	dev->stats.tx_dropped++;
2542
2543	return NETDEV_TX_OK;
2544}
2545
2546static void airoha_ethtool_get_drvinfo(struct net_device *dev,
2547				       struct ethtool_drvinfo *info)
2548{
2549	struct airoha_gdm_port *port = netdev_priv(dev);
2550	struct airoha_eth *eth = port->qdma->eth;
2551
2552	strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
2553	strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
2554}
2555
2556static void airoha_ethtool_get_mac_stats(struct net_device *dev,
2557					 struct ethtool_eth_mac_stats *stats)
2558{
2559	struct airoha_gdm_port *port = netdev_priv(dev);
2560	unsigned int start;
2561
2562	airoha_update_hw_stats(port);
2563	do {
2564		start = u64_stats_fetch_begin(&port->stats.syncp);
2565		stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
2566		stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
2567		stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
2568	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
2569}
2570
2571static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
2572	{    0,    64 },
2573	{   65,   127 },
2574	{  128,   255 },
2575	{  256,   511 },
2576	{  512,  1023 },
2577	{ 1024,  1518 },
2578	{ 1519, 10239 },
2579	{},
2580};
2581
2582static void
2583airoha_ethtool_get_rmon_stats(struct net_device *dev,
2584			      struct ethtool_rmon_stats *stats,
2585			      const struct ethtool_rmon_hist_range **ranges)
2586{
2587	struct airoha_gdm_port *port = netdev_priv(dev);
2588	struct airoha_hw_stats *hw_stats = &port->stats;
2589	unsigned int start;
2590
2591	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
2592		     ARRAY_SIZE(hw_stats->tx_len) + 1);
2593	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
2594		     ARRAY_SIZE(hw_stats->rx_len) + 1);
2595
2596	*ranges = airoha_ethtool_rmon_ranges;
2597	airoha_update_hw_stats(port);
2598	do {
2599		int i;
2600
2601		start = u64_stats_fetch_begin(&port->stats.syncp);
2602		stats->fragments = hw_stats->rx_fragment;
2603		stats->jabbers = hw_stats->rx_jabber;
2604		for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
2605		     i++) {
2606			stats->hist[i] = hw_stats->rx_len[i];
2607			stats->hist_tx[i] = hw_stats->tx_len[i];
2608		}
2609	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
2610}
2611
2612static const struct net_device_ops airoha_netdev_ops = {
2613	.ndo_init		= airoha_dev_init,
2614	.ndo_open		= airoha_dev_open,
2615	.ndo_stop		= airoha_dev_stop,
2616	.ndo_start_xmit		= airoha_dev_xmit,
2617	.ndo_get_stats64        = airoha_dev_get_stats64,
2618	.ndo_set_mac_address	= airoha_dev_set_macaddr,
2619};
2620
2621static const struct ethtool_ops airoha_ethtool_ops = {
2622	.get_drvinfo		= airoha_ethtool_get_drvinfo,
2623	.get_eth_mac_stats      = airoha_ethtool_get_mac_stats,
2624	.get_rmon_stats		= airoha_ethtool_get_rmon_stats,
2625};
2626
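/* Create a netdev for one GDM port described by an "airoha,eth-mac" DT
 * node. The "reg" property carries a 1-based port id; (id - 1) selects
 * the ports[] slot and, modulo AIROHA_MAX_NUM_QDMA, the QDMA block
 * servicing the port. The MAC address comes from DT, with a random
 * fallback.
 */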
2627static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
2628{
2629	const __be32 *id_ptr = of_get_property(np, "reg", NULL);
2630	struct airoha_gdm_port *port;
2631	struct airoha_qdma *qdma;
2632	struct net_device *dev;
2633	int err, index;
2634	u32 id;
2635
2636	if (!id_ptr) {
2637		dev_err(eth->dev, "missing gdm port id\n");
2638		return -EINVAL;
2639	}
2640
2641	id = be32_to_cpup(id_ptr);
2642	index = id - 1;
2643
2644	if (!id || id > ARRAY_SIZE(eth->ports)) {
2645		dev_err(eth->dev, "invalid gdm port id: %d\n", id);
2646		return -EINVAL;
2647	}
2648
2649	if (eth->ports[index]) {
2650		dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
2651		return -EINVAL;
2652	}
2653
2654	dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
2655				      AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
2656	if (!dev) {
2657		dev_err(eth->dev, "alloc_etherdev failed\n");
2658		return -ENOMEM;
2659	}
2660
2661	qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
2662	dev->netdev_ops = &airoha_netdev_ops;
2663	dev->ethtool_ops = &airoha_ethtool_ops;
2664	dev->max_mtu = AIROHA_MAX_MTU;
2665	dev->watchdog_timeo = 5 * HZ;
2666	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2667			   NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
2668			   NETIF_F_SG | NETIF_F_TSO;
2669	dev->features |= dev->hw_features;
2670	dev->dev.of_node = np;
2671	dev->irq = qdma->irq;
2672	SET_NETDEV_DEV(dev, eth->dev);
2673
2674	err = of_get_ethdev_address(np, dev);
2675	if (err) {
2676		if (err == -EPROBE_DEFER)
2677			return err;
2678
2679		eth_hw_addr_random(dev);
2680		dev_info(eth->dev, "generated random MAC address %pM\n",
2681			 dev->dev_addr);
2682	}
2683
2684	port = netdev_priv(dev);
2685	u64_stats_init(&port->stats.syncp);
2686	spin_lock_init(&port->stats.lock);
2687	port->qdma = qdma;
2688	port->dev = dev;
2689	port->id = id;
2690	eth->ports[index] = port;
2691
2692	return register_netdev(dev);
2693}
2694
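/* Probe: set a 32-bit DMA mask, iomap the "fe" region, grab the FE and
 * xsi reset lines, then run the hw bring-up and start NAPI before
 * registering one netdev per available "airoha,eth-mac" child node.
 * The dummy napi_dev uses threaded NAPI by default; errors unwind NAPI,
 * hw state and any already-registered netdevs.
 */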
2695static int airoha_probe(struct platform_device *pdev)
2696{
2697	struct device_node *np;
2698	struct airoha_eth *eth;
2699	int i, err;
2700
2701	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2702	if (!eth)
2703		return -ENOMEM;
2704
2705	eth->dev = &pdev->dev;
2706
2707	err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
2708	if (err) {
2709		dev_err(eth->dev, "failed configuring DMA mask\n");
2710		return err;
2711	}
2712
2713	eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
2714	if (IS_ERR(eth->fe_regs))
2715		return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
2716				     "failed to iomap fe regs\n");
2717
2718	eth->rsts[0].id = "fe";
2719	eth->rsts[1].id = "pdma";
2720	eth->rsts[2].id = "qdma";
2721	err = devm_reset_control_bulk_get_exclusive(eth->dev,
2722						    ARRAY_SIZE(eth->rsts),
2723						    eth->rsts);
2724	if (err) {
2725		dev_err(eth->dev, "failed to get bulk reset lines\n");
2726		return err;
2727	}
2728
2729	eth->xsi_rsts[0].id = "xsi-mac";
2730	eth->xsi_rsts[1].id = "hsi0-mac";
2731	eth->xsi_rsts[2].id = "hsi1-mac";
2732	eth->xsi_rsts[3].id = "hsi-mac";
2733	eth->xsi_rsts[4].id = "xfp-mac";
2734	err = devm_reset_control_bulk_get_exclusive(eth->dev,
2735						    ARRAY_SIZE(eth->xsi_rsts),
2736						    eth->xsi_rsts);
2737	if (err) {
2738		dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
2739		return err;
2740	}
2741
2742	eth->napi_dev = alloc_netdev_dummy(0);
2743	if (!eth->napi_dev)
2744		return -ENOMEM;
2745
2746	/* Enable threaded NAPI by default */
2747	eth->napi_dev->threaded = true;
2748	strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
2749	platform_set_drvdata(pdev, eth);
2750
2751	err = airoha_hw_init(pdev, eth);
2752	if (err)
2753		goto error_hw_cleanup;
2754
2755	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
2756		airoha_qdma_start_napi(&eth->qdma[i]);
2757
2758	for_each_child_of_node(pdev->dev.of_node, np) {
2759		if (!of_device_is_compatible(np, "airoha,eth-mac"))
2760			continue;
2761
2762		if (!of_device_is_available(np))
2763			continue;
2764
2765		err = airoha_alloc_gdm_port(eth, np);
2766		if (err) {
2767			of_node_put(np);
2768			goto error_napi_stop;
2769		}
2770	}
2771
2772	return 0;
2773
2774error_napi_stop:
2775	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
2776		airoha_qdma_stop_napi(&eth->qdma[i]);
2777error_hw_cleanup:
2778	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
2779		airoha_hw_cleanup(&eth->qdma[i]);
2780
2781	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
2782		struct airoha_gdm_port *port = eth->ports[i];
2783
2784		if (port && port->dev->reg_state == NETREG_REGISTERED)
2785			unregister_netdev(port->dev);
2786	}
2787	free_netdev(eth->napi_dev);
2788	platform_set_drvdata(pdev, NULL);
2789
2790	return err;
2791}
2792
2793static void airoha_remove(struct platform_device *pdev)
2794{
2795	struct airoha_eth *eth = platform_get_drvdata(pdev);
2796	int i;
2797
2798	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
2799		airoha_qdma_stop_napi(&eth->qdma[i]);
2800		airoha_hw_cleanup(&eth->qdma[i]);
2801	}
2802
2803	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
2804		struct airoha_gdm_port *port = eth->ports[i];
2805
2806		if (!port)
2807			continue;
2808
2809		airoha_dev_stop(port->dev);
2810		unregister_netdev(port->dev);
2811	}
2812	free_netdev(eth->napi_dev);
2813
2814	platform_set_drvdata(pdev, NULL);
2815}
2816
2817static const struct of_device_id of_airoha_match[] = {
2818	{ .compatible = "airoha,en7581-eth" },
2819	{ /* sentinel */ }
2820};
2821MODULE_DEVICE_TABLE(of, of_airoha_match);
2822
2823static struct platform_driver airoha_driver = {
2824	.probe = airoha_probe,
2825	.remove = airoha_remove,
2826	.driver = {
2827		.name = KBUILD_MODNAME,
2828		.of_match_table = of_airoha_match,
2829	},
2830};
2831module_platform_driver(airoha_driver);
2832
2833MODULE_LICENSE("GPL");
2834MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
2835MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");