   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
   4 *
   5 * Copyright (C) 2010 Lantiq Deutschland
   6 * Copyright (C) 2012 John Crispin <john@phrozen.org>
   7 * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
   8 *
   9 * The VLAN and bridge model the GSWIP hardware uses does not directly
   10 * match the model DSA uses.
  11 *
  12 * The hardware has 64 possible table entries for bridges with one VLAN
   13 * ID, one flow ID and a list of ports for each bridge. All entries which
   14 * match the same flow ID are combined in the MAC learning table and
   15 * act as one global bridge.
   16 * The hardware does not support VLAN filtering per port, only per
   17 * bridge; this driver converts the DSA model to the hardware model.
  18 *
  19 * The CPU gets all the exception frames which do not match any forwarding
  20 * rule and the CPU port is also added to all bridges. This makes it possible
  21 * to handle all the special cases easily in software.
   22 * At initialization the driver allocates one bridge table entry for
   23 * each switch port, which is used when the port is not part of an
   24 * explicit bridge. This prevents frames from being forwarded between
   25 * all LAN ports by default.
  26 */
  27
  28#include <linux/clk.h>
  29#include <linux/delay.h>
  30#include <linux/etherdevice.h>
  31#include <linux/firmware.h>
  32#include <linux/if_bridge.h>
  33#include <linux/if_vlan.h>
  34#include <linux/iopoll.h>
  35#include <linux/mfd/syscon.h>
  36#include <linux/module.h>
  37#include <linux/of_mdio.h>
  38#include <linux/of_net.h>
  39#include <linux/of_platform.h>
  40#include <linux/phy.h>
  41#include <linux/phylink.h>
  42#include <linux/platform_device.h>
  43#include <linux/regmap.h>
  44#include <linux/reset.h>
  45#include <net/dsa.h>
  46#include <dt-bindings/mips/lantiq_rcu_gphy.h>
  47
  48#include "lantiq_pce.h"
  49
  50/* GSWIP MDIO Registers */
  51#define GSWIP_MDIO_GLOB			0x00
  52#define  GSWIP_MDIO_GLOB_ENABLE		BIT(15)
  53#define GSWIP_MDIO_CTRL			0x08
  54#define  GSWIP_MDIO_CTRL_BUSY		BIT(12)
  55#define  GSWIP_MDIO_CTRL_RD		BIT(11)
  56#define  GSWIP_MDIO_CTRL_WR		BIT(10)
  57#define  GSWIP_MDIO_CTRL_PHYAD_MASK	0x1f
  58#define  GSWIP_MDIO_CTRL_PHYAD_SHIFT	5
  59#define  GSWIP_MDIO_CTRL_REGAD_MASK	0x1f
  60#define GSWIP_MDIO_READ			0x09
  61#define GSWIP_MDIO_WRITE		0x0A
  62#define GSWIP_MDIO_MDC_CFG0		0x0B
  63#define GSWIP_MDIO_MDC_CFG1		0x0C
  64#define GSWIP_MDIO_PHYp(p)		(0x15 - (p))
  65#define  GSWIP_MDIO_PHY_LINK_MASK	0x6000
  66#define  GSWIP_MDIO_PHY_LINK_AUTO	0x0000
  67#define  GSWIP_MDIO_PHY_LINK_DOWN	0x4000
  68#define  GSWIP_MDIO_PHY_LINK_UP		0x2000
  69#define  GSWIP_MDIO_PHY_SPEED_MASK	0x1800
  70#define  GSWIP_MDIO_PHY_SPEED_AUTO	0x1800
  71#define  GSWIP_MDIO_PHY_SPEED_M10	0x0000
  72#define  GSWIP_MDIO_PHY_SPEED_M100	0x0800
  73#define  GSWIP_MDIO_PHY_SPEED_G1	0x1000
  74#define  GSWIP_MDIO_PHY_FDUP_MASK	0x0600
  75#define  GSWIP_MDIO_PHY_FDUP_AUTO	0x0000
  76#define  GSWIP_MDIO_PHY_FDUP_EN		0x0200
  77#define  GSWIP_MDIO_PHY_FDUP_DIS	0x0600
  78#define  GSWIP_MDIO_PHY_FCONTX_MASK	0x0180
  79#define  GSWIP_MDIO_PHY_FCONTX_AUTO	0x0000
  80#define  GSWIP_MDIO_PHY_FCONTX_EN	0x0100
  81#define  GSWIP_MDIO_PHY_FCONTX_DIS	0x0180
  82#define  GSWIP_MDIO_PHY_FCONRX_MASK	0x0060
  83#define  GSWIP_MDIO_PHY_FCONRX_AUTO	0x0000
  84#define  GSWIP_MDIO_PHY_FCONRX_EN	0x0020
  85#define  GSWIP_MDIO_PHY_FCONRX_DIS	0x0060
  86#define  GSWIP_MDIO_PHY_ADDR_MASK	0x001f
  87#define  GSWIP_MDIO_PHY_MASK		(GSWIP_MDIO_PHY_ADDR_MASK | \
  88					 GSWIP_MDIO_PHY_FCONRX_MASK | \
  89					 GSWIP_MDIO_PHY_FCONTX_MASK | \
  90					 GSWIP_MDIO_PHY_LINK_MASK | \
  91					 GSWIP_MDIO_PHY_SPEED_MASK | \
  92					 GSWIP_MDIO_PHY_FDUP_MASK)
  93
  94/* GSWIP MII Registers */
  95#define GSWIP_MII_CFGp(p)		(0x2 * (p))
  96#define  GSWIP_MII_CFG_RESET		BIT(15)
  97#define  GSWIP_MII_CFG_EN		BIT(14)
  98#define  GSWIP_MII_CFG_ISOLATE		BIT(13)
  99#define  GSWIP_MII_CFG_LDCLKDIS		BIT(12)
 100#define  GSWIP_MII_CFG_RGMII_IBS	BIT(8)
 101#define  GSWIP_MII_CFG_RMII_CLK		BIT(7)
 102#define  GSWIP_MII_CFG_MODE_MIIP	0x0
 103#define  GSWIP_MII_CFG_MODE_MIIM	0x1
 104#define  GSWIP_MII_CFG_MODE_RMIIP	0x2
 105#define  GSWIP_MII_CFG_MODE_RMIIM	0x3
 106#define  GSWIP_MII_CFG_MODE_RGMII	0x4
 107#define  GSWIP_MII_CFG_MODE_GMII	0x9
 108#define  GSWIP_MII_CFG_MODE_MASK	0xf
 109#define  GSWIP_MII_CFG_RATE_M2P5	0x00
 110#define  GSWIP_MII_CFG_RATE_M25	0x10
 111#define  GSWIP_MII_CFG_RATE_M125	0x20
 112#define  GSWIP_MII_CFG_RATE_M50	0x30
 113#define  GSWIP_MII_CFG_RATE_AUTO	0x40
 114#define  GSWIP_MII_CFG_RATE_MASK	0x70
 115#define GSWIP_MII_PCDU0			0x01
 116#define GSWIP_MII_PCDU1			0x03
 117#define GSWIP_MII_PCDU5			0x05
 118#define  GSWIP_MII_PCDU_TXDLY_MASK	GENMASK(2, 0)
 119#define  GSWIP_MII_PCDU_RXDLY_MASK	GENMASK(9, 7)
 120
 121/* GSWIP Core Registers */
 122#define GSWIP_SWRES			0x000
 123#define  GSWIP_SWRES_R1			BIT(1)	/* GSWIP Software reset */
 124#define  GSWIP_SWRES_R0			BIT(0)	/* GSWIP Hardware reset */
 125#define GSWIP_VERSION			0x013
 126#define  GSWIP_VERSION_REV_SHIFT	0
 127#define  GSWIP_VERSION_REV_MASK		GENMASK(7, 0)
 128#define  GSWIP_VERSION_MOD_SHIFT	8
 129#define  GSWIP_VERSION_MOD_MASK		GENMASK(15, 8)
 130#define   GSWIP_VERSION_2_0		0x100
 131#define   GSWIP_VERSION_2_1		0x021
 132#define   GSWIP_VERSION_2_2		0x122
 133#define   GSWIP_VERSION_2_2_ETC		0x022
 134
 135#define GSWIP_BM_RAM_VAL(x)		(0x043 - (x))
 136#define GSWIP_BM_RAM_ADDR		0x044
 137#define GSWIP_BM_RAM_CTRL		0x045
 138#define  GSWIP_BM_RAM_CTRL_BAS		BIT(15)
 139#define  GSWIP_BM_RAM_CTRL_OPMOD	BIT(5)
 140#define  GSWIP_BM_RAM_CTRL_ADDR_MASK	GENMASK(4, 0)
 141#define GSWIP_BM_QUEUE_GCTRL		0x04A
 142#define  GSWIP_BM_QUEUE_GCTRL_GL_MOD	BIT(10)
 143/* buffer management Port Configuration Register */
 144#define GSWIP_BM_PCFGp(p)		(0x080 + ((p) * 2))
 145#define  GSWIP_BM_PCFG_CNTEN		BIT(0)	/* RMON Counter Enable */
  146#define  GSWIP_BM_PCFG_IGCNT		BIT(1)	/* Ingress Special Tag RMON count */
 147/* buffer management Port Control Register */
 148#define GSWIP_BM_RMON_CTRLp(p)		(0x81 + ((p) * 2))
 149#define  GSWIP_BM_CTRL_RMON_RAM1_RES	BIT(0)	/* Software Reset for RMON RAM 1 */
 150#define  GSWIP_BM_CTRL_RMON_RAM2_RES	BIT(1)	/* Software Reset for RMON RAM 2 */
 151
 152/* PCE */
 153#define GSWIP_PCE_TBL_KEY(x)		(0x447 - (x))
 154#define GSWIP_PCE_TBL_MASK		0x448
 155#define GSWIP_PCE_TBL_VAL(x)		(0x44D - (x))
 156#define GSWIP_PCE_TBL_ADDR		0x44E
 157#define GSWIP_PCE_TBL_CTRL		0x44F
 158#define  GSWIP_PCE_TBL_CTRL_BAS		BIT(15)
 159#define  GSWIP_PCE_TBL_CTRL_TYPE	BIT(13)
 160#define  GSWIP_PCE_TBL_CTRL_VLD		BIT(12)
 161#define  GSWIP_PCE_TBL_CTRL_KEYFORM	BIT(11)
 162#define  GSWIP_PCE_TBL_CTRL_GMAP_MASK	GENMASK(10, 7)
 163#define  GSWIP_PCE_TBL_CTRL_OPMOD_MASK	GENMASK(6, 5)
 164#define  GSWIP_PCE_TBL_CTRL_OPMOD_ADRD	0x00
 165#define  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR	0x20
 166#define  GSWIP_PCE_TBL_CTRL_OPMOD_KSRD	0x40
 167#define  GSWIP_PCE_TBL_CTRL_OPMOD_KSWR	0x60
 168#define  GSWIP_PCE_TBL_CTRL_ADDR_MASK	GENMASK(4, 0)
 169#define GSWIP_PCE_PMAP1			0x453	/* Monitoring port map */
 170#define GSWIP_PCE_PMAP2			0x454	/* Default Multicast port map */
 171#define GSWIP_PCE_PMAP3			0x455	/* Default Unknown Unicast port map */
 172#define GSWIP_PCE_GCTRL_0		0x456
 173#define  GSWIP_PCE_GCTRL_0_MTFL		BIT(0)  /* MAC Table Flushing */
 174#define  GSWIP_PCE_GCTRL_0_MC_VALID	BIT(3)
 175#define  GSWIP_PCE_GCTRL_0_VLAN		BIT(14) /* VLAN aware Switching */
 176#define GSWIP_PCE_GCTRL_1		0x457
 177#define  GSWIP_PCE_GCTRL_1_MAC_GLOCK	BIT(2)	/* MAC Address table lock */
 178#define  GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD	BIT(3) /* Mac address table lock forwarding mode */
 179#define GSWIP_PCE_PCTRL_0p(p)		(0x480 + ((p) * 0xA))
 180#define  GSWIP_PCE_PCTRL_0_TVM		BIT(5)	/* Transparent VLAN mode */
 181#define  GSWIP_PCE_PCTRL_0_VREP		BIT(6)	/* VLAN Replace Mode */
 182#define  GSWIP_PCE_PCTRL_0_INGRESS	BIT(11)	/* Accept special tag in ingress */
 183#define  GSWIP_PCE_PCTRL_0_PSTATE_LISTEN	0x0
 184#define  GSWIP_PCE_PCTRL_0_PSTATE_RX		0x1
 185#define  GSWIP_PCE_PCTRL_0_PSTATE_TX		0x2
 186#define  GSWIP_PCE_PCTRL_0_PSTATE_LEARNING	0x3
 187#define  GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING	0x7
 188#define  GSWIP_PCE_PCTRL_0_PSTATE_MASK	GENMASK(2, 0)
 189#define GSWIP_PCE_VCTRL(p)		(0x485 + ((p) * 0xA))
 190#define  GSWIP_PCE_VCTRL_UVR		BIT(0)	/* Unknown VLAN Rule */
 191#define  GSWIP_PCE_VCTRL_VIMR		BIT(3)	/* VLAN Ingress Member violation rule */
 192#define  GSWIP_PCE_VCTRL_VEMR		BIT(4)	/* VLAN Egress Member violation rule */
 193#define  GSWIP_PCE_VCTRL_VSR		BIT(5)	/* VLAN Security */
 194#define  GSWIP_PCE_VCTRL_VID0		BIT(6)	/* Priority Tagged Rule */
 195#define GSWIP_PCE_DEFPVID(p)		(0x486 + ((p) * 0xA))
 196
 197#define GSWIP_MAC_FLEN			0x8C5
 198#define GSWIP_MAC_CTRL_0p(p)		(0x903 + ((p) * 0xC))
 199#define  GSWIP_MAC_CTRL_0_PADEN		BIT(8)
 200#define  GSWIP_MAC_CTRL_0_FCS_EN	BIT(7)
 201#define  GSWIP_MAC_CTRL_0_FCON_MASK	0x0070
 202#define  GSWIP_MAC_CTRL_0_FCON_AUTO	0x0000
 203#define  GSWIP_MAC_CTRL_0_FCON_RX	0x0010
 204#define  GSWIP_MAC_CTRL_0_FCON_TX	0x0020
 205#define  GSWIP_MAC_CTRL_0_FCON_RXTX	0x0030
 206#define  GSWIP_MAC_CTRL_0_FCON_NONE	0x0040
 207#define  GSWIP_MAC_CTRL_0_FDUP_MASK	0x000C
 208#define  GSWIP_MAC_CTRL_0_FDUP_AUTO	0x0000
 209#define  GSWIP_MAC_CTRL_0_FDUP_EN	0x0004
 210#define  GSWIP_MAC_CTRL_0_FDUP_DIS	0x000C
 211#define  GSWIP_MAC_CTRL_0_GMII_MASK	0x0003
 212#define  GSWIP_MAC_CTRL_0_GMII_AUTO	0x0000
 213#define  GSWIP_MAC_CTRL_0_GMII_MII	0x0001
 214#define  GSWIP_MAC_CTRL_0_GMII_RGMII	0x0002
 215#define GSWIP_MAC_CTRL_2p(p)		(0x905 + ((p) * 0xC))
 216#define GSWIP_MAC_CTRL_2_LCHKL		BIT(2) /* Frame Length Check Long Enable */
  217#define GSWIP_MAC_CTRL_2_MLEN		BIT(3) /* Maximum Untagged Frame Length */
 218
 219/* Ethernet Switch Fetch DMA Port Control Register */
 220#define GSWIP_FDMA_PCTRLp(p)		(0xA80 + ((p) * 0x6))
 221#define  GSWIP_FDMA_PCTRL_EN		BIT(0)	/* FDMA Port Enable */
 222#define  GSWIP_FDMA_PCTRL_STEN		BIT(1)	/* Special Tag Insertion Enable */
 223#define  GSWIP_FDMA_PCTRL_VLANMOD_MASK	GENMASK(4, 3)	/* VLAN Modification Control */
 224#define  GSWIP_FDMA_PCTRL_VLANMOD_SHIFT	3	/* VLAN Modification Control */
 225#define  GSWIP_FDMA_PCTRL_VLANMOD_DIS	(0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
 226#define  GSWIP_FDMA_PCTRL_VLANMOD_PRIO	(0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
 227#define  GSWIP_FDMA_PCTRL_VLANMOD_ID	(0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
 228#define  GSWIP_FDMA_PCTRL_VLANMOD_BOTH	(0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
 229
 230/* Ethernet Switch Store DMA Port Control Register */
 231#define GSWIP_SDMA_PCTRLp(p)		(0xBC0 + ((p) * 0x6))
 232#define  GSWIP_SDMA_PCTRL_EN		BIT(0)	/* SDMA Port Enable */
 233#define  GSWIP_SDMA_PCTRL_FCEN		BIT(1)	/* Flow Control Enable */
 234#define  GSWIP_SDMA_PCTRL_PAUFWD	BIT(3)	/* Pause Frame Forwarding */
 235
 236#define GSWIP_TABLE_ACTIVE_VLAN		0x01
 237#define GSWIP_TABLE_VLAN_MAPPING	0x02
 238#define GSWIP_TABLE_MAC_BRIDGE		0x0b
 239#define  GSWIP_TABLE_MAC_BRIDGE_KEY3_FID	GENMASK(5, 0)	/* Filtering identifier */
 240#define  GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT	GENMASK(7, 4)	/* Port on learned entries */
 241#define  GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC	BIT(0)		/* Static, non-aging entry */
 242
 243#define XRX200_GPHY_FW_ALIGN	(16 * 1024)
 244
 245/* Maximum packet size supported by the switch. In theory this should be 10240,
 246 * but long packets currently cause lock-ups with an MTU of over 2526. Medium
 247 * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
 248 * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
 249 * packet reception. This is probably caused by the PPA engine, which is on the
 250 * RX part of the device. Packet transmission works properly up to 10240.
 251 */
 252#define GSWIP_MAX_PACKET_LENGTH	2400
 253
 254struct gswip_hw_info {
 255	int max_ports;
 256	int cpu_port;
 257	const struct dsa_switch_ops *ops;
 258};
 259
 260struct xway_gphy_match_data {
 261	char *fe_firmware_name;
 262	char *ge_firmware_name;
 263};
 264
 265struct gswip_gphy_fw {
 266	struct clk *clk_gate;
 267	struct reset_control *reset;
 268	u32 fw_addr_offset;
 269	char *fw_name;
 270};
 271
 272struct gswip_vlan {
 273	struct net_device *bridge;
 274	u16 vid;
 275	u8 fid;
 276};
 277
 278struct gswip_priv {
 279	__iomem void *gswip;
 280	__iomem void *mdio;
 281	__iomem void *mii;
 282	const struct gswip_hw_info *hw_info;
 283	const struct xway_gphy_match_data *gphy_fw_name_cfg;
 284	struct dsa_switch *ds;
 285	struct device *dev;
 286	struct regmap *rcu_regmap;
 287	struct gswip_vlan vlans[64];
 288	int num_gphy_fw;
 289	struct gswip_gphy_fw *gphy_fw;
 290	u32 port_vlan_filter;
 291	struct mutex pce_table_lock;
 292};
 293
 294struct gswip_pce_table_entry {
 295	u16 index;      // PCE_TBL_ADDR.ADDR = pData->table_index
 296	u16 table;      // PCE_TBL_CTRL.ADDR = pData->table
 297	u16 key[8];
 298	u16 val[5];
 299	u16 mask;
 300	u8 gmap;
 301	bool type;
 302	bool valid;
 303	bool key_mode;
 304};
 305
 306struct gswip_rmon_cnt_desc {
 307	unsigned int size;
 308	unsigned int offset;
 309	const char *name;
 310};
 311
 312#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
 313
 314static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
 315	/** Receive Packet Count (only packets that are accepted and not discarded). */
 316	MIB_DESC(1, 0x1F, "RxGoodPkts"),
 317	MIB_DESC(1, 0x23, "RxUnicastPkts"),
 318	MIB_DESC(1, 0x22, "RxMulticastPkts"),
 319	MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
 320	MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
 321	MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
 322	MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
 323	MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
 324	MIB_DESC(1, 0x20, "RxGoodPausePkts"),
 325	MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
 326	MIB_DESC(1, 0x12, "Rx64BytePkts"),
 327	MIB_DESC(1, 0x13, "Rx127BytePkts"),
 328	MIB_DESC(1, 0x14, "Rx255BytePkts"),
 329	MIB_DESC(1, 0x15, "Rx511BytePkts"),
 330	MIB_DESC(1, 0x16, "Rx1023BytePkts"),
 331	/** Receive Size 1024-1522 (or more, if configured) Packet Count. */
 332	MIB_DESC(1, 0x17, "RxMaxBytePkts"),
 333	MIB_DESC(1, 0x18, "RxDroppedPkts"),
 334	MIB_DESC(1, 0x19, "RxFilteredPkts"),
 335	MIB_DESC(2, 0x24, "RxGoodBytes"),
 336	MIB_DESC(2, 0x26, "RxBadBytes"),
 337	MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
 338	MIB_DESC(1, 0x0C, "TxGoodPkts"),
 339	MIB_DESC(1, 0x06, "TxUnicastPkts"),
 340	MIB_DESC(1, 0x07, "TxMulticastPkts"),
 341	MIB_DESC(1, 0x00, "Tx64BytePkts"),
 342	MIB_DESC(1, 0x01, "Tx127BytePkts"),
 343	MIB_DESC(1, 0x02, "Tx255BytePkts"),
 344	MIB_DESC(1, 0x03, "Tx511BytePkts"),
 345	MIB_DESC(1, 0x04, "Tx1023BytePkts"),
 346	/** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
 347	MIB_DESC(1, 0x05, "TxMaxBytePkts"),
 348	MIB_DESC(1, 0x08, "TxSingleCollCount"),
 349	MIB_DESC(1, 0x09, "TxMultCollCount"),
 350	MIB_DESC(1, 0x0A, "TxLateCollCount"),
 351	MIB_DESC(1, 0x0B, "TxExcessCollCount"),
 352	MIB_DESC(1, 0x0D, "TxPauseCount"),
 353	MIB_DESC(1, 0x10, "TxDroppedPkts"),
 354	MIB_DESC(2, 0x0E, "TxGoodBytes"),
 355};
 356
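/* Note: the register access helpers below take register offsets in units of
 * 32-bit words and convert them to byte offsets (offset * 4) for the actual
 * MMIO access.
 */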
 357static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
 358{
 359	return __raw_readl(priv->gswip + (offset * 4));
 360}
 361
 362static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
 363{
 364	__raw_writel(val, priv->gswip + (offset * 4));
 365}
 366
 367static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
 368			      u32 offset)
 369{
 370	u32 val = gswip_switch_r(priv, offset);
 371
 372	val &= ~(clear);
 373	val |= set;
 374	gswip_switch_w(priv, val, offset);
 375}
 376
 377static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
 378				  u32 cleared)
 379{
 380	u32 val;
 381
 382	return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
 383				  (val & cleared) == 0, 20, 50000);
 384}
 385
 386static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
 387{
 388	return __raw_readl(priv->mdio + (offset * 4));
 389}
 390
 391static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
 392{
 393	__raw_writel(val, priv->mdio + (offset * 4));
 394}
 395
 396static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
 397			    u32 offset)
 398{
 399	u32 val = gswip_mdio_r(priv, offset);
 400
 401	val &= ~(clear);
 402	val |= set;
 403	gswip_mdio_w(priv, val, offset);
 404}
 405
 406static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
 407{
 408	return __raw_readl(priv->mii + (offset * 4));
 409}
 410
 411static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
 412{
 413	__raw_writel(val, priv->mii + (offset * 4));
 414}
 415
 416static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
 417			   u32 offset)
 418{
 419	u32 val = gswip_mii_r(priv, offset);
 420
 421	val &= ~(clear);
 422	val |= set;
 423	gswip_mii_w(priv, val, offset);
 424}
 425
 426static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
 427			       int port)
 428{
 429	/* There's no MII_CFG register for the CPU port */
 430	if (!dsa_is_cpu_port(priv->ds, port))
 431		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
 432}
 433
 434static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
 435				int port)
 436{
 437	switch (port) {
 438	case 0:
 439		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
 440		break;
 441	case 1:
 442		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
 443		break;
 444	case 5:
 445		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
 446		break;
 447	}
 448}
 449
 450static int gswip_mdio_poll(struct gswip_priv *priv)
 451{
 452	int cnt = 100;
 453
 454	while (likely(cnt--)) {
 455		u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
 456
 457		if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
 458			return 0;
 459		usleep_range(20, 40);
 460	}
 461
 462	return -ETIMEDOUT;
 463}
 464
 465static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
 466{
 467	struct gswip_priv *priv = bus->priv;
 468	int err;
 469
 470	err = gswip_mdio_poll(priv);
 471	if (err) {
 472		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
 473		return err;
 474	}
 475
 476	gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
 477	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
 478		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
 479		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
 480		GSWIP_MDIO_CTRL);
 481
 482	return 0;
 483}
 484
 485static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
 486{
 487	struct gswip_priv *priv = bus->priv;
 488	int err;
 489
 490	err = gswip_mdio_poll(priv);
 491	if (err) {
 492		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
 493		return err;
 494	}
 495
 496	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
 497		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
 498		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
 499		GSWIP_MDIO_CTRL);
 500
 501	err = gswip_mdio_poll(priv);
 502	if (err) {
 503		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
 504		return err;
 505	}
 506
 507	return gswip_mdio_r(priv, GSWIP_MDIO_READ);
 508}
 509
 510static int gswip_mdio(struct gswip_priv *priv)
 511{
 512	struct device_node *mdio_np, *switch_np = priv->dev->of_node;
 513	struct device *dev = priv->dev;
 514	struct mii_bus *bus;
 515	int err = 0;
 516
 517	mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio");
 518	if (!of_device_is_available(mdio_np))
 519		goto out_put_node;
 520
 521	bus = devm_mdiobus_alloc(dev);
 522	if (!bus) {
 523		err = -ENOMEM;
 524		goto out_put_node;
 525	}
 526
 527	bus->priv = priv;
 528	bus->read = gswip_mdio_rd;
 529	bus->write = gswip_mdio_wr;
 530	bus->name = "lantiq,xrx200-mdio";
 531	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
 532	bus->parent = priv->dev;
 533
 534	err = devm_of_mdiobus_register(dev, bus, mdio_np);
 535
 536out_put_node:
 537	of_node_put(mdio_np);
 538
 539	return err;
 540}
 541
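/* Reading a PCE table entry: wait for a previous access to finish (BAS
 * cleared), program the entry index and the table/operation mode, start the
 * access by setting BAS again and, once BAS clears, read back the key, value
 * and mask registers.
 */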
 542static int gswip_pce_table_entry_read(struct gswip_priv *priv,
 543				      struct gswip_pce_table_entry *tbl)
 544{
 545	int i;
 546	int err;
 547	u16 crtl;
 548	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
 549					GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
 550
 551	mutex_lock(&priv->pce_table_lock);
 552
 553	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 554				     GSWIP_PCE_TBL_CTRL_BAS);
 555	if (err) {
 556		mutex_unlock(&priv->pce_table_lock);
 557		return err;
 558	}
 559
 560	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
 561	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
 562				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
 563			  tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
 564			  GSWIP_PCE_TBL_CTRL);
 565
 566	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 567				     GSWIP_PCE_TBL_CTRL_BAS);
 568	if (err) {
 569		mutex_unlock(&priv->pce_table_lock);
 570		return err;
 571	}
 572
 573	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
 574		tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
 575
 576	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
 577		tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
 578
 579	tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
 580
 581	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
 582
 583	tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
 584	tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
 585	tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
 586
 587	mutex_unlock(&priv->pce_table_lock);
 588
 589	return 0;
 590}
 591
 592static int gswip_pce_table_entry_write(struct gswip_priv *priv,
 593				       struct gswip_pce_table_entry *tbl)
 594{
 595	int i;
 596	int err;
 597	u16 crtl;
 598	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
 599					GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
 600
 601	mutex_lock(&priv->pce_table_lock);
 602
 603	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 604				     GSWIP_PCE_TBL_CTRL_BAS);
 605	if (err) {
 606		mutex_unlock(&priv->pce_table_lock);
 607		return err;
 608	}
 609
 610	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
 611	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
 612				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
 613			  tbl->table | addr_mode,
 614			  GSWIP_PCE_TBL_CTRL);
 615
 616	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
 617		gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
 618
 619	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
 620		gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
 621
 622	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
 623				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
 624			  tbl->table | addr_mode,
 625			  GSWIP_PCE_TBL_CTRL);
 626
 627	gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
 628
 629	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
 630	crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
 631		  GSWIP_PCE_TBL_CTRL_GMAP_MASK);
 632	if (tbl->type)
 633		crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
 634	if (tbl->valid)
 635		crtl |= GSWIP_PCE_TBL_CTRL_VLD;
 636	crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
 637	crtl |= GSWIP_PCE_TBL_CTRL_BAS;
 638	gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
 639
 640	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 641				     GSWIP_PCE_TBL_CTRL_BAS);
 642
 643	mutex_unlock(&priv->pce_table_lock);
 644
 645	return err;
 646}
 647
 648/* Add the LAN port into a bridge with the CPU port by
 649 * default. This prevents automatic forwarding of
  650 * packets between the LAN ports when no explicit
 651 * bridge is configured.
 652 */
 653static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
 654{
 655	struct gswip_pce_table_entry vlan_active = {0,};
 656	struct gswip_pce_table_entry vlan_mapping = {0,};
 657	unsigned int cpu_port = priv->hw_info->cpu_port;
 658	int err;
 659
 660	vlan_active.index = port + 1;
 661	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
 662	vlan_active.key[0] = 0; /* vid */
 663	vlan_active.val[0] = port + 1 /* fid */;
 664	vlan_active.valid = add;
 665	err = gswip_pce_table_entry_write(priv, &vlan_active);
 666	if (err) {
 667		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
 668		return err;
 669	}
 670
 671	if (!add)
 672		return 0;
 673
 674	vlan_mapping.index = port + 1;
 675	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
 676	vlan_mapping.val[0] = 0 /* vid */;
 677	vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
 678	vlan_mapping.val[2] = 0;
 679	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
 680	if (err) {
 681		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
 682		return err;
 683	}
 684
 685	return 0;
 686}
 687
 688static int gswip_port_enable(struct dsa_switch *ds, int port,
 689			     struct phy_device *phydev)
 690{
 691	struct gswip_priv *priv = ds->priv;
 692	int err;
 693
 694	if (!dsa_is_cpu_port(ds, port)) {
 695		u32 mdio_phy = 0;
 696
 697		err = gswip_add_single_port_br(priv, port, true);
 698		if (err)
 699			return err;
 700
 701		if (phydev)
 702			mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
 703
 704		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
 705				GSWIP_MDIO_PHYp(port));
 706	}
 707
 708	/* RMON Counter Enable for port */
 709	gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
 710
 711	/* enable port fetch/store dma & VLAN Modification */
 712	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
 713				   GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
 714			 GSWIP_FDMA_PCTRLp(port));
 715	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
 716			  GSWIP_SDMA_PCTRLp(port));
 717
 718	return 0;
 719}
 720
 721static void gswip_port_disable(struct dsa_switch *ds, int port)
 722{
 723	struct gswip_priv *priv = ds->priv;
 724
 725	gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
 726			  GSWIP_FDMA_PCTRLp(port));
 727	gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
 728			  GSWIP_SDMA_PCTRLp(port));
 729}
 730
 731static int gswip_pce_load_microcode(struct gswip_priv *priv)
 732{
 733	int i;
 734	int err;
 735
 736	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
 737				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
 738			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
 739	gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
 740
 741	for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
 742		gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
 743		gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
 744			       GSWIP_PCE_TBL_VAL(0));
 745		gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
 746			       GSWIP_PCE_TBL_VAL(1));
 747		gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
 748			       GSWIP_PCE_TBL_VAL(2));
 749		gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
 750			       GSWIP_PCE_TBL_VAL(3));
 751
 752		/* start the table access: */
 753		gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
 754				  GSWIP_PCE_TBL_CTRL);
 755		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 756					     GSWIP_PCE_TBL_CTRL_BAS);
 757		if (err)
 758			return err;
 759	}
 760
 761	/* tell the switch that the microcode is loaded */
 762	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
 763			  GSWIP_PCE_GCTRL_0);
 764
 765	return 0;
 766}
 767
 768static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
 769				     bool vlan_filtering,
 770				     struct netlink_ext_ack *extack)
 771{
 772	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
 773	struct gswip_priv *priv = ds->priv;
 774
  775	/* Do not allow changing the VLAN filtering options while in a bridge */
 776	if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) {
 777		NL_SET_ERR_MSG_MOD(extack,
 778				   "Dynamic toggling of vlan_filtering not supported");
 779		return -EIO;
 780	}
 781
 782	if (vlan_filtering) {
 783		/* Use tag based VLAN */
 784		gswip_switch_mask(priv,
 785				  GSWIP_PCE_VCTRL_VSR,
 786				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
 787				  GSWIP_PCE_VCTRL_VEMR,
 788				  GSWIP_PCE_VCTRL(port));
 789		gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
 790				  GSWIP_PCE_PCTRL_0p(port));
 791	} else {
 792		/* Use port based VLAN */
 793		gswip_switch_mask(priv,
 794				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
 795				  GSWIP_PCE_VCTRL_VEMR,
 796				  GSWIP_PCE_VCTRL_VSR,
 797				  GSWIP_PCE_VCTRL(port));
 798		gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
 799				  GSWIP_PCE_PCTRL_0p(port));
 800	}
 801
 802	return 0;
 803}
 804
 805static int gswip_setup(struct dsa_switch *ds)
 806{
 807	struct gswip_priv *priv = ds->priv;
 808	unsigned int cpu_port = priv->hw_info->cpu_port;
 809	int i;
 810	int err;
 811
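	/* Assert the switch reset and release it again after a short delay */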
 812	gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
 813	usleep_range(5000, 10000);
 814	gswip_switch_w(priv, 0, GSWIP_SWRES);
 815
 816	/* disable port fetch/store dma on all ports */
 817	for (i = 0; i < priv->hw_info->max_ports; i++) {
 818		gswip_port_disable(ds, i);
 819		gswip_port_vlan_filtering(ds, i, false, NULL);
 820	}
 821
 822	/* enable Switch */
 823	gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
 824
 825	err = gswip_pce_load_microcode(priv);
 826	if (err) {
 827		dev_err(priv->dev, "writing PCE microcode failed, %i\n", err);
 828		return err;
 829	}
 830
 831	/* Default unknown Broadcast/Multicast/Unicast port maps */
 832	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
 833	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
 834	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
 835
  836	/* Deactivate MDIO PHY auto polling. Some PHYs, such as the AR8030, have
  837	 * an interoperability problem with this auto polling mechanism because
  838	 * their status registers report that the link is in a different state
  839	 * than it actually is. The AR8030 has the BMSR_ESTATEN bit set as well
  840	 * as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the auto
  841	 * polling state machine consider the link to be negotiated at
  842	 * 1 Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY, this
  843	 * leads to the switch port being completely dead (neither RX nor TX
  844	 * works).
  845	 * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
  846	 * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
  847	 * it would work fine for a few minutes to hours and then stop; on
  848	 * other devices no traffic could be sent or received at all.
 849	 * Testing shows that when PHY auto polling is disabled these problems
 850	 * go away.
 851	 */
 852	gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
 853
 854	/* Configure the MDIO Clock 2.5 MHz */
 855	gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
 856
  857	/* Disable the xMII interface and clear its isolation bit */
 858	for (i = 0; i < priv->hw_info->max_ports; i++)
 859		gswip_mii_mask_cfg(priv,
 860				   GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
 861				   0, i);
 862
 863	/* enable special tag insertion on cpu port */
 864	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
 865			  GSWIP_FDMA_PCTRLp(cpu_port));
 866
 867	/* accept special tag in ingress direction */
 868	gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
 869			  GSWIP_PCE_PCTRL_0p(cpu_port));
 870
 871	gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
 872			  GSWIP_BM_QUEUE_GCTRL);
 873
 874	/* VLAN aware Switching */
 875	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
 876
 877	/* Flush MAC Table */
 878	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
 879
 880	err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
 881				     GSWIP_PCE_GCTRL_0_MTFL);
 882	if (err) {
 883		dev_err(priv->dev, "MAC flushing didn't finish\n");
 884		return err;
 885	}
 886
 887	ds->mtu_enforcement_ingress = true;
 888
 889	ds->configure_vlan_while_not_filtering = false;
 890
 891	return 0;
 892}
 893
 894static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
 895						    int port,
 896						    enum dsa_tag_protocol mp)
 897{
 898	return DSA_TAG_PROTO_GSWIP;
 899}
 900
 901static int gswip_vlan_active_create(struct gswip_priv *priv,
 902				    struct net_device *bridge,
 903				    int fid, u16 vid)
 904{
 905	struct gswip_pce_table_entry vlan_active = {0,};
 906	unsigned int max_ports = priv->hw_info->max_ports;
 907	int idx = -1;
 908	int err;
 909	int i;
 910
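	/* The first max_ports entries are reserved for the single port
	 * bridges created in gswip_add_single_port_br() (index = port + 1),
	 * so shared bridge/VLAN entries are allocated from max_ports upwards.
	 */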
 911	/* Look for a free slot */
 912	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
 913		if (!priv->vlans[i].bridge) {
 914			idx = i;
 915			break;
 916		}
 917	}
 918
 919	if (idx == -1)
 920		return -ENOSPC;
 921
 922	if (fid == -1)
 923		fid = idx;
 924
 925	vlan_active.index = idx;
 926	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
 927	vlan_active.key[0] = vid;
 928	vlan_active.val[0] = fid;
 929	vlan_active.valid = true;
 930
 931	err = gswip_pce_table_entry_write(priv, &vlan_active);
 932	if (err) {
 933		dev_err(priv->dev, "failed to write active VLAN: %d\n",	err);
 934		return err;
 935	}
 936
 937	priv->vlans[idx].bridge = bridge;
 938	priv->vlans[idx].vid = vid;
 939	priv->vlans[idx].fid = fid;
 940
 941	return idx;
 942}
 943
 944static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
 945{
 946	struct gswip_pce_table_entry vlan_active = {0,};
 947	int err;
 948
 949	vlan_active.index = idx;
 950	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
 951	vlan_active.valid = false;
 952	err = gswip_pce_table_entry_write(priv, &vlan_active);
 953	if (err)
 954		dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
 955	priv->vlans[idx].bridge = NULL;
 956
 957	return err;
 958}
 959
 960static int gswip_vlan_add_unaware(struct gswip_priv *priv,
 961				  struct net_device *bridge, int port)
 962{
 963	struct gswip_pce_table_entry vlan_mapping = {0,};
 964	unsigned int max_ports = priv->hw_info->max_ports;
 965	unsigned int cpu_port = priv->hw_info->cpu_port;
 966	bool active_vlan_created = false;
 967	int idx = -1;
 968	int i;
 969	int err;
 970
  971	/* Check if there is already an entry for this bridge */
 972	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
 973		if (priv->vlans[i].bridge == bridge) {
 974			idx = i;
 975			break;
 976		}
 977	}
 978
  979	/* If this bridge is not programmed yet, add an Active VLAN table
 980	 * entry in a free slot and prepare the VLAN mapping table entry.
 981	 */
 982	if (idx == -1) {
 983		idx = gswip_vlan_active_create(priv, bridge, -1, 0);
 984		if (idx < 0)
 985			return idx;
 986		active_vlan_created = true;
 987
 988		vlan_mapping.index = idx;
 989		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
 990		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
 991		vlan_mapping.val[0] = 0;
 992	} else {
 993		/* Read the existing VLAN mapping entry from the switch */
 994		vlan_mapping.index = idx;
 995		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
 996		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
 997		if (err) {
 998			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
 999				err);
1000			return err;
1001		}
1002	}
1003
1004	/* Update the VLAN mapping entry and write it to the switch */
1005	vlan_mapping.val[1] |= BIT(cpu_port);
1006	vlan_mapping.val[1] |= BIT(port);
1007	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
1008	if (err) {
1009		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
 1010		/* In case an Active VLAN was created, delete it again */
1011		if (active_vlan_created)
1012			gswip_vlan_active_remove(priv, idx);
1013		return err;
1014	}
1015
1016	gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
1017	return 0;
1018}
1019
1020static int gswip_vlan_add_aware(struct gswip_priv *priv,
1021				struct net_device *bridge, int port,
1022				u16 vid, bool untagged,
1023				bool pvid)
1024{
1025	struct gswip_pce_table_entry vlan_mapping = {0,};
1026	unsigned int max_ports = priv->hw_info->max_ports;
1027	unsigned int cpu_port = priv->hw_info->cpu_port;
1028	bool active_vlan_created = false;
1029	int idx = -1;
1030	int fid = -1;
1031	int i;
1032	int err;
1033
 1034	/* Check if there is already an entry for this bridge */
1035	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
1036		if (priv->vlans[i].bridge == bridge) {
1037			if (fid != -1 && fid != priv->vlans[i].fid)
1038				dev_err(priv->dev, "one bridge with multiple flow ids\n");
1039			fid = priv->vlans[i].fid;
1040			if (priv->vlans[i].vid == vid) {
1041				idx = i;
1042				break;
1043			}
1044		}
1045	}
1046
 1047	/* If this bridge is not programmed yet, add an Active VLAN table
1048	 * entry in a free slot and prepare the VLAN mapping table entry.
1049	 */
1050	if (idx == -1) {
1051		idx = gswip_vlan_active_create(priv, bridge, fid, vid);
1052		if (idx < 0)
1053			return idx;
1054		active_vlan_created = true;
1055
1056		vlan_mapping.index = idx;
1057		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
1058		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
1059		vlan_mapping.val[0] = vid;
1060	} else {
1061		/* Read the existing VLAN mapping entry from the switch */
1062		vlan_mapping.index = idx;
1063		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
1064		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
1065		if (err) {
1066			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
1067				err);
1068			return err;
1069		}
1070	}
1071
1072	vlan_mapping.val[0] = vid;
1073	/* Update the VLAN mapping entry and write it to the switch */
1074	vlan_mapping.val[1] |= BIT(cpu_port);
1075	vlan_mapping.val[2] |= BIT(cpu_port);
1076	vlan_mapping.val[1] |= BIT(port);
1077	if (untagged)
1078		vlan_mapping.val[2] &= ~BIT(port);
1079	else
1080		vlan_mapping.val[2] |= BIT(port);
1081	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
1082	if (err) {
1083		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
 1084		/* In case an Active VLAN was created, delete it again */
1085		if (active_vlan_created)
1086			gswip_vlan_active_remove(priv, idx);
1087		return err;
1088	}
1089
1090	if (pvid)
1091		gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
1092
1093	return 0;
1094}
1095
1096static int gswip_vlan_remove(struct gswip_priv *priv,
1097			     struct net_device *bridge, int port,
1098			     u16 vid, bool pvid, bool vlan_aware)
1099{
1100	struct gswip_pce_table_entry vlan_mapping = {0,};
1101	unsigned int max_ports = priv->hw_info->max_ports;
1102	unsigned int cpu_port = priv->hw_info->cpu_port;
1103	int idx = -1;
1104	int i;
1105	int err;
1106
 1107	/* Check if there is already an entry for this bridge */
1108	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
1109		if (priv->vlans[i].bridge == bridge &&
1110		    (!vlan_aware || priv->vlans[i].vid == vid)) {
1111			idx = i;
1112			break;
1113		}
1114	}
1115
1116	if (idx == -1) {
 1117		dev_err(priv->dev, "bridge to leave does not exist\n");
1118		return -ENOENT;
1119	}
1120
1121	vlan_mapping.index = idx;
1122	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
1123	err = gswip_pce_table_entry_read(priv, &vlan_mapping);
1124	if (err) {
1125		dev_err(priv->dev, "failed to read VLAN mapping: %d\n",	err);
1126		return err;
1127	}
1128
1129	vlan_mapping.val[1] &= ~BIT(port);
1130	vlan_mapping.val[2] &= ~BIT(port);
1131	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
1132	if (err) {
1133		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
1134		return err;
1135	}
1136
1137	/* In case all ports are removed from the bridge, remove the VLAN */
1138	if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
1139		err = gswip_vlan_active_remove(priv, idx);
1140		if (err) {
1141			dev_err(priv->dev, "failed to write active VLAN: %d\n",
1142				err);
1143			return err;
1144		}
1145	}
1146
 1147	/* GSWIP 2.2 (GRX300) and later program the VID directly here. */
1148	if (pvid)
1149		gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
1150
1151	return 0;
1152}
1153
1154static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
1155				  struct dsa_bridge bridge,
1156				  bool *tx_fwd_offload,
1157				  struct netlink_ext_ack *extack)
1158{
1159	struct net_device *br = bridge.dev;
1160	struct gswip_priv *priv = ds->priv;
1161	int err;
1162
1163	/* When the bridge uses VLAN filtering we have to configure VLAN
1164	 * specific bridges. No bridge is configured here.
1165	 */
1166	if (!br_vlan_enabled(br)) {
1167		err = gswip_vlan_add_unaware(priv, br, port);
1168		if (err)
1169			return err;
1170		priv->port_vlan_filter &= ~BIT(port);
1171	} else {
1172		priv->port_vlan_filter |= BIT(port);
1173	}
1174	return gswip_add_single_port_br(priv, port, false);
1175}
1176
1177static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
1178				    struct dsa_bridge bridge)
1179{
1180	struct net_device *br = bridge.dev;
1181	struct gswip_priv *priv = ds->priv;
1182
1183	gswip_add_single_port_br(priv, port, true);
1184
1185	/* When the bridge uses VLAN filtering we have to configure VLAN
1186	 * specific bridges. No bridge is configured here.
1187	 */
1188	if (!br_vlan_enabled(br))
1189		gswip_vlan_remove(priv, br, port, 0, true, false);
1190}
1191
1192static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
1193				   const struct switchdev_obj_port_vlan *vlan,
1194				   struct netlink_ext_ack *extack)
1195{
1196	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
1197	struct gswip_priv *priv = ds->priv;
1198	unsigned int max_ports = priv->hw_info->max_ports;
1199	int pos = max_ports;
1200	int i, idx = -1;
1201
1202	/* We only support VLAN filtering on bridges */
1203	if (!dsa_is_cpu_port(ds, port) && !bridge)
1204		return -EOPNOTSUPP;
1205
 1206	/* Check if there is already an entry for this VLAN */
1207	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
1208		if (priv->vlans[i].bridge == bridge &&
1209		    priv->vlans[i].vid == vlan->vid) {
1210			idx = i;
1211			break;
1212		}
1213	}
1214
 1215	/* If this VLAN is not programmed yet, we have to reserve
 1216	 * one entry in the VLAN table. The search for a free slot
 1217	 * starts after the per-port entries.
1218	 */
1219	if (idx == -1) {
1220		/* Look for a free slot */
1221		for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
1222			if (!priv->vlans[pos].bridge) {
1223				idx = pos;
1224				pos++;
1225				break;
1226			}
1227		}
1228
1229		if (idx == -1) {
1230			NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
1231			return -ENOSPC;
1232		}
1233	}
1234
1235	return 0;
1236}
1237
1238static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
1239			       const struct switchdev_obj_port_vlan *vlan,
1240			       struct netlink_ext_ack *extack)
1241{
1242	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
1243	struct gswip_priv *priv = ds->priv;
1244	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1245	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1246	int err;
1247
1248	err = gswip_port_vlan_prepare(ds, port, vlan, extack);
1249	if (err)
1250		return err;
1251
1252	/* We have to receive all packets on the CPU port and should not
 1253	 * do any VLAN filtering here. This is also called with a NULL
 1254	 * bridge, in which case we do not know which bridge to configure
 1255	 * this for.
1256	 */
1257	if (dsa_is_cpu_port(ds, port))
1258		return 0;
1259
1260	return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
1261				    untagged, pvid);
1262}
1263
1264static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
1265			       const struct switchdev_obj_port_vlan *vlan)
1266{
1267	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
1268	struct gswip_priv *priv = ds->priv;
1269	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1270
1271	/* We have to receive all packets on the CPU port and should not
 1272	 * do any VLAN filtering here. This is also called with a NULL
 1273	 * bridge, in which case we do not know which bridge to configure
 1274	 * this for.
1275	 */
1276	if (dsa_is_cpu_port(ds, port))
1277		return 0;
1278
1279	return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
1280}
1281
1282static void gswip_port_fast_age(struct dsa_switch *ds, int port)
1283{
1284	struct gswip_priv *priv = ds->priv;
1285	struct gswip_pce_table_entry mac_bridge = {0,};
1286	int i;
1287	int err;
1288
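	/* Walk all 2048 MAC bridge table entries and invalidate the dynamic
	 * (non-static) ones learned on this port.
	 */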
1289	for (i = 0; i < 2048; i++) {
1290		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
1291		mac_bridge.index = i;
1292
1293		err = gswip_pce_table_entry_read(priv, &mac_bridge);
1294		if (err) {
1295			dev_err(priv->dev, "failed to read mac bridge: %d\n",
1296				err);
1297			return;
1298		}
1299
1300		if (!mac_bridge.valid)
1301			continue;
1302
1303		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC)
1304			continue;
1305
1306		if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
1307				      mac_bridge.val[0]))
1308			continue;
1309
1310		mac_bridge.valid = false;
1311		err = gswip_pce_table_entry_write(priv, &mac_bridge);
1312		if (err) {
1313			dev_err(priv->dev, "failed to write mac bridge: %d\n",
1314				err);
1315			return;
1316		}
1317	}
1318}
1319
1320static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
1321{
1322	struct gswip_priv *priv = ds->priv;
1323	u32 stp_state;
1324
1325	switch (state) {
1326	case BR_STATE_DISABLED:
1327		gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
1328				  GSWIP_SDMA_PCTRLp(port));
1329		return;
1330	case BR_STATE_BLOCKING:
1331	case BR_STATE_LISTENING:
1332		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
1333		break;
1334	case BR_STATE_LEARNING:
1335		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
1336		break;
1337	case BR_STATE_FORWARDING:
1338		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
1339		break;
1340	default:
1341		dev_err(priv->dev, "invalid STP state: %d\n", state);
1342		return;
1343	}
1344
1345	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
1346			  GSWIP_SDMA_PCTRLp(port));
1347	gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
1348			  GSWIP_PCE_PCTRL_0p(port));
1349}
1350
1351static int gswip_port_fdb(struct dsa_switch *ds, int port,
1352			  const unsigned char *addr, u16 vid, bool add)
1353{
1354	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
1355	struct gswip_priv *priv = ds->priv;
1356	struct gswip_pce_table_entry mac_bridge = {0,};
1357	unsigned int max_ports = priv->hw_info->max_ports;
1358	int fid = -1;
1359	int i;
1360	int err;
1361
1362	if (!bridge)
1363		return -EINVAL;
1364
1365	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
1366		if (priv->vlans[i].bridge == bridge) {
1367			fid = priv->vlans[i].fid;
1368			break;
1369		}
1370	}
1371
1372	if (fid == -1) {
1373		dev_err(priv->dev, "no FID found for bridge %s\n",
1374			bridge->name);
1375		return -EINVAL;
1376	}
1377
1378	mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
1379	mac_bridge.key_mode = true;
1380	mac_bridge.key[0] = addr[5] | (addr[4] << 8);
1381	mac_bridge.key[1] = addr[3] | (addr[2] << 8);
1382	mac_bridge.key[2] = addr[1] | (addr[0] << 8);
1383	mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid);
1384	mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
1385	mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC;
1386	mac_bridge.valid = add;
1387
1388	err = gswip_pce_table_entry_write(priv, &mac_bridge);
1389	if (err)
1390		dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
1391
1392	return err;
1393}
1394
1395static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
1396			      const unsigned char *addr, u16 vid,
1397			      struct dsa_db db)
1398{
1399	return gswip_port_fdb(ds, port, addr, vid, true);
1400}
1401
1402static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
1403			      const unsigned char *addr, u16 vid,
1404			      struct dsa_db db)
1405{
1406	return gswip_port_fdb(ds, port, addr, vid, false);
1407}
1408
1409static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
1410			       dsa_fdb_dump_cb_t *cb, void *data)
1411{
1412	struct gswip_priv *priv = ds->priv;
1413	struct gswip_pce_table_entry mac_bridge = {0,};
1414	unsigned char addr[ETH_ALEN];
1415	int i;
1416	int err;
1417
1418	for (i = 0; i < 2048; i++) {
1419		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
1420		mac_bridge.index = i;
1421
1422		err = gswip_pce_table_entry_read(priv, &mac_bridge);
1423		if (err) {
1424			dev_err(priv->dev,
1425				"failed to read mac bridge entry %d: %d\n",
1426				i, err);
1427			return err;
1428		}
1429
1430		if (!mac_bridge.valid)
1431			continue;
1432
1433		addr[5] = mac_bridge.key[0] & 0xff;
1434		addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
1435		addr[3] = mac_bridge.key[1] & 0xff;
1436		addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
1437		addr[1] = mac_bridge.key[2] & 0xff;
1438		addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
1439		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) {
1440			if (mac_bridge.val[0] & BIT(port)) {
1441				err = cb(addr, 0, true, data);
1442				if (err)
1443					return err;
1444			}
1445		} else {
1446			if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
1447					      mac_bridge.val[0])) {
1448				err = cb(addr, 0, false, data);
1449				if (err)
1450					return err;
1451			}
1452		}
1453	}
1454	return 0;
1455}
1456
1457static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
1458{
1459	/* Includes 8 bytes for special header. */
1460	return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
1461}
1462
1463static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
1464{
1465	struct gswip_priv *priv = ds->priv;
1466
 1467	/* The CPU port always has the maximum MTU of the user ports, so use
 1468	 * it to set the switch frame size, including the 8 byte special header.
1469	 */
1470	if (dsa_is_cpu_port(ds, port)) {
1471		new_mtu += 8;
1472		gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
1473			       GSWIP_MAC_FLEN);
1474	}
1475
1476	/* Enable MLEN for ports with non-standard MTUs, including the special
1477	 * header on the CPU port added above.
1478	 */
1479	if (new_mtu != ETH_DATA_LEN)
1480		gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
1481				  GSWIP_MAC_CTRL_2p(port));
1482	else
1483		gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
1484				  GSWIP_MAC_CTRL_2p(port));
1485
1486	return 0;
1487}
1488
1489static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
1490					  struct phylink_config *config)
1491{
1492	switch (port) {
1493	case 0:
1494	case 1:
1495		phy_interface_set_rgmii(config->supported_interfaces);
1496		__set_bit(PHY_INTERFACE_MODE_MII,
1497			  config->supported_interfaces);
1498		__set_bit(PHY_INTERFACE_MODE_REVMII,
1499			  config->supported_interfaces);
1500		__set_bit(PHY_INTERFACE_MODE_RMII,
1501			  config->supported_interfaces);
1502		break;
1503
1504	case 2:
1505	case 3:
1506	case 4:
1507	case 6:
1508		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
1509			  config->supported_interfaces);
1510		break;
1511
1512	case 5:
1513		phy_interface_set_rgmii(config->supported_interfaces);
1514		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
1515			  config->supported_interfaces);
1516		break;
1517	}
1518
1519	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1520		MAC_10 | MAC_100 | MAC_1000;
1521}
1522
1523static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
1524					  struct phylink_config *config)
1525{
1526	switch (port) {
1527	case 0:
1528		phy_interface_set_rgmii(config->supported_interfaces);
1529		__set_bit(PHY_INTERFACE_MODE_GMII,
1530			  config->supported_interfaces);
1531		__set_bit(PHY_INTERFACE_MODE_RMII,
1532			  config->supported_interfaces);
1533		break;
1534
1535	case 1:
1536	case 2:
1537	case 3:
1538	case 4:
1539	case 6:
1540		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
1541			  config->supported_interfaces);
1542		break;
1543
1544	case 5:
1545		phy_interface_set_rgmii(config->supported_interfaces);
1546		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
1547			  config->supported_interfaces);
1548		__set_bit(PHY_INTERFACE_MODE_RMII,
1549			  config->supported_interfaces);
1550		break;
1551	}
1552
1553	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1554		MAC_10 | MAC_100 | MAC_1000;
1555}
1556
1557static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
1558{
1559	u32 mdio_phy;
1560
1561	if (link)
1562		mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
1563	else
1564		mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
1565
1566	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
1567			GSWIP_MDIO_PHYp(port));
1568}
1569
1570static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
1571				 phy_interface_t interface)
1572{
1573	u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
1574
1575	switch (speed) {
1576	case SPEED_10:
1577		mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
1578
1579		if (interface == PHY_INTERFACE_MODE_RMII)
1580			mii_cfg = GSWIP_MII_CFG_RATE_M50;
1581		else
1582			mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
1583
1584		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
1585		break;
1586
1587	case SPEED_100:
1588		mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
1589
1590		if (interface == PHY_INTERFACE_MODE_RMII)
1591			mii_cfg = GSWIP_MII_CFG_RATE_M50;
1592		else
1593			mii_cfg = GSWIP_MII_CFG_RATE_M25;
1594
1595		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
1596		break;
1597
1598	case SPEED_1000:
1599		mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
1600
1601		mii_cfg = GSWIP_MII_CFG_RATE_M125;
1602
1603		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
1604		break;
1605	}
1606
1607	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
1608			GSWIP_MDIO_PHYp(port));
1609	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
1610	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
1611			  GSWIP_MAC_CTRL_0p(port));
1612}
1613
1614static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
1615{
1616	u32 mac_ctrl_0, mdio_phy;
1617
1618	if (duplex == DUPLEX_FULL) {
1619		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
1620		mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
1621	} else {
1622		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
1623		mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
1624	}
1625
1626	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
1627			  GSWIP_MAC_CTRL_0p(port));
1628	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
1629			GSWIP_MDIO_PHYp(port));
1630}
1631
1632static void gswip_port_set_pause(struct gswip_priv *priv, int port,
1633				 bool tx_pause, bool rx_pause)
1634{
1635	u32 mac_ctrl_0, mdio_phy;
1636
1637	if (tx_pause && rx_pause) {
1638		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
1639		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
1640			   GSWIP_MDIO_PHY_FCONRX_EN;
1641	} else if (tx_pause) {
1642		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
1643		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
1644			   GSWIP_MDIO_PHY_FCONRX_DIS;
1645	} else if (rx_pause) {
1646		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
1647		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
1648			   GSWIP_MDIO_PHY_FCONRX_EN;
1649	} else {
1650		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
1651		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
1652			   GSWIP_MDIO_PHY_FCONRX_DIS;
1653	}
1654
1655	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
1656			  mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
1657	gswip_mdio_mask(priv,
1658			GSWIP_MDIO_PHY_FCONTX_MASK |
1659			GSWIP_MDIO_PHY_FCONRX_MASK,
1660			mdio_phy, GSWIP_MDIO_PHYp(port));
1661}
1662
1663static void gswip_phylink_mac_config(struct phylink_config *config,
1664				     unsigned int mode,
1665				     const struct phylink_link_state *state)
1666{
1667	struct dsa_port *dp = dsa_phylink_to_port(config);
1668	struct gswip_priv *priv = dp->ds->priv;
1669	int port = dp->index;
1670	u32 miicfg = 0;
1671
1672	miicfg |= GSWIP_MII_CFG_LDCLKDIS;
1673
1674	switch (state->interface) {
1675	case PHY_INTERFACE_MODE_MII:
1676	case PHY_INTERFACE_MODE_INTERNAL:
1677		miicfg |= GSWIP_MII_CFG_MODE_MIIM;
1678		break;
1679	case PHY_INTERFACE_MODE_REVMII:
1680		miicfg |= GSWIP_MII_CFG_MODE_MIIP;
1681		break;
1682	case PHY_INTERFACE_MODE_RMII:
1683		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
1684		break;
1685	case PHY_INTERFACE_MODE_RGMII:
1686	case PHY_INTERFACE_MODE_RGMII_ID:
1687	case PHY_INTERFACE_MODE_RGMII_RXID:
1688	case PHY_INTERFACE_MODE_RGMII_TXID:
1689		miicfg |= GSWIP_MII_CFG_MODE_RGMII;
1690		break;
1691	case PHY_INTERFACE_MODE_GMII:
1692		miicfg |= GSWIP_MII_CFG_MODE_GMII;
1693		break;
1694	default:
1695		dev_err(dp->ds->dev,
1696			"Unsupported interface: %d\n", state->interface);
1697		return;
1698	}
1699
1700	gswip_mii_mask_cfg(priv,
1701			   GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
1702			   GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
1703			   miicfg, port);
1704
1705	switch (state->interface) {
1706	case PHY_INTERFACE_MODE_RGMII_ID:
1707		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
1708					  GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
1709		break;
1710	case PHY_INTERFACE_MODE_RGMII_RXID:
1711		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
1712		break;
1713	case PHY_INTERFACE_MODE_RGMII_TXID:
1714		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
1715		break;
1716	default:
1717		break;
1718	}
1719}
1720
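/* Disable the xMII interface of the port. The forced link-down in the MDIO
 * PHY register is only applied to user ports; the CPU port link is left
 * untouched.
 */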
1721static void gswip_phylink_mac_link_down(struct phylink_config *config,
1722					unsigned int mode,
1723					phy_interface_t interface)
1724{
1725	struct dsa_port *dp = dsa_phylink_to_port(config);
1726	struct gswip_priv *priv = dp->ds->priv;
1727	int port = dp->index;
1728
1729	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
1730
1731	if (!dsa_port_is_cpu(dp))
1732		gswip_port_set_link(priv, port, false);
1733}
1734
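/* Apply the resolved link parameters (link state, speed, duplex, pause) to
 * user ports and re-enable the xMII interface. For the CPU port only the
 * xMII interface is enabled.
 */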
1735static void gswip_phylink_mac_link_up(struct phylink_config *config,
1736				      struct phy_device *phydev,
1737				      unsigned int mode,
1738				      phy_interface_t interface,
1739				      int speed, int duplex,
1740				      bool tx_pause, bool rx_pause)
1741{
1742	struct dsa_port *dp = dsa_phylink_to_port(config);
1743	struct gswip_priv *priv = dp->ds->priv;
1744	int port = dp->index;
1745
1746	if (!dsa_port_is_cpu(dp)) {
1747		gswip_port_set_link(priv, port, true);
1748		gswip_port_set_speed(priv, port, speed, interface);
1749		gswip_port_set_duplex(priv, port, duplex);
1750		gswip_port_set_pause(priv, port, tx_pause, rx_pause);
1751	}
1752
1753	gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
1754}
1755
1756static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
1757			      uint8_t *data)
1758{
1759	int i;
1760
1761	if (stringset != ETH_SS_STATS)
1762		return;
1763
1764	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
1765		ethtool_puts(&data, gswip_rmon_cnt[i].name);
1766}
1767
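/* Read one 32-bit entry from the BM RAM: select the index and table, start
 * the access via the BAS bit, wait for completion and combine the two
 * 16-bit value registers. Returns 0 if the access times out.
 */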
1768static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
1769				    u32 index)
1770{
1771	u32 result;
1772	int err;
1773
1774	gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
1775	gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
1776				GSWIP_BM_RAM_CTRL_OPMOD,
1777			      table | GSWIP_BM_RAM_CTRL_BAS,
1778			      GSWIP_BM_RAM_CTRL);
1779
1780	err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
1781				     GSWIP_BM_RAM_CTRL_BAS);
1782	if (err) {
1783		dev_err(priv->dev, "timeout while reading table: %u, index: %u\n",
1784			table, index);
1785		return 0;
1786	}
1787
1788	result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
1789	result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;
1790
1791	return result;
1792}
1793
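/* The RMON counters are read from the BM RAM table of the port. Counters
 * spanning two entries are 64 bit wide; the second entry holds the upper
 * 32 bits.
 */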
1794static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
1795				    uint64_t *data)
1796{
1797	struct gswip_priv *priv = ds->priv;
1798	const struct gswip_rmon_cnt_desc *rmon_cnt;
1799	int i;
1800	u64 high;
1801
1802	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
1803		rmon_cnt = &gswip_rmon_cnt[i];
1804
1805		data[i] = gswip_bcm_ram_entry_read(priv, port,
1806						   rmon_cnt->offset);
1807		if (rmon_cnt->size == 2) {
1808			high = gswip_bcm_ram_entry_read(priv, port,
1809							rmon_cnt->offset + 1);
1810			data[i] |= high << 32;
1811		}
1812	}
1813}
1814
1815static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
1816{
1817	if (sset != ETH_SS_STATS)
1818		return 0;
1819
1820	return ARRAY_SIZE(gswip_rmon_cnt);
1821}
1822
1823static const struct phylink_mac_ops gswip_phylink_mac_ops = {
1824	.mac_config	= gswip_phylink_mac_config,
1825	.mac_link_down	= gswip_phylink_mac_link_down,
1826	.mac_link_up	= gswip_phylink_mac_link_up,
1827};
1828
1829static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
1830	.get_tag_protocol	= gswip_get_tag_protocol,
1831	.setup			= gswip_setup,
1832	.port_enable		= gswip_port_enable,
1833	.port_disable		= gswip_port_disable,
1834	.port_bridge_join	= gswip_port_bridge_join,
1835	.port_bridge_leave	= gswip_port_bridge_leave,
1836	.port_fast_age		= gswip_port_fast_age,
1837	.port_vlan_filtering	= gswip_port_vlan_filtering,
1838	.port_vlan_add		= gswip_port_vlan_add,
1839	.port_vlan_del		= gswip_port_vlan_del,
1840	.port_stp_state_set	= gswip_port_stp_state_set,
1841	.port_fdb_add		= gswip_port_fdb_add,
1842	.port_fdb_del		= gswip_port_fdb_del,
1843	.port_fdb_dump		= gswip_port_fdb_dump,
1844	.port_change_mtu	= gswip_port_change_mtu,
1845	.port_max_mtu		= gswip_port_max_mtu,
1846	.phylink_get_caps	= gswip_xrx200_phylink_get_caps,
1847	.get_strings		= gswip_get_strings,
1848	.get_ethtool_stats	= gswip_get_ethtool_stats,
1849	.get_sset_count		= gswip_get_sset_count,
1850};
1851
1852static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
1853	.get_tag_protocol	= gswip_get_tag_protocol,
1854	.setup			= gswip_setup,
1855	.port_enable		= gswip_port_enable,
1856	.port_disable		= gswip_port_disable,
1857	.port_bridge_join	= gswip_port_bridge_join,
1858	.port_bridge_leave	= gswip_port_bridge_leave,
1859	.port_fast_age		= gswip_port_fast_age,
1860	.port_vlan_filtering	= gswip_port_vlan_filtering,
1861	.port_vlan_add		= gswip_port_vlan_add,
1862	.port_vlan_del		= gswip_port_vlan_del,
1863	.port_stp_state_set	= gswip_port_stp_state_set,
1864	.port_fdb_add		= gswip_port_fdb_add,
1865	.port_fdb_del		= gswip_port_fdb_del,
1866	.port_fdb_dump		= gswip_port_fdb_dump,
1867	.port_change_mtu	= gswip_port_change_mtu,
1868	.port_max_mtu		= gswip_port_max_mtu,
1869	.phylink_get_caps	= gswip_xrx300_phylink_get_caps,
1870	.get_strings		= gswip_get_strings,
1871	.get_ethtool_stats	= gswip_get_ethtool_stats,
1872	.get_sset_count		= gswip_get_sset_count,
1873};
1874
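/* GPHY firmware images per SoC family and silicon revision. Each set
 * provides a Fast Ethernet (PHY22F) and a Gigabit Ethernet (PHY11G) image.
 */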
1875static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
1876	.fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
1877	.ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
1878};
1879
1880static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
1881	.fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
1882	.ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
1883};
1884
1885static const struct xway_gphy_match_data xrx300_gphy_data = {
1886	.fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
1887	.ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
1888};
1889
1890static const struct of_device_id xway_gphy_match[] __maybe_unused = {
1891	{ .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
1892	{ .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
1893	{ .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
1894	{ .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
1895	{ .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
1896	{},
1897};
1898
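/* Load the firmware for one GPHY core: enable its clock gate, hold it in
 * reset, copy the firmware image into a 16 kB aligned DMA buffer, write the
 * buffer address to the RCU firmware address register and release the reset
 * so the GPHY can boot from it.
 */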
1899static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
1900{
1901	struct device *dev = priv->dev;
1902	const struct firmware *fw;
1903	void *fw_addr;
1904	dma_addr_t dma_addr;
1905	dma_addr_t dev_addr;
1906	size_t size;
1907	int ret;
1908
1909	ret = clk_prepare_enable(gphy_fw->clk_gate);
1910	if (ret)
1911		return ret;
1912
1913	reset_control_assert(gphy_fw->reset);
1914
1915	/* The vendor BSP uses a 200ms delay after asserting the reset line.
1916	 * Without this delay some users have observed that the PHY does not
1917	 * come up on the MDIO bus.
1918	 */
1919	msleep(200);
1920
1921	ret = request_firmware(&fw, gphy_fw->fw_name, dev);
1922	if (ret)
1923		return dev_err_probe(dev, ret, "failed to load firmware: %s\n",
1924				     gphy_fw->fw_name);
1925
1926	/* GPHY cores need the firmware code in a persistent and contiguous
1927	 * memory area with a 16 kB boundary aligned start address.
1928	 */
1929	size = fw->size + XRX200_GPHY_FW_ALIGN;
1930
1931	fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
1932	if (!fw_addr) {
1933		release_firmware(fw);
1934		return dev_err_probe(dev, -ENOMEM,
1935				     "failed to alloc firmware memory\n");
1936	}
1937
1938	fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
1939	dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
1940	memcpy(fw_addr, fw->data, fw->size);
1941
1942	release_firmware(fw);
1943
1944	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
1945	if (ret)
1946		return ret;
1947
1948	reset_control_deassert(gphy_fw->reset);
1949
1950	return ret;
1951}
1952
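/* Parse one GPHY firmware child node: look up the gate clock and reset line,
 * read the RCU register offset from "reg" and select the FE or GE firmware
 * image based on "lantiq,gphy-mode" (GE is used when the property is
 * missing).
 */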
1953static int gswip_gphy_fw_probe(struct gswip_priv *priv,
1954			       struct gswip_gphy_fw *gphy_fw,
1955			       struct device_node *gphy_fw_np, int i)
1956{
1957	struct device *dev = priv->dev;
1958	u32 gphy_mode;
1959	int ret;
1960	char gphyname[10];
1961
1962	snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
1963
1964	gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
1965	if (IS_ERR(gphy_fw->clk_gate)) {
1966		return dev_err_probe(dev, PTR_ERR(gphy_fw->clk_gate),
1967				     "Failed to lookup gate clock\n");
1968	}
1969
1970	ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
1971	if (ret)
1972		return ret;
1973
1974	ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
1975	/* Default to GE mode */
1976	if (ret)
1977		gphy_mode = GPHY_MODE_GE;
1978
1979	switch (gphy_mode) {
1980	case GPHY_MODE_FE:
1981		gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
1982		break;
1983	case GPHY_MODE_GE:
1984		gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
1985		break;
1986	default:
1987		return dev_err_probe(dev, -EINVAL, "Unknown GPHY mode %d\n",
1988				     gphy_mode);
1989	}
1990
1991	gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
1992	if (IS_ERR(gphy_fw->reset))
1993		return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
1994				     "Failed to lookup gphy reset\n");
1995
1996	return gswip_gphy_fw_load(priv, gphy_fw);
1997}
1998
1999static void gswip_gphy_fw_remove(struct gswip_priv *priv,
2000				 struct gswip_gphy_fw *gphy_fw)
2001{
2002	int ret;
2003
2004	/* check if the device was fully probed */
2005	if (!gphy_fw->fw_name)
2006		return;
2007
2008	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
2009	if (ret)
2010		dev_err(priv->dev, "cannot reset GPHY FW pointer\n");
2011
2012	clk_disable_unprepare(gphy_fw->clk_gate);
2013
2014	reset_control_put(gphy_fw->reset);
2015}
2016
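/* Process the GPHY firmware list node: pick the firmware set matching the
 * detected GSWIP version (or the compatible string), map the RCU syscon and
 * load the firmware for every available child node. On failure all already
 * loaded GPHY firmwares are removed again.
 */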
2017static int gswip_gphy_fw_list(struct gswip_priv *priv,
2018			      struct device_node *gphy_fw_list_np, u32 version)
2019{
2020	struct device *dev = priv->dev;
2021	struct device_node *gphy_fw_np;
2022	const struct of_device_id *match;
2023	int err;
2024	int i = 0;
2025
2026	/* The VRX200 rev 1.1 uses GSWIP 2.0 and needs the older GPHY
2027	 * firmware, while the VRX200 rev 1.2 uses GSWIP 2.1 and needs a
2028	 * different GPHY firmware.
2029	 */
2030	if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
2031		switch (version) {
2032		case GSWIP_VERSION_2_0:
2033			priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
2034			break;
2035		case GSWIP_VERSION_2_1:
2036			priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
2037			break;
2038		default:
2039			return dev_err_probe(dev, -ENOENT,
2040					     "unknown GSWIP version: 0x%x\n",
2041					     version);
2042		}
2043	}
2044
2045	match = of_match_node(xway_gphy_match, gphy_fw_list_np);
2046	if (match && match->data)
2047		priv->gphy_fw_name_cfg = match->data;
2048
2049	if (!priv->gphy_fw_name_cfg)
2050		return dev_err_probe(dev, -ENOENT,
2051				     "GPHY compatible type not supported\n");
2052
2053	priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
2054	if (!priv->num_gphy_fw)
2055		return -ENOENT;
2056
2057	priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
2058							   "lantiq,rcu");
2059	if (IS_ERR(priv->rcu_regmap))
2060		return PTR_ERR(priv->rcu_regmap);
2061
2062	priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
2063					   sizeof(*priv->gphy_fw),
2064					   GFP_KERNEL | __GFP_ZERO);
2065	if (!priv->gphy_fw)
2066		return -ENOMEM;
2067
2068	for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
2069		err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
2070					  gphy_fw_np, i);
2071		if (err) {
2072			of_node_put(gphy_fw_np);
2073			goto remove_gphy;
2074		}
2075		i++;
2076	}
2077
2078	/* The standalone PHY11G requires 300ms to be fully
2079	 * initialized and ready for any MDIO communication after being
2080	 * taken out of reset. For the SoC-internal GPHY variant there
2081	 * is no (known) documentation for the minimum time after a
2082	 * reset. Use the same value as for the standalone variant as
2083	 * some users have reported internal PHYs not being detected
2084	 * without any delay.
2085	 */
2086	msleep(300);
2087
2088	return 0;
2089
2090remove_gphy:
2091	for (i = 0; i < priv->num_gphy_fw; i++)
2092		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
2093	return err;
2094}
2095
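/* Map the switch, MDIO and xMII register blocks, verify that the compatible
 * string matches the GSWIP version found in hardware, load the GPHY
 * firmware, bring up the MDIO bus and finally register the DSA switch.
 */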
2096static int gswip_probe(struct platform_device *pdev)
2097{
2098	struct device_node *np, *gphy_fw_np;
2099	struct device *dev = &pdev->dev;
2100	struct gswip_priv *priv;
2101	int err;
2102	int i;
2103	u32 version;
2104
2105	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
2106	if (!priv)
2107		return -ENOMEM;
2108
2109	priv->gswip = devm_platform_ioremap_resource(pdev, 0);
2110	if (IS_ERR(priv->gswip))
2111		return PTR_ERR(priv->gswip);
2112
2113	priv->mdio = devm_platform_ioremap_resource(pdev, 1);
2114	if (IS_ERR(priv->mdio))
2115		return PTR_ERR(priv->mdio);
2116
2117	priv->mii = devm_platform_ioremap_resource(pdev, 2);
2118	if (IS_ERR(priv->mii))
2119		return PTR_ERR(priv->mii);
2120
2121	priv->hw_info = of_device_get_match_data(dev);
2122	if (!priv->hw_info)
2123		return -EINVAL;
2124
2125	priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
2126	if (!priv->ds)
2127		return -ENOMEM;
2128
2129	priv->ds->dev = dev;
2130	priv->ds->num_ports = priv->hw_info->max_ports;
2131	priv->ds->priv = priv;
2132	priv->ds->ops = priv->hw_info->ops;
2133	priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops;
2134	priv->dev = dev;
2135	mutex_init(&priv->pce_table_lock);
2136	version = gswip_switch_r(priv, GSWIP_VERSION);
2137
2138	np = dev->of_node;
2139	switch (version) {
2140	case GSWIP_VERSION_2_0:
2141	case GSWIP_VERSION_2_1:
2142		if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
2143			return -EINVAL;
2144		break;
2145	case GSWIP_VERSION_2_2:
2146	case GSWIP_VERSION_2_2_ETC:
2147		if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
2148		    !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
2149			return -EINVAL;
2150		break;
2151	default:
2152		return dev_err_probe(dev, -ENOENT,
2153				     "unknown GSWIP version: 0x%x\n", version);
2154	}
2155
2156	/* load the GPHY firmware if a "lantiq,gphy-fw" child node is present */
2157	gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
2158	if (gphy_fw_np) {
2159		err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
2160		of_node_put(gphy_fw_np);
2161		if (err)
2162			return dev_err_probe(dev, err,
2163					     "gphy fw probe failed\n");
2164	}
2165
2166	/* bring up the mdio bus */
2167	err = gswip_mdio(priv);
2168	if (err) {
2169		dev_err_probe(dev, err, "mdio probe failed\n");
2170		goto gphy_fw_remove;
2171	}
2172
2173	err = dsa_register_switch(priv->ds);
2174	if (err) {
2175		dev_err_probe(dev, err, "dsa switch registration failed\n");
2176		goto gphy_fw_remove;
2177	}
2178	if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
2179		err = dev_err_probe(dev, -EINVAL,
2180				    "wrong CPU port defined, HW only supports port: %i\n",
2181				    priv->hw_info->cpu_port);
2182		goto disable_switch;
2183	}
2184
2185	platform_set_drvdata(pdev, priv);
2186
2187	dev_info(dev, "probed GSWIP version %lx mod %lx\n",
2188		 (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
2189		 (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
2190	return 0;
2191
2192disable_switch:
2193	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
2194	dsa_unregister_switch(priv->ds);
2195gphy_fw_remove:
2196	for (i = 0; i < priv->num_gphy_fw; i++)
2197		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
2198	return err;
2199}
2200
2201static void gswip_remove(struct platform_device *pdev)
2202{
2203	struct gswip_priv *priv = platform_get_drvdata(pdev);
2204	int i;
2205
2206	if (!priv)
2207		return;
2208
2209	/* disable the switch */
2210	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
2211
2212	dsa_unregister_switch(priv->ds);
2213
2214	for (i = 0; i < priv->num_gphy_fw; i++)
2215		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
2216}
2217
2218static void gswip_shutdown(struct platform_device *pdev)
2219{
2220	struct gswip_priv *priv = platform_get_drvdata(pdev);
2221
2222	if (!priv)
2223		return;
2224
2225	dsa_switch_shutdown(priv->ds);
2226
2227	platform_set_drvdata(pdev, NULL);
2228}
2229
2230static const struct gswip_hw_info gswip_xrx200 = {
2231	.max_ports = 7,
2232	.cpu_port = 6,
2233	.ops = &gswip_xrx200_switch_ops,
2234};
2235
2236static const struct gswip_hw_info gswip_xrx300 = {
2237	.max_ports = 7,
2238	.cpu_port = 6,
2239	.ops = &gswip_xrx300_switch_ops,
2240};
2241
2242static const struct of_device_id gswip_of_match[] = {
2243	{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
2244	{ .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
2245	{ .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
2246	{},
2247};
2248MODULE_DEVICE_TABLE(of, gswip_of_match);
2249
2250static struct platform_driver gswip_driver = {
2251	.probe = gswip_probe,
2252	.remove = gswip_remove,
2253	.shutdown = gswip_shutdown,
2254	.driver = {
2255		.name = "gswip",
2256		.of_match_table = gswip_of_match,
2257	},
2258};
2259
2260module_platform_driver(gswip_driver);
2261
2262MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
2263MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
2264MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
2265MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
2266MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
2267MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
2268MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
2269MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
2270MODULE_LICENSE("GPL v2");