   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
   4 *
   5 * Copyright (C) 2010 Lantiq Deutschland
   6 * Copyright (C) 2012 John Crispin <john@phrozen.org>
   7 * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
   8 *
   9 * The VLAN and bridge model the GSWIP hardware uses does not directly
  10 * match the model DSA uses.
  11 *
  12 * The hardware has 64 possible table entries for bridges with one VLAN
  13 * ID, one flow ID and a list of ports for each bridge. All entries which
  14 * match the same flow ID are combined in the MAC learning table and
  15 * act as one global bridge.
  16 * The hardware does not support VLAN filtering per port, only per
  17 * bridge; this driver maps the DSA model onto this hardware model.
  18 *
  19 * The CPU gets all the exception frames which do not match any forwarding
  20 * rule and the CPU port is also added to all bridges. This makes it possible
  21 * to handle all the special cases easily in software.
  22 * At initialization the driver allocates one bridge table entry for
  23 * each switch port, which is used when the port is not part of an
  24 * explicit bridge. This prevents frames from being forwarded
  25 * between all LAN ports by default.
  26 */
  27
  28#include <linux/clk.h>
  29#include <linux/delay.h>
  30#include <linux/etherdevice.h>
  31#include <linux/firmware.h>
  32#include <linux/if_bridge.h>
  33#include <linux/if_vlan.h>
  34#include <linux/iopoll.h>
  35#include <linux/mfd/syscon.h>
  36#include <linux/module.h>
  37#include <linux/of_mdio.h>
  38#include <linux/of_net.h>
  39#include <linux/of_platform.h>
  40#include <linux/phy.h>
  41#include <linux/phylink.h>
  42#include <linux/platform_device.h>
  43#include <linux/regmap.h>
  44#include <linux/reset.h>
  45#include <net/dsa.h>
  46#include <dt-bindings/mips/lantiq_rcu_gphy.h>
  47
  48#include "lantiq_pce.h"
  49
  50/* GSWIP MDIO Registers */
  51#define GSWIP_MDIO_GLOB			0x00
  52#define  GSWIP_MDIO_GLOB_ENABLE		BIT(15)
  53#define GSWIP_MDIO_CTRL			0x08
  54#define  GSWIP_MDIO_CTRL_BUSY		BIT(12)
  55#define  GSWIP_MDIO_CTRL_RD		BIT(11)
  56#define  GSWIP_MDIO_CTRL_WR		BIT(10)
  57#define  GSWIP_MDIO_CTRL_PHYAD_MASK	0x1f
  58#define  GSWIP_MDIO_CTRL_PHYAD_SHIFT	5
  59#define  GSWIP_MDIO_CTRL_REGAD_MASK	0x1f
  60#define GSWIP_MDIO_READ			0x09
  61#define GSWIP_MDIO_WRITE		0x0A
  62#define GSWIP_MDIO_MDC_CFG0		0x0B
  63#define GSWIP_MDIO_MDC_CFG1		0x0C
  64#define GSWIP_MDIO_PHYp(p)		(0x15 - (p))
  65#define  GSWIP_MDIO_PHY_LINK_MASK	0x6000
  66#define  GSWIP_MDIO_PHY_LINK_AUTO	0x0000
  67#define  GSWIP_MDIO_PHY_LINK_DOWN	0x4000
  68#define  GSWIP_MDIO_PHY_LINK_UP		0x2000
  69#define  GSWIP_MDIO_PHY_SPEED_MASK	0x1800
  70#define  GSWIP_MDIO_PHY_SPEED_AUTO	0x1800
  71#define  GSWIP_MDIO_PHY_SPEED_M10	0x0000
  72#define  GSWIP_MDIO_PHY_SPEED_M100	0x0800
  73#define  GSWIP_MDIO_PHY_SPEED_G1	0x1000
  74#define  GSWIP_MDIO_PHY_FDUP_MASK	0x0600
  75#define  GSWIP_MDIO_PHY_FDUP_AUTO	0x0000
  76#define  GSWIP_MDIO_PHY_FDUP_EN		0x0200
  77#define  GSWIP_MDIO_PHY_FDUP_DIS	0x0600
  78#define  GSWIP_MDIO_PHY_FCONTX_MASK	0x0180
  79#define  GSWIP_MDIO_PHY_FCONTX_AUTO	0x0000
  80#define  GSWIP_MDIO_PHY_FCONTX_EN	0x0100
  81#define  GSWIP_MDIO_PHY_FCONTX_DIS	0x0180
  82#define  GSWIP_MDIO_PHY_FCONRX_MASK	0x0060
  83#define  GSWIP_MDIO_PHY_FCONRX_AUTO	0x0000
  84#define  GSWIP_MDIO_PHY_FCONRX_EN	0x0020
  85#define  GSWIP_MDIO_PHY_FCONRX_DIS	0x0060
  86#define  GSWIP_MDIO_PHY_ADDR_MASK	0x001f
  87#define  GSWIP_MDIO_PHY_MASK		(GSWIP_MDIO_PHY_ADDR_MASK | \
  88					 GSWIP_MDIO_PHY_FCONRX_MASK | \
  89					 GSWIP_MDIO_PHY_FCONTX_MASK | \
  90					 GSWIP_MDIO_PHY_LINK_MASK | \
  91					 GSWIP_MDIO_PHY_SPEED_MASK | \
  92					 GSWIP_MDIO_PHY_FDUP_MASK)
  93
  94/* GSWIP MII Registers */
  95#define GSWIP_MII_CFGp(p)		(0x2 * (p))
  96#define  GSWIP_MII_CFG_RESET		BIT(15)
  97#define  GSWIP_MII_CFG_EN		BIT(14)
  98#define  GSWIP_MII_CFG_ISOLATE		BIT(13)
  99#define  GSWIP_MII_CFG_LDCLKDIS		BIT(12)
 100#define  GSWIP_MII_CFG_RGMII_IBS	BIT(8)
 101#define  GSWIP_MII_CFG_RMII_CLK		BIT(7)
 102#define  GSWIP_MII_CFG_MODE_MIIP	0x0
 103#define  GSWIP_MII_CFG_MODE_MIIM	0x1
 104#define  GSWIP_MII_CFG_MODE_RMIIP	0x2
 105#define  GSWIP_MII_CFG_MODE_RMIIM	0x3
 106#define  GSWIP_MII_CFG_MODE_RGMII	0x4
 107#define  GSWIP_MII_CFG_MODE_GMII	0x9
 108#define  GSWIP_MII_CFG_MODE_MASK	0xf
 109#define  GSWIP_MII_CFG_RATE_M2P5	0x00
 110#define  GSWIP_MII_CFG_RATE_M25	0x10
 111#define  GSWIP_MII_CFG_RATE_M125	0x20
 112#define  GSWIP_MII_CFG_RATE_M50	0x30
 113#define  GSWIP_MII_CFG_RATE_AUTO	0x40
 114#define  GSWIP_MII_CFG_RATE_MASK	0x70
 115#define GSWIP_MII_PCDU0			0x01
 116#define GSWIP_MII_PCDU1			0x03
 117#define GSWIP_MII_PCDU5			0x05
 118#define  GSWIP_MII_PCDU_TXDLY_MASK	GENMASK(2, 0)
 119#define  GSWIP_MII_PCDU_RXDLY_MASK	GENMASK(9, 7)
 120
 121/* GSWIP Core Registers */
 122#define GSWIP_SWRES			0x000
 123#define  GSWIP_SWRES_R1			BIT(1)	/* GSWIP Software reset */
 124#define  GSWIP_SWRES_R0			BIT(0)	/* GSWIP Hardware reset */
 125#define GSWIP_VERSION			0x013
 126#define  GSWIP_VERSION_REV_SHIFT	0
 127#define  GSWIP_VERSION_REV_MASK		GENMASK(7, 0)
 128#define  GSWIP_VERSION_MOD_SHIFT	8
 129#define  GSWIP_VERSION_MOD_MASK		GENMASK(15, 8)
 130#define   GSWIP_VERSION_2_0		0x100
 131#define   GSWIP_VERSION_2_1		0x021
 132#define   GSWIP_VERSION_2_2		0x122
 133#define   GSWIP_VERSION_2_2_ETC		0x022
 134
 135#define GSWIP_BM_RAM_VAL(x)		(0x043 - (x))
 136#define GSWIP_BM_RAM_ADDR		0x044
 137#define GSWIP_BM_RAM_CTRL		0x045
 138#define  GSWIP_BM_RAM_CTRL_BAS		BIT(15)
 139#define  GSWIP_BM_RAM_CTRL_OPMOD	BIT(5)
 140#define  GSWIP_BM_RAM_CTRL_ADDR_MASK	GENMASK(4, 0)
 141#define GSWIP_BM_QUEUE_GCTRL		0x04A
 142#define  GSWIP_BM_QUEUE_GCTRL_GL_MOD	BIT(10)
 143/* buffer management Port Configuration Register */
 144#define GSWIP_BM_PCFGp(p)		(0x080 + ((p) * 2))
 145#define  GSWIP_BM_PCFG_CNTEN		BIT(0)	/* RMON Counter Enable */
  146#define  GSWIP_BM_PCFG_IGCNT		BIT(1)	/* Ingress Special Tag RMON count */
 147/* buffer management Port Control Register */
 148#define GSWIP_BM_RMON_CTRLp(p)		(0x81 + ((p) * 2))
 149#define  GSWIP_BM_CTRL_RMON_RAM1_RES	BIT(0)	/* Software Reset for RMON RAM 1 */
 150#define  GSWIP_BM_CTRL_RMON_RAM2_RES	BIT(1)	/* Software Reset for RMON RAM 2 */
 151
 152/* PCE */
 153#define GSWIP_PCE_TBL_KEY(x)		(0x447 - (x))
 154#define GSWIP_PCE_TBL_MASK		0x448
 155#define GSWIP_PCE_TBL_VAL(x)		(0x44D - (x))
 156#define GSWIP_PCE_TBL_ADDR		0x44E
 157#define GSWIP_PCE_TBL_CTRL		0x44F
 158#define  GSWIP_PCE_TBL_CTRL_BAS		BIT(15)
 159#define  GSWIP_PCE_TBL_CTRL_TYPE	BIT(13)
 160#define  GSWIP_PCE_TBL_CTRL_VLD		BIT(12)
 161#define  GSWIP_PCE_TBL_CTRL_KEYFORM	BIT(11)
 162#define  GSWIP_PCE_TBL_CTRL_GMAP_MASK	GENMASK(10, 7)
 163#define  GSWIP_PCE_TBL_CTRL_OPMOD_MASK	GENMASK(6, 5)
 164#define  GSWIP_PCE_TBL_CTRL_OPMOD_ADRD	0x00
 165#define  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR	0x20
 166#define  GSWIP_PCE_TBL_CTRL_OPMOD_KSRD	0x40
 167#define  GSWIP_PCE_TBL_CTRL_OPMOD_KSWR	0x60
 168#define  GSWIP_PCE_TBL_CTRL_ADDR_MASK	GENMASK(4, 0)
 169#define GSWIP_PCE_PMAP1			0x453	/* Monitoring port map */
 170#define GSWIP_PCE_PMAP2			0x454	/* Default Multicast port map */
 171#define GSWIP_PCE_PMAP3			0x455	/* Default Unknown Unicast port map */
 172#define GSWIP_PCE_GCTRL_0		0x456
 173#define  GSWIP_PCE_GCTRL_0_MTFL		BIT(0)  /* MAC Table Flushing */
 174#define  GSWIP_PCE_GCTRL_0_MC_VALID	BIT(3)
 175#define  GSWIP_PCE_GCTRL_0_VLAN		BIT(14) /* VLAN aware Switching */
 176#define GSWIP_PCE_GCTRL_1		0x457
 177#define  GSWIP_PCE_GCTRL_1_MAC_GLOCK	BIT(2)	/* MAC Address table lock */
 178#define  GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD	BIT(3) /* Mac address table lock forwarding mode */
 179#define GSWIP_PCE_PCTRL_0p(p)		(0x480 + ((p) * 0xA))
 180#define  GSWIP_PCE_PCTRL_0_TVM		BIT(5)	/* Transparent VLAN mode */
 181#define  GSWIP_PCE_PCTRL_0_VREP		BIT(6)	/* VLAN Replace Mode */
 182#define  GSWIP_PCE_PCTRL_0_INGRESS	BIT(11)	/* Accept special tag in ingress */
 183#define  GSWIP_PCE_PCTRL_0_PSTATE_LISTEN	0x0
 184#define  GSWIP_PCE_PCTRL_0_PSTATE_RX		0x1
 185#define  GSWIP_PCE_PCTRL_0_PSTATE_TX		0x2
 186#define  GSWIP_PCE_PCTRL_0_PSTATE_LEARNING	0x3
 187#define  GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING	0x7
 188#define  GSWIP_PCE_PCTRL_0_PSTATE_MASK	GENMASK(2, 0)
 189#define GSWIP_PCE_VCTRL(p)		(0x485 + ((p) * 0xA))
 190#define  GSWIP_PCE_VCTRL_UVR		BIT(0)	/* Unknown VLAN Rule */
 191#define  GSWIP_PCE_VCTRL_VIMR		BIT(3)	/* VLAN Ingress Member violation rule */
 192#define  GSWIP_PCE_VCTRL_VEMR		BIT(4)	/* VLAN Egress Member violation rule */
 193#define  GSWIP_PCE_VCTRL_VSR		BIT(5)	/* VLAN Security */
 194#define  GSWIP_PCE_VCTRL_VID0		BIT(6)	/* Priority Tagged Rule */
 195#define GSWIP_PCE_DEFPVID(p)		(0x486 + ((p) * 0xA))
 196
 197#define GSWIP_MAC_FLEN			0x8C5
 198#define GSWIP_MAC_CTRL_0p(p)		(0x903 + ((p) * 0xC))
 199#define  GSWIP_MAC_CTRL_0_PADEN		BIT(8)
 200#define  GSWIP_MAC_CTRL_0_FCS_EN	BIT(7)
 201#define  GSWIP_MAC_CTRL_0_FCON_MASK	0x0070
 202#define  GSWIP_MAC_CTRL_0_FCON_AUTO	0x0000
 203#define  GSWIP_MAC_CTRL_0_FCON_RX	0x0010
 204#define  GSWIP_MAC_CTRL_0_FCON_TX	0x0020
 205#define  GSWIP_MAC_CTRL_0_FCON_RXTX	0x0030
 206#define  GSWIP_MAC_CTRL_0_FCON_NONE	0x0040
 207#define  GSWIP_MAC_CTRL_0_FDUP_MASK	0x000C
 208#define  GSWIP_MAC_CTRL_0_FDUP_AUTO	0x0000
 209#define  GSWIP_MAC_CTRL_0_FDUP_EN	0x0004
 210#define  GSWIP_MAC_CTRL_0_FDUP_DIS	0x000C
 211#define  GSWIP_MAC_CTRL_0_GMII_MASK	0x0003
 212#define  GSWIP_MAC_CTRL_0_GMII_AUTO	0x0000
 213#define  GSWIP_MAC_CTRL_0_GMII_MII	0x0001
 214#define  GSWIP_MAC_CTRL_0_GMII_RGMII	0x0002
 215#define GSWIP_MAC_CTRL_2p(p)		(0x905 + ((p) * 0xC))
  216#define GSWIP_MAC_CTRL_2_MLEN		BIT(3) /* Maximum Untagged Frame Length */
 217
 218/* Ethernet Switch Fetch DMA Port Control Register */
 219#define GSWIP_FDMA_PCTRLp(p)		(0xA80 + ((p) * 0x6))
 220#define  GSWIP_FDMA_PCTRL_EN		BIT(0)	/* FDMA Port Enable */
 221#define  GSWIP_FDMA_PCTRL_STEN		BIT(1)	/* Special Tag Insertion Enable */
 222#define  GSWIP_FDMA_PCTRL_VLANMOD_MASK	GENMASK(4, 3)	/* VLAN Modification Control */
 223#define  GSWIP_FDMA_PCTRL_VLANMOD_SHIFT	3	/* VLAN Modification Control */
 224#define  GSWIP_FDMA_PCTRL_VLANMOD_DIS	(0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
 225#define  GSWIP_FDMA_PCTRL_VLANMOD_PRIO	(0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
 226#define  GSWIP_FDMA_PCTRL_VLANMOD_ID	(0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
 227#define  GSWIP_FDMA_PCTRL_VLANMOD_BOTH	(0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
 228
 229/* Ethernet Switch Store DMA Port Control Register */
 230#define GSWIP_SDMA_PCTRLp(p)		(0xBC0 + ((p) * 0x6))
 231#define  GSWIP_SDMA_PCTRL_EN		BIT(0)	/* SDMA Port Enable */
 232#define  GSWIP_SDMA_PCTRL_FCEN		BIT(1)	/* Flow Control Enable */
 233#define  GSWIP_SDMA_PCTRL_PAUFWD	BIT(3)	/* Pause Frame Forwarding */
 234
 235#define GSWIP_TABLE_ACTIVE_VLAN		0x01
 236#define GSWIP_TABLE_VLAN_MAPPING	0x02
 237#define GSWIP_TABLE_MAC_BRIDGE		0x0b
  238#define  GSWIP_TABLE_MAC_BRIDGE_STATIC	0x01	/* Static, non-aging entry */
 239
 240#define XRX200_GPHY_FW_ALIGN	(16 * 1024)
 241
 242struct gswip_hw_info {
 243	int max_ports;
 244	int cpu_port;
 245	const struct dsa_switch_ops *ops;
 246};
 247
 248struct xway_gphy_match_data {
 249	char *fe_firmware_name;
 250	char *ge_firmware_name;
 251};
 252
 253struct gswip_gphy_fw {
 254	struct clk *clk_gate;
 255	struct reset_control *reset;
 256	u32 fw_addr_offset;
 257	char *fw_name;
 258};
 259
 260struct gswip_vlan {
 261	struct net_device *bridge;
 262	u16 vid;
 263	u8 fid;
 264};
 265
 266struct gswip_priv {
 267	__iomem void *gswip;
 268	__iomem void *mdio;
 269	__iomem void *mii;
 270	const struct gswip_hw_info *hw_info;
 271	const struct xway_gphy_match_data *gphy_fw_name_cfg;
 272	struct dsa_switch *ds;
 273	struct device *dev;
 274	struct regmap *rcu_regmap;
 275	struct gswip_vlan vlans[64];
 276	int num_gphy_fw;
 277	struct gswip_gphy_fw *gphy_fw;
 278	u32 port_vlan_filter;
 279};
 280
 281struct gswip_pce_table_entry {
 282	u16 index;      // PCE_TBL_ADDR.ADDR = pData->table_index
 283	u16 table;      // PCE_TBL_CTRL.ADDR = pData->table
 284	u16 key[8];
 285	u16 val[5];
 286	u16 mask;
 287	u8 gmap;
 288	bool type;
 289	bool valid;
 290	bool key_mode;
 291};
 292
 293struct gswip_rmon_cnt_desc {
 294	unsigned int size;
 295	unsigned int offset;
 296	const char *name;
 297};
 298
 299#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
 300
 301static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
 302	/** Receive Packet Count (only packets that are accepted and not discarded). */
 303	MIB_DESC(1, 0x1F, "RxGoodPkts"),
 304	MIB_DESC(1, 0x23, "RxUnicastPkts"),
 305	MIB_DESC(1, 0x22, "RxMulticastPkts"),
 306	MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
 307	MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
 308	MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
 309	MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
 310	MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
 311	MIB_DESC(1, 0x20, "RxGoodPausePkts"),
 312	MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
 313	MIB_DESC(1, 0x12, "Rx64BytePkts"),
 314	MIB_DESC(1, 0x13, "Rx127BytePkts"),
 315	MIB_DESC(1, 0x14, "Rx255BytePkts"),
 316	MIB_DESC(1, 0x15, "Rx511BytePkts"),
 317	MIB_DESC(1, 0x16, "Rx1023BytePkts"),
 318	/** Receive Size 1024-1522 (or more, if configured) Packet Count. */
 319	MIB_DESC(1, 0x17, "RxMaxBytePkts"),
 320	MIB_DESC(1, 0x18, "RxDroppedPkts"),
 321	MIB_DESC(1, 0x19, "RxFilteredPkts"),
 322	MIB_DESC(2, 0x24, "RxGoodBytes"),
 323	MIB_DESC(2, 0x26, "RxBadBytes"),
 324	MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
 325	MIB_DESC(1, 0x0C, "TxGoodPkts"),
 326	MIB_DESC(1, 0x06, "TxUnicastPkts"),
 327	MIB_DESC(1, 0x07, "TxMulticastPkts"),
 328	MIB_DESC(1, 0x00, "Tx64BytePkts"),
 329	MIB_DESC(1, 0x01, "Tx127BytePkts"),
 330	MIB_DESC(1, 0x02, "Tx255BytePkts"),
 331	MIB_DESC(1, 0x03, "Tx511BytePkts"),
 332	MIB_DESC(1, 0x04, "Tx1023BytePkts"),
 333	/** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
 334	MIB_DESC(1, 0x05, "TxMaxBytePkts"),
 335	MIB_DESC(1, 0x08, "TxSingleCollCount"),
 336	MIB_DESC(1, 0x09, "TxMultCollCount"),
 337	MIB_DESC(1, 0x0A, "TxLateCollCount"),
 338	MIB_DESC(1, 0x0B, "TxExcessCollCount"),
 339	MIB_DESC(1, 0x0D, "TxPauseCount"),
 340	MIB_DESC(1, 0x10, "TxDroppedPkts"),
 341	MIB_DESC(2, 0x0E, "TxGoodBytes"),
 342};
 343
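/* Register access helpers: the GSWIP register offsets defined above are in
 * 32-bit words, so the helpers below scale them by 4 to get a byte offset
 * into the respective MMIO block (switch core, MDIO and xMII).
 */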
 344static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
 345{
 346	return __raw_readl(priv->gswip + (offset * 4));
 347}
 348
 349static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
 350{
 351	__raw_writel(val, priv->gswip + (offset * 4));
 352}
 353
 354static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
 355			      u32 offset)
 356{
 357	u32 val = gswip_switch_r(priv, offset);
 358
 359	val &= ~(clear);
 360	val |= set;
 361	gswip_switch_w(priv, val, offset);
 362}
 363
 364static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
 365				  u32 cleared)
 366{
 367	u32 val;
 368
 369	return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
 370				  (val & cleared) == 0, 20, 50000);
 371}
 372
 373static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
 374{
 375	return __raw_readl(priv->mdio + (offset * 4));
 376}
 377
 378static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
 379{
 380	__raw_writel(val, priv->mdio + (offset * 4));
 381}
 382
 383static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
 384			    u32 offset)
 385{
 386	u32 val = gswip_mdio_r(priv, offset);
 387
 388	val &= ~(clear);
 389	val |= set;
 390	gswip_mdio_w(priv, val, offset);
 391}
 392
 393static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
 394{
 395	return __raw_readl(priv->mii + (offset * 4));
 396}
 397
 398static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
 399{
 400	__raw_writel(val, priv->mii + (offset * 4));
 401}
 402
 403static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
 404			   u32 offset)
 405{
 406	u32 val = gswip_mii_r(priv, offset);
 407
 408	val &= ~(clear);
 409	val |= set;
 410	gswip_mii_w(priv, val, offset);
 411}
 412
 413static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
 414			       int port)
 415{
 416	/* There's no MII_CFG register for the CPU port */
 417	if (!dsa_is_cpu_port(priv->ds, port))
 418		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
 419}
 420
 421static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
 422				int port)
 423{
 424	switch (port) {
 425	case 0:
 426		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
 427		break;
 428	case 1:
 429		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
 430		break;
 431	case 5:
 432		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
 433		break;
 434	}
 435}
 436
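/* Wait for a previous MDIO transaction to finish: the BUSY bit in
 * GSWIP_MDIO_CTRL stays set while the controller is working, so poll it
 * (up to 100 times, 20-40 us apart) before starting a new access.
 */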
 437static int gswip_mdio_poll(struct gswip_priv *priv)
 438{
 439	int cnt = 100;
 440
 441	while (likely(cnt--)) {
 442		u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
 443
 444		if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
 445			return 0;
 446		usleep_range(20, 40);
 447	}
 448
 449	return -ETIMEDOUT;
 450}
 451
 452static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
 453{
 454	struct gswip_priv *priv = bus->priv;
 455	int err;
 456
 457	err = gswip_mdio_poll(priv);
 458	if (err) {
 459		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
 460		return err;
 461	}
 462
 463	gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
 464	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
 465		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
 466		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
 467		GSWIP_MDIO_CTRL);
 468
 469	return 0;
 470}
 471
 472static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
 473{
 474	struct gswip_priv *priv = bus->priv;
 475	int err;
 476
 477	err = gswip_mdio_poll(priv);
 478	if (err) {
 479		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
 480		return err;
 481	}
 482
 483	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
 484		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
 485		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
 486		GSWIP_MDIO_CTRL);
 487
 488	err = gswip_mdio_poll(priv);
 489	if (err) {
 490		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
 491		return err;
 492	}
 493
 494	return gswip_mdio_r(priv, GSWIP_MDIO_READ);
 495}
 496
 497static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
 498{
 499	struct dsa_switch *ds = priv->ds;
 500
 501	ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
 502	if (!ds->slave_mii_bus)
 503		return -ENOMEM;
 504
 505	ds->slave_mii_bus->priv = priv;
 506	ds->slave_mii_bus->read = gswip_mdio_rd;
 507	ds->slave_mii_bus->write = gswip_mdio_wr;
 508	ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
 509	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
 510		 dev_name(priv->dev));
 511	ds->slave_mii_bus->parent = priv->dev;
 512	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
 513
 514	return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
 515}
 516
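/* PCE table accesses share a common handshake: the entry index goes into
 * GSWIP_PCE_TBL_ADDR, the table and operation mode are selected in
 * GSWIP_PCE_TBL_CTRL, and the BAS bit starts the access. The hardware
 * clears BAS again once the lookup or update has finished.
 */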
 517static int gswip_pce_table_entry_read(struct gswip_priv *priv,
 518				      struct gswip_pce_table_entry *tbl)
 519{
 520	int i;
 521	int err;
 522	u16 crtl;
 523	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
 524					GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
 525
 526	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 527				     GSWIP_PCE_TBL_CTRL_BAS);
 528	if (err)
 529		return err;
 530
 531	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
 532	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
 533				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
 534			  tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
 535			  GSWIP_PCE_TBL_CTRL);
 536
 537	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 538				     GSWIP_PCE_TBL_CTRL_BAS);
 539	if (err)
 540		return err;
 541
 542	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
 543		tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
 544
 545	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
 546		tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
 547
 548	tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
 549
 550	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
 551
 552	tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
 553	tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
 554	tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
 555
 556	return 0;
 557}
 558
 559static int gswip_pce_table_entry_write(struct gswip_priv *priv,
 560				       struct gswip_pce_table_entry *tbl)
 561{
 562	int i;
 563	int err;
 564	u16 crtl;
 565	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
 566					GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
 567
 568	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 569				     GSWIP_PCE_TBL_CTRL_BAS);
 570	if (err)
 571		return err;
 572
 573	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
 574	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
 575				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
 576			  tbl->table | addr_mode,
 577			  GSWIP_PCE_TBL_CTRL);
 578
 579	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
 580		gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
 581
 582	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
 583		gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
 584
 585	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
 586				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
 587			  tbl->table | addr_mode,
 588			  GSWIP_PCE_TBL_CTRL);
 589
 590	gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
 591
 592	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
 593	crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
 594		  GSWIP_PCE_TBL_CTRL_GMAP_MASK);
 595	if (tbl->type)
 596		crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
 597	if (tbl->valid)
 598		crtl |= GSWIP_PCE_TBL_CTRL_VLD;
 599	crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
 600	crtl |= GSWIP_PCE_TBL_CTRL_BAS;
 601	gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
 602
 603	return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 604				      GSWIP_PCE_TBL_CTRL_BAS);
 605}
 606
 607/* Add the LAN port into a bridge with the CPU port by
 608 * default. This prevents automatic forwarding of
  609 * packets between the LAN ports when no explicit
 610 * bridge is configured.
 611 */
 612static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
 613{
 614	struct gswip_pce_table_entry vlan_active = {0,};
 615	struct gswip_pce_table_entry vlan_mapping = {0,};
 616	unsigned int cpu_port = priv->hw_info->cpu_port;
 617	unsigned int max_ports = priv->hw_info->max_ports;
 618	int err;
 619
 620	if (port >= max_ports) {
 621		dev_err(priv->dev, "single port for %i supported\n", port);
 622		return -EIO;
 623	}
 624
 625	vlan_active.index = port + 1;
 626	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
 627	vlan_active.key[0] = 0; /* vid */
 628	vlan_active.val[0] = port + 1 /* fid */;
 629	vlan_active.valid = add;
 630	err = gswip_pce_table_entry_write(priv, &vlan_active);
 631	if (err) {
 632		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
 633		return err;
 634	}
 635
 636	if (!add)
 637		return 0;
 638
 639	vlan_mapping.index = port + 1;
 640	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
 641	vlan_mapping.val[0] = 0 /* vid */;
 642	vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
 643	vlan_mapping.val[2] = 0;
 644	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
 645	if (err) {
 646		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
 647		return err;
 648	}
 649
 650	return 0;
 651}
 652
 653static int gswip_port_enable(struct dsa_switch *ds, int port,
 654			     struct phy_device *phydev)
 655{
 656	struct gswip_priv *priv = ds->priv;
 657	int err;
 658
 659	if (!dsa_is_user_port(ds, port))
 660		return 0;
 661
 662	if (!dsa_is_cpu_port(ds, port)) {
 663		err = gswip_add_single_port_br(priv, port, true);
 664		if (err)
 665			return err;
 666	}
 667
 668	/* RMON Counter Enable for port */
 669	gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
 670
 671	/* enable port fetch/store dma & VLAN Modification */
 672	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
 673				   GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
 674			 GSWIP_FDMA_PCTRLp(port));
 675	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
 676			  GSWIP_SDMA_PCTRLp(port));
 677
 678	if (!dsa_is_cpu_port(ds, port)) {
 679		u32 mdio_phy = 0;
 680
 681		if (phydev)
 682			mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
 683
 684		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
 685				GSWIP_MDIO_PHYp(port));
 686	}
 687
 688	return 0;
 689}
 690
 691static void gswip_port_disable(struct dsa_switch *ds, int port)
 692{
 693	struct gswip_priv *priv = ds->priv;
 694
 695	if (!dsa_is_user_port(ds, port))
 696		return;
 697
 698	gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
 699			  GSWIP_FDMA_PCTRLp(port));
 700	gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
 701			  GSWIP_SDMA_PCTRLp(port));
 702}
 703
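/* The packet classification engine runs a small microcode program
 * (gswip_pce_microcode from lantiq_pce.h). Each entry is written as four
 * 16-bit values through the PCE table interface; once the whole table is
 * loaded, the MC_VALID bit tells the switch to start using it.
 */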
 704static int gswip_pce_load_microcode(struct gswip_priv *priv)
 705{
 706	int i;
 707	int err;
 708
 709	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
 710				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
 711			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
 712	gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
 713
 714	for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
 715		gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
 716		gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
 717			       GSWIP_PCE_TBL_VAL(0));
 718		gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
 719			       GSWIP_PCE_TBL_VAL(1));
 720		gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
 721			       GSWIP_PCE_TBL_VAL(2));
 722		gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
 723			       GSWIP_PCE_TBL_VAL(3));
 724
 725		/* start the table access: */
 726		gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
 727				  GSWIP_PCE_TBL_CTRL);
 728		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 729					     GSWIP_PCE_TBL_CTRL_BAS);
 730		if (err)
 731			return err;
 732	}
 733
 734	/* tell the switch that the microcode is loaded */
 735	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
 736			  GSWIP_PCE_GCTRL_0);
 737
 738	return 0;
 739}
 740
 741static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
 742				     bool vlan_filtering,
 743				     struct netlink_ext_ack *extack)
 744{
 745	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
 746	struct gswip_priv *priv = ds->priv;
 747
 748	/* Do not allow changing the VLAN filtering options while in bridge */
 749	if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) {
 750		NL_SET_ERR_MSG_MOD(extack,
 751				   "Dynamic toggling of vlan_filtering not supported");
 752		return -EIO;
 753	}
 754
 755	if (vlan_filtering) {
  756		/* Use tag based VLAN */
 757		gswip_switch_mask(priv,
 758				  GSWIP_PCE_VCTRL_VSR,
 759				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
 760				  GSWIP_PCE_VCTRL_VEMR,
 761				  GSWIP_PCE_VCTRL(port));
 762		gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
 763				  GSWIP_PCE_PCTRL_0p(port));
 764	} else {
 765		/* Use port based VLAN tag */
 766		gswip_switch_mask(priv,
 767				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
 768				  GSWIP_PCE_VCTRL_VEMR,
 769				  GSWIP_PCE_VCTRL_VSR,
 770				  GSWIP_PCE_VCTRL(port));
 771		gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
 772				  GSWIP_PCE_PCTRL_0p(port));
 773	}
 774
 775	return 0;
 776}
 777
 778static int gswip_setup(struct dsa_switch *ds)
 779{
 780	struct gswip_priv *priv = ds->priv;
 781	unsigned int cpu_port = priv->hw_info->cpu_port;
 782	int i;
 783	int err;
 784
 785	gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
 786	usleep_range(5000, 10000);
 787	gswip_switch_w(priv, 0, GSWIP_SWRES);
 788
 789	/* disable port fetch/store dma on all ports */
 790	for (i = 0; i < priv->hw_info->max_ports; i++) {
 791		gswip_port_disable(ds, i);
 792		gswip_port_vlan_filtering(ds, i, false, NULL);
 793	}
 794
 795	/* enable Switch */
 796	gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
 797
 798	err = gswip_pce_load_microcode(priv);
 799	if (err) {
 800		dev_err(priv->dev, "writing PCE microcode failed, %i", err);
 801		return err;
 802	}
 803
 804	/* Default unknown Broadcast/Multicast/Unicast port maps */
 805	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
 806	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
 807	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
 808
  809	/* Deactivate MDIO PHY auto polling. Some PHYs such as the AR8030 have an
  810	 * interoperability problem with this auto polling mechanism because
  811	 * their status registers report a different link state than the actual
  812	 * one. The AR8030, for example, has the BMSR_ESTATEN bit set
  813	 * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
  814	 * auto polling state machine assume the link was negotiated at
  815	 * 1 Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
  816	 * to the switch port being completely dead (RX and TX are both not
  817	 * working).
  818	 * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
  819	 * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
  820	 * it would work fine for a few minutes to hours and then stop; on
  821	 * other devices no traffic could be sent or received at all.
 822	 * Testing shows that when PHY auto polling is disabled these problems
 823	 * go away.
 824	 */
 825	gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
 826
 827	/* Configure the MDIO Clock 2.5 MHz */
 828	gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
 829
  830	/* Disable the xMII interface and clear its isolation bit */
 831	for (i = 0; i < priv->hw_info->max_ports; i++)
 832		gswip_mii_mask_cfg(priv,
 833				   GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
 834				   0, i);
 835
 836	/* enable special tag insertion on cpu port */
 837	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
 838			  GSWIP_FDMA_PCTRLp(cpu_port));
 839
 840	/* accept special tag in ingress direction */
 841	gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
 842			  GSWIP_PCE_PCTRL_0p(cpu_port));
 843
 844	gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
 845			  GSWIP_MAC_CTRL_2p(cpu_port));
 846	gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
 847		       GSWIP_MAC_FLEN);
 848	gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
 849			  GSWIP_BM_QUEUE_GCTRL);
 850
 851	/* VLAN aware Switching */
 852	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
 853
 854	/* Flush MAC Table */
 855	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
 856
 857	err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
 858				     GSWIP_PCE_GCTRL_0_MTFL);
 859	if (err) {
 860		dev_err(priv->dev, "MAC flushing didn't finish\n");
 861		return err;
 862	}
 863
 864	gswip_port_enable(ds, cpu_port, NULL);
 865
 866	ds->configure_vlan_while_not_filtering = false;
 867
 868	return 0;
 869}
 870
 871static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
 872						    int port,
 873						    enum dsa_tag_protocol mp)
 874{
 875	return DSA_TAG_PROTO_GSWIP;
 876}
 877
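/* The first entries of the 64-entry Active VLAN table are used for the
 * single port bridges created in gswip_add_single_port_br() (index
 * port + 1), so entries for real bridges are allocated from max_ports
 * upwards.
 */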
 878static int gswip_vlan_active_create(struct gswip_priv *priv,
 879				    struct net_device *bridge,
 880				    int fid, u16 vid)
 881{
 882	struct gswip_pce_table_entry vlan_active = {0,};
 883	unsigned int max_ports = priv->hw_info->max_ports;
 884	int idx = -1;
 885	int err;
 886	int i;
 887
 888	/* Look for a free slot */
 889	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
 890		if (!priv->vlans[i].bridge) {
 891			idx = i;
 892			break;
 893		}
 894	}
 895
 896	if (idx == -1)
 897		return -ENOSPC;
 898
 899	if (fid == -1)
 900		fid = idx;
 901
 902	vlan_active.index = idx;
 903	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
 904	vlan_active.key[0] = vid;
 905	vlan_active.val[0] = fid;
 906	vlan_active.valid = true;
 907
 908	err = gswip_pce_table_entry_write(priv, &vlan_active);
 909	if (err) {
 910		dev_err(priv->dev, "failed to write active VLAN: %d\n",	err);
 911		return err;
 912	}
 913
 914	priv->vlans[idx].bridge = bridge;
 915	priv->vlans[idx].vid = vid;
 916	priv->vlans[idx].fid = fid;
 917
 918	return idx;
 919}
 920
 921static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
 922{
 923	struct gswip_pce_table_entry vlan_active = {0,};
 924	int err;
 925
 926	vlan_active.index = idx;
 927	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
 928	vlan_active.valid = false;
 929	err = gswip_pce_table_entry_write(priv, &vlan_active);
 930	if (err)
 931		dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
 932	priv->vlans[idx].bridge = NULL;
 933
 934	return err;
 935}
 936
 937static int gswip_vlan_add_unaware(struct gswip_priv *priv,
 938				  struct net_device *bridge, int port)
 939{
 940	struct gswip_pce_table_entry vlan_mapping = {0,};
 941	unsigned int max_ports = priv->hw_info->max_ports;
 942	unsigned int cpu_port = priv->hw_info->cpu_port;
 943	bool active_vlan_created = false;
 944	int idx = -1;
 945	int i;
 946	int err;
 947
  948	/* Check if there is already an entry for this bridge */
 949	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
 950		if (priv->vlans[i].bridge == bridge) {
 951			idx = i;
 952			break;
 953		}
 954	}
 955
  956	/* If this bridge is not programmed yet, add an Active VLAN table
 957	 * entry in a free slot and prepare the VLAN mapping table entry.
 958	 */
 959	if (idx == -1) {
 960		idx = gswip_vlan_active_create(priv, bridge, -1, 0);
 961		if (idx < 0)
 962			return idx;
 963		active_vlan_created = true;
 964
 965		vlan_mapping.index = idx;
 966		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
 967		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
 968		vlan_mapping.val[0] = 0;
 969	} else {
 970		/* Read the existing VLAN mapping entry from the switch */
 971		vlan_mapping.index = idx;
 972		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
 973		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
 974		if (err) {
 975			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
 976				err);
 977			return err;
 978		}
 979	}
 980
 981	/* Update the VLAN mapping entry and write it to the switch */
 982	vlan_mapping.val[1] |= BIT(cpu_port);
 983	vlan_mapping.val[1] |= BIT(port);
 984	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
 985	if (err) {
 986		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
  987		/* In case an Active VLAN was created, delete it again */
 988		if (active_vlan_created)
 989			gswip_vlan_active_remove(priv, idx);
 990		return err;
 991	}
 992
 993	gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
 994	return 0;
 995}
 996
 997static int gswip_vlan_add_aware(struct gswip_priv *priv,
 998				struct net_device *bridge, int port,
 999				u16 vid, bool untagged,
1000				bool pvid)
1001{
1002	struct gswip_pce_table_entry vlan_mapping = {0,};
1003	unsigned int max_ports = priv->hw_info->max_ports;
1004	unsigned int cpu_port = priv->hw_info->cpu_port;
1005	bool active_vlan_created = false;
1006	int idx = -1;
1007	int fid = -1;
1008	int i;
1009	int err;
1010
 1011	/* Check if there is already an entry for this bridge */
1012	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
1013		if (priv->vlans[i].bridge == bridge) {
1014			if (fid != -1 && fid != priv->vlans[i].fid)
1015				dev_err(priv->dev, "one bridge with multiple flow ids\n");
1016			fid = priv->vlans[i].fid;
1017			if (priv->vlans[i].vid == vid) {
1018				idx = i;
1019				break;
1020			}
1021		}
1022	}
1023
 1024	/* If this bridge is not programmed yet, add an Active VLAN table
1025	 * entry in a free slot and prepare the VLAN mapping table entry.
1026	 */
1027	if (idx == -1) {
1028		idx = gswip_vlan_active_create(priv, bridge, fid, vid);
1029		if (idx < 0)
1030			return idx;
1031		active_vlan_created = true;
1032
1033		vlan_mapping.index = idx;
1034		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
1035		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
1036		vlan_mapping.val[0] = vid;
1037	} else {
1038		/* Read the existing VLAN mapping entry from the switch */
1039		vlan_mapping.index = idx;
1040		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
1041		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
1042		if (err) {
1043			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
1044				err);
1045			return err;
1046		}
1047	}
1048
1049	vlan_mapping.val[0] = vid;
1050	/* Update the VLAN mapping entry and write it to the switch */
1051	vlan_mapping.val[1] |= BIT(cpu_port);
1052	vlan_mapping.val[2] |= BIT(cpu_port);
1053	vlan_mapping.val[1] |= BIT(port);
1054	if (untagged)
1055		vlan_mapping.val[2] &= ~BIT(port);
1056	else
1057		vlan_mapping.val[2] |= BIT(port);
1058	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
1059	if (err) {
1060		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
 1061		/* In case an Active VLAN was created, delete it again */
1062		if (active_vlan_created)
1063			gswip_vlan_active_remove(priv, idx);
1064		return err;
1065	}
1066
1067	if (pvid)
1068		gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
1069
1070	return 0;
1071}
1072
1073static int gswip_vlan_remove(struct gswip_priv *priv,
1074			     struct net_device *bridge, int port,
1075			     u16 vid, bool pvid, bool vlan_aware)
1076{
1077	struct gswip_pce_table_entry vlan_mapping = {0,};
1078	unsigned int max_ports = priv->hw_info->max_ports;
1079	unsigned int cpu_port = priv->hw_info->cpu_port;
1080	int idx = -1;
1081	int i;
1082	int err;
1083
 1084	/* Check if there is already an entry for this bridge */
1085	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
1086		if (priv->vlans[i].bridge == bridge &&
1087		    (!vlan_aware || priv->vlans[i].vid == vid)) {
1088			idx = i;
1089			break;
1090		}
1091	}
1092
1093	if (idx == -1) {
 1094		dev_err(priv->dev, "bridge to leave does not exist\n");
1095		return -ENOENT;
1096	}
1097
1098	vlan_mapping.index = idx;
1099	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
1100	err = gswip_pce_table_entry_read(priv, &vlan_mapping);
1101	if (err) {
1102		dev_err(priv->dev, "failed to read VLAN mapping: %d\n",	err);
1103		return err;
1104	}
1105
1106	vlan_mapping.val[1] &= ~BIT(port);
1107	vlan_mapping.val[2] &= ~BIT(port);
1108	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
1109	if (err) {
1110		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
1111		return err;
1112	}
1113
1114	/* In case all ports are removed from the bridge, remove the VLAN */
1115	if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
1116		err = gswip_vlan_active_remove(priv, idx);
1117		if (err) {
1118			dev_err(priv->dev, "failed to write active VLAN: %d\n",
1119				err);
1120			return err;
1121		}
1122	}
1123
 1124	/* GSWIP 2.2 (GRX300) and later program the VID directly here. */
1125	if (pvid)
1126		gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
1127
1128	return 0;
1129}
1130
1131static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
1132				  struct net_device *bridge)
1133{
1134	struct gswip_priv *priv = ds->priv;
1135	int err;
1136
1137	/* When the bridge uses VLAN filtering we have to configure VLAN
1138	 * specific bridges. No bridge is configured here.
1139	 */
1140	if (!br_vlan_enabled(bridge)) {
1141		err = gswip_vlan_add_unaware(priv, bridge, port);
1142		if (err)
1143			return err;
1144		priv->port_vlan_filter &= ~BIT(port);
1145	} else {
1146		priv->port_vlan_filter |= BIT(port);
1147	}
1148	return gswip_add_single_port_br(priv, port, false);
1149}
1150
1151static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
1152				    struct net_device *bridge)
1153{
1154	struct gswip_priv *priv = ds->priv;
1155
1156	gswip_add_single_port_br(priv, port, true);
1157
1158	/* When the bridge uses VLAN filtering we have to configure VLAN
1159	 * specific bridges. No bridge is configured here.
1160	 */
1161	if (!br_vlan_enabled(bridge))
1162		gswip_vlan_remove(priv, bridge, port, 0, true, false);
1163}
1164
1165static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
1166				   const struct switchdev_obj_port_vlan *vlan,
1167				   struct netlink_ext_ack *extack)
1168{
1169	struct gswip_priv *priv = ds->priv;
1170	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
1171	unsigned int max_ports = priv->hw_info->max_ports;
1172	int pos = max_ports;
1173	int i, idx = -1;
1174
1175	/* We only support VLAN filtering on bridges */
1176	if (!dsa_is_cpu_port(ds, port) && !bridge)
1177		return -EOPNOTSUPP;
1178
 1179	/* Check if there is already an entry for this VLAN */
1180	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
1181		if (priv->vlans[i].bridge == bridge &&
1182		    priv->vlans[i].vid == vlan->vid) {
1183			idx = i;
1184			break;
1185		}
1186	}
1187
 1188	/* If this VLAN is not programmed yet, we have to reserve
 1189	 * one entry in the VLAN table. The search starts after the
 1190	 * slots reserved for the single port bridges.
 1191	 */
1192	if (idx == -1) {
1193		/* Look for a free slot */
1194		for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
1195			if (!priv->vlans[pos].bridge) {
1196				idx = pos;
1197				pos++;
1198				break;
1199			}
1200		}
1201
1202		if (idx == -1) {
1203			NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
1204			return -ENOSPC;
1205		}
1206	}
1207
1208	return 0;
1209}
1210
1211static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
1212			       const struct switchdev_obj_port_vlan *vlan,
1213			       struct netlink_ext_ack *extack)
1214{
1215	struct gswip_priv *priv = ds->priv;
1216	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
1217	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1218	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1219	int err;
1220
1221	err = gswip_port_vlan_prepare(ds, port, vlan, extack);
1222	if (err)
1223		return err;
1224
1225	/* We have to receive all packets on the CPU port and should not
1226	 * do any VLAN filtering here. This is also called with bridge
1227	 * NULL and then we do not know for which bridge to configure
1228	 * this.
1229	 */
1230	if (dsa_is_cpu_port(ds, port))
1231		return 0;
1232
1233	return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
1234				    untagged, pvid);
1235}
1236
1237static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
1238			       const struct switchdev_obj_port_vlan *vlan)
1239{
1240	struct gswip_priv *priv = ds->priv;
1241	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
1242	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1243
1244	/* We have to receive all packets on the CPU port and should not
1245	 * do any VLAN filtering here. This is also called with bridge
1246	 * NULL and then we do not know for which bridge to configure
1247	 * this.
1248	 */
1249	if (dsa_is_cpu_port(ds, port))
1250		return 0;
1251
1252	return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
1253}
1254
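/* Fast ageing walks all 2048 MAC bridge table entries and invalidates the
 * dynamically learned ones (no STATIC flag) whose port field (bits 7:4 of
 * val[0]) matches the given port.
 */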
1255static void gswip_port_fast_age(struct dsa_switch *ds, int port)
1256{
1257	struct gswip_priv *priv = ds->priv;
1258	struct gswip_pce_table_entry mac_bridge = {0,};
1259	int i;
1260	int err;
1261
1262	for (i = 0; i < 2048; i++) {
1263		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
1264		mac_bridge.index = i;
1265
1266		err = gswip_pce_table_entry_read(priv, &mac_bridge);
1267		if (err) {
1268			dev_err(priv->dev, "failed to read mac bridge: %d\n",
1269				err);
1270			return;
1271		}
1272
1273		if (!mac_bridge.valid)
1274			continue;
1275
1276		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
1277			continue;
1278
1279		if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
1280			continue;
1281
1282		mac_bridge.valid = false;
1283		err = gswip_pce_table_entry_write(priv, &mac_bridge);
1284		if (err) {
1285			dev_err(priv->dev, "failed to write mac bridge: %d\n",
1286				err);
1287			return;
1288		}
1289	}
1290}
1291
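/* Map the bridge STP states onto the hardware port states. BLOCKING and
 * LISTENING both use the hardware LISTEN state; DISABLED is handled by
 * turning off the port's store DMA instead of programming a port state.
 */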
1292static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
1293{
1294	struct gswip_priv *priv = ds->priv;
1295	u32 stp_state;
1296
1297	switch (state) {
1298	case BR_STATE_DISABLED:
1299		gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
1300				  GSWIP_SDMA_PCTRLp(port));
1301		return;
1302	case BR_STATE_BLOCKING:
1303	case BR_STATE_LISTENING:
1304		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
1305		break;
1306	case BR_STATE_LEARNING:
1307		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
1308		break;
1309	case BR_STATE_FORWARDING:
1310		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
1311		break;
1312	default:
1313		dev_err(priv->dev, "invalid STP state: %d\n", state);
1314		return;
1315	}
1316
1317	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
1318			  GSWIP_SDMA_PCTRLp(port));
1319	gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
1320			  GSWIP_PCE_PCTRL_0p(port));
1321}
1322
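/* MAC bridge table entries are keyed by the MAC address and the FID of the
 * bridge the port belongs to (looked up in priv->vlans); the vid argument
 * is not used here because the hardware table is keyed by FID rather than
 * by VID.
 */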
1323static int gswip_port_fdb(struct dsa_switch *ds, int port,
1324			  const unsigned char *addr, u16 vid, bool add)
1325{
1326	struct gswip_priv *priv = ds->priv;
1327	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
1328	struct gswip_pce_table_entry mac_bridge = {0,};
1329	unsigned int cpu_port = priv->hw_info->cpu_port;
1330	int fid = -1;
1331	int i;
1332	int err;
1333
1334	if (!bridge)
1335		return -EINVAL;
1336
1337	for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
1338		if (priv->vlans[i].bridge == bridge) {
1339			fid = priv->vlans[i].fid;
1340			break;
1341		}
1342	}
1343
1344	if (fid == -1) {
1345		dev_err(priv->dev, "Port not part of a bridge\n");
1346		return -EINVAL;
1347	}
1348
1349	mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
1350	mac_bridge.key_mode = true;
1351	mac_bridge.key[0] = addr[5] | (addr[4] << 8);
1352	mac_bridge.key[1] = addr[3] | (addr[2] << 8);
1353	mac_bridge.key[2] = addr[1] | (addr[0] << 8);
1354	mac_bridge.key[3] = fid;
1355	mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
1356	mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
1357	mac_bridge.valid = add;
1358
1359	err = gswip_pce_table_entry_write(priv, &mac_bridge);
1360	if (err)
1361		dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
1362
1363	return err;
1364}
1365
1366static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
1367			      const unsigned char *addr, u16 vid)
1368{
1369	return gswip_port_fdb(ds, port, addr, vid, true);
1370}
1371
1372static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
1373			      const unsigned char *addr, u16 vid)
1374{
1375	return gswip_port_fdb(ds, port, addr, vid, false);
1376}
1377
1378static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
1379			       dsa_fdb_dump_cb_t *cb, void *data)
1380{
1381	struct gswip_priv *priv = ds->priv;
1382	struct gswip_pce_table_entry mac_bridge = {0,};
1383	unsigned char addr[6];
1384	int i;
1385	int err;
1386
1387	for (i = 0; i < 2048; i++) {
1388		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
1389		mac_bridge.index = i;
1390
1391		err = gswip_pce_table_entry_read(priv, &mac_bridge);
1392		if (err) {
 1393			dev_err(priv->dev, "failed to read mac bridge: %d\n",
1394				err);
1395			return err;
1396		}
1397
1398		if (!mac_bridge.valid)
1399			continue;
1400
1401		addr[5] = mac_bridge.key[0] & 0xff;
1402		addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
1403		addr[3] = mac_bridge.key[1] & 0xff;
1404		addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
1405		addr[1] = mac_bridge.key[2] & 0xff;
1406		addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
1407		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
1408			if (mac_bridge.val[0] & BIT(port)) {
1409				err = cb(addr, 0, true, data);
1410				if (err)
1411					return err;
1412			}
1413		} else {
1414			if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
1415				err = cb(addr, 0, false, data);
1416				if (err)
1417					return err;
1418			}
1419		}
1420	}
1421	return 0;
1422}
1423
1424static void gswip_phylink_set_capab(unsigned long *supported,
1425				    struct phylink_link_state *state)
1426{
1427	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1428
1429	/* Allow all the expected bits */
1430	phylink_set(mask, Autoneg);
1431	phylink_set_port_modes(mask);
1432	phylink_set(mask, Pause);
1433	phylink_set(mask, Asym_Pause);
1434
1435	/* With the exclusion of MII, Reverse MII and Reduced MII, we
1436	 * support Gigabit, including Half duplex
1437	 */
1438	if (state->interface != PHY_INTERFACE_MODE_MII &&
1439	    state->interface != PHY_INTERFACE_MODE_REVMII &&
1440	    state->interface != PHY_INTERFACE_MODE_RMII) {
1441		phylink_set(mask, 1000baseT_Full);
1442		phylink_set(mask, 1000baseT_Half);
1443	}
1444
1445	phylink_set(mask, 10baseT_Half);
1446	phylink_set(mask, 10baseT_Full);
1447	phylink_set(mask, 100baseT_Half);
1448	phylink_set(mask, 100baseT_Full);
1449
1450	bitmap_and(supported, supported, mask,
1451		   __ETHTOOL_LINK_MODE_MASK_NBITS);
1452	bitmap_and(state->advertising, state->advertising, mask,
1453		   __ETHTOOL_LINK_MODE_MASK_NBITS);
1454}
1455
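/* Per-port interface constraints on xRX200: ports 0 and 1 accept MII,
 * Reverse MII, RMII and RGMII, ports 2-4 only the internal PHY interface,
 * and port 5 either RGMII or the internal interface.
 */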
1456static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,
1457					  unsigned long *supported,
1458					  struct phylink_link_state *state)
1459{
1460	switch (port) {
1461	case 0:
1462	case 1:
1463		if (!phy_interface_mode_is_rgmii(state->interface) &&
1464		    state->interface != PHY_INTERFACE_MODE_MII &&
1465		    state->interface != PHY_INTERFACE_MODE_REVMII &&
1466		    state->interface != PHY_INTERFACE_MODE_RMII)
1467			goto unsupported;
1468		break;
1469	case 2:
1470	case 3:
1471	case 4:
1472		if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
1473			goto unsupported;
1474		break;
1475	case 5:
1476		if (!phy_interface_mode_is_rgmii(state->interface) &&
1477		    state->interface != PHY_INTERFACE_MODE_INTERNAL)
1478			goto unsupported;
1479		break;
1480	default:
1481		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1482		dev_err(ds->dev, "Unsupported port: %i\n", port);
1483		return;
1484	}
1485
1486	gswip_phylink_set_capab(supported, state);
1487
1488	return;
1489
1490unsupported:
1491	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1492	dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
1493		phy_modes(state->interface), port);
1494}
1495
1496static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port,
1497					  unsigned long *supported,
1498					  struct phylink_link_state *state)
1499{
1500	switch (port) {
1501	case 0:
1502		if (!phy_interface_mode_is_rgmii(state->interface) &&
1503		    state->interface != PHY_INTERFACE_MODE_GMII &&
1504		    state->interface != PHY_INTERFACE_MODE_RMII)
1505			goto unsupported;
1506		break;
1507	case 1:
1508	case 2:
1509	case 3:
1510	case 4:
1511		if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
1512			goto unsupported;
1513		break;
1514	case 5:
1515		if (!phy_interface_mode_is_rgmii(state->interface) &&
1516		    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
1517		    state->interface != PHY_INTERFACE_MODE_RMII)
1518			goto unsupported;
1519		break;
1520	default:
1521		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1522		dev_err(ds->dev, "Unsupported port: %i\n", port);
1523		return;
1524	}
1525
1526	gswip_phylink_set_capab(supported, state);
1527
1528	return;
1529
1530unsupported:
1531	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1532	dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
1533		phy_modes(state->interface), port);
1534}
1535
1536static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
1537{
1538	u32 mdio_phy;
1539
1540	if (link)
1541		mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
1542	else
1543		mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
1544
1545	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
1546			GSWIP_MDIO_PHYp(port));
1547}
1548
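/* The link speed is programmed in three places: the per-port MDIO PHY
 * register, the xMII clock rate and the MAC's (G)MII mode. For RMII the
 * xMII clock runs at 50 MHz regardless of the 10/100 link speed.
 */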
1549static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
1550				 phy_interface_t interface)
1551{
1552	u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
1553
1554	switch (speed) {
1555	case SPEED_10:
1556		mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
1557
1558		if (interface == PHY_INTERFACE_MODE_RMII)
1559			mii_cfg = GSWIP_MII_CFG_RATE_M50;
1560		else
1561			mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
1562
1563		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
1564		break;
1565
1566	case SPEED_100:
1567		mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
1568
1569		if (interface == PHY_INTERFACE_MODE_RMII)
1570			mii_cfg = GSWIP_MII_CFG_RATE_M50;
1571		else
1572			mii_cfg = GSWIP_MII_CFG_RATE_M25;
1573
1574		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
1575		break;
1576
1577	case SPEED_1000:
1578		mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
1579
1580		mii_cfg = GSWIP_MII_CFG_RATE_M125;
1581
1582		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
1583		break;
1584	}
1585
1586	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
1587			GSWIP_MDIO_PHYp(port));
1588	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
1589	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
1590			  GSWIP_MAC_CTRL_0p(port));
1591}
1592
1593static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
1594{
1595	u32 mac_ctrl_0, mdio_phy;
1596
1597	if (duplex == DUPLEX_FULL) {
1598		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
1599		mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
1600	} else {
1601		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
1602		mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
1603	}
1604
1605	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
1606			  GSWIP_MAC_CTRL_0p(port));
1607	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
1608			GSWIP_MDIO_PHYp(port));
1609}
1610
1611static void gswip_port_set_pause(struct gswip_priv *priv, int port,
1612				 bool tx_pause, bool rx_pause)
1613{
1614	u32 mac_ctrl_0, mdio_phy;
1615
1616	if (tx_pause && rx_pause) {
1617		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
1618		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
1619			   GSWIP_MDIO_PHY_FCONRX_EN;
1620	} else if (tx_pause) {
1621		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
1622		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
1623			   GSWIP_MDIO_PHY_FCONRX_DIS;
1624	} else if (rx_pause) {
1625		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
1626		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
1627			   GSWIP_MDIO_PHY_FCONRX_EN;
1628	} else {
1629		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
1630		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
1631			   GSWIP_MDIO_PHY_FCONRX_DIS;
1632	}
1633
1634	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
1635			  mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
1636	gswip_mdio_mask(priv,
1637			GSWIP_MDIO_PHY_FCONTX_MASK |
1638			GSWIP_MDIO_PHY_FCONRX_MASK,
1639			mdio_phy, GSWIP_MDIO_PHYp(port));
1640}
1641
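/* Select the xMII mode matching the PHY interface. For the RGMII internal
 * delay variants the PCDU TX/RX delay fields are cleared, leaving the
 * delay generation to the PHY.
 */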
1642static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
1643				     unsigned int mode,
1644				     const struct phylink_link_state *state)
1645{
1646	struct gswip_priv *priv = ds->priv;
1647	u32 miicfg = 0;
1648
1649	miicfg |= GSWIP_MII_CFG_LDCLKDIS;
1650
1651	switch (state->interface) {
1652	case PHY_INTERFACE_MODE_MII:
1653	case PHY_INTERFACE_MODE_INTERNAL:
1654		miicfg |= GSWIP_MII_CFG_MODE_MIIM;
1655		break;
1656	case PHY_INTERFACE_MODE_REVMII:
1657		miicfg |= GSWIP_MII_CFG_MODE_MIIP;
1658		break;
1659	case PHY_INTERFACE_MODE_RMII:
1660		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
1661
1662		/* Configure the RMII clock as output: */
1663		miicfg |= GSWIP_MII_CFG_RMII_CLK;
1664		break;
1665	case PHY_INTERFACE_MODE_RGMII:
1666	case PHY_INTERFACE_MODE_RGMII_ID:
1667	case PHY_INTERFACE_MODE_RGMII_RXID:
1668	case PHY_INTERFACE_MODE_RGMII_TXID:
1669		miicfg |= GSWIP_MII_CFG_MODE_RGMII;
1670		break;
1671	case PHY_INTERFACE_MODE_GMII:
1672		miicfg |= GSWIP_MII_CFG_MODE_GMII;
1673		break;
1674	default:
1675		dev_err(ds->dev,
1676			"Unsupported interface: %d\n", state->interface);
1677		return;
1678	}
1679
1680	gswip_mii_mask_cfg(priv,
1681			   GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
1682			   GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
1683			   miicfg, port);
1684
1685	switch (state->interface) {
1686	case PHY_INTERFACE_MODE_RGMII_ID:
1687		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
1688					  GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
1689		break;
1690	case PHY_INTERFACE_MODE_RGMII_RXID:
1691		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
1692		break;
1693	case PHY_INTERFACE_MODE_RGMII_TXID:
1694		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
1695		break;
1696	default:
1697		break;
1698	}
1699}
1700
1701static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
1702					unsigned int mode,
1703					phy_interface_t interface)
1704{
1705	struct gswip_priv *priv = ds->priv;
1706
1707	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
1708
1709	if (!dsa_is_cpu_port(ds, port))
1710		gswip_port_set_link(priv, port, false);
1711}
1712
1713static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
1714				      unsigned int mode,
1715				      phy_interface_t interface,
1716				      struct phy_device *phydev,
1717				      int speed, int duplex,
1718				      bool tx_pause, bool rx_pause)
1719{
1720	struct gswip_priv *priv = ds->priv;
1721
1722	if (!dsa_is_cpu_port(ds, port)) {
1723		gswip_port_set_link(priv, port, true);
1724		gswip_port_set_speed(priv, port, speed, interface);
1725		gswip_port_set_duplex(priv, port, duplex);
1726		gswip_port_set_pause(priv, port, tx_pause, rx_pause);
1727	}
1728
1729	gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
1730}
1731
1732static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
1733			      uint8_t *data)
1734{
1735	int i;
1736
1737	if (stringset != ETH_SS_STATS)
1738		return;
1739
1740	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
1741		strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
1742			ETH_GSTRING_LEN);
1743}
1744
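/* Read one 32-bit RMON counter word from the BM RAM: write the counter
 * index to GSWIP_BM_RAM_ADDR, select the table and start the access via
 * GSWIP_BM_RAM_CTRL, wait for GSWIP_BM_RAM_CTRL_BAS to clear, then combine
 * the two 16-bit halves from GSWIP_BM_RAM_VAL(0) and GSWIP_BM_RAM_VAL(1).
 */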
1745static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
1746				    u32 index)
1747{
1748	u32 result;
1749	int err;
1750
1751	gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
1752	gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
1753				GSWIP_BM_RAM_CTRL_OPMOD,
1754			      table | GSWIP_BM_RAM_CTRL_BAS,
1755			      GSWIP_BM_RAM_CTRL);
1756
1757	err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
1758				     GSWIP_BM_RAM_CTRL_BAS);
1759	if (err) {
1760		dev_err(priv->dev, "timeout while reading table: %u, index: %u\n",
1761			table, index);
1762		return 0;
1763	}
1764
1765	result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
1766	result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;
1767
1768	return result;
1769}
1770
1771static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
1772				    uint64_t *data)
1773{
1774	struct gswip_priv *priv = ds->priv;
1775	const struct gswip_rmon_cnt_desc *rmon_cnt;
1776	int i;
1777	u64 high;
1778
1779	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
1780		rmon_cnt = &gswip_rmon_cnt[i];
1781
1782		data[i] = gswip_bcm_ram_entry_read(priv, port,
1783						   rmon_cnt->offset);
1784		if (rmon_cnt->size == 2) {
1785			high = gswip_bcm_ram_entry_read(priv, port,
1786							rmon_cnt->offset + 1);
1787			data[i] |= high << 32;
1788		}
1789	}
1790}
1791
1792static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
1793{
1794	if (sset != ETH_SS_STATS)
1795		return 0;
1796
1797	return ARRAY_SIZE(gswip_rmon_cnt);
1798}
1799
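/* The xRX200 and xRX300/xRX330 ops tables below are identical except for
 * the SoC-specific phylink_validate callback.
 */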
1800static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
1801	.get_tag_protocol	= gswip_get_tag_protocol,
1802	.setup			= gswip_setup,
1803	.port_enable		= gswip_port_enable,
1804	.port_disable		= gswip_port_disable,
1805	.port_bridge_join	= gswip_port_bridge_join,
1806	.port_bridge_leave	= gswip_port_bridge_leave,
1807	.port_fast_age		= gswip_port_fast_age,
1808	.port_vlan_filtering	= gswip_port_vlan_filtering,
1809	.port_vlan_add		= gswip_port_vlan_add,
1810	.port_vlan_del		= gswip_port_vlan_del,
1811	.port_stp_state_set	= gswip_port_stp_state_set,
1812	.port_fdb_add		= gswip_port_fdb_add,
1813	.port_fdb_del		= gswip_port_fdb_del,
1814	.port_fdb_dump		= gswip_port_fdb_dump,
1815	.phylink_validate	= gswip_xrx200_phylink_validate,
1816	.phylink_mac_config	= gswip_phylink_mac_config,
1817	.phylink_mac_link_down	= gswip_phylink_mac_link_down,
1818	.phylink_mac_link_up	= gswip_phylink_mac_link_up,
1819	.get_strings		= gswip_get_strings,
1820	.get_ethtool_stats	= gswip_get_ethtool_stats,
1821	.get_sset_count		= gswip_get_sset_count,
1822};
1823
1824static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
1825	.get_tag_protocol	= gswip_get_tag_protocol,
1826	.setup			= gswip_setup,
1827	.port_enable		= gswip_port_enable,
1828	.port_disable		= gswip_port_disable,
1829	.port_bridge_join	= gswip_port_bridge_join,
1830	.port_bridge_leave	= gswip_port_bridge_leave,
1831	.port_fast_age		= gswip_port_fast_age,
1832	.port_vlan_filtering	= gswip_port_vlan_filtering,
1833	.port_vlan_add		= gswip_port_vlan_add,
1834	.port_vlan_del		= gswip_port_vlan_del,
1835	.port_stp_state_set	= gswip_port_stp_state_set,
1836	.port_fdb_add		= gswip_port_fdb_add,
1837	.port_fdb_del		= gswip_port_fdb_del,
1838	.port_fdb_dump		= gswip_port_fdb_dump,
1839	.phylink_validate	= gswip_xrx300_phylink_validate,
1840	.phylink_mac_config	= gswip_phylink_mac_config,
1841	.phylink_mac_link_down	= gswip_phylink_mac_link_down,
1842	.phylink_mac_link_up	= gswip_phylink_mac_link_up,
1843	.get_strings		= gswip_get_strings,
1844	.get_ethtool_stats	= gswip_get_ethtool_stats,
1845	.get_sset_count		= gswip_get_sset_count,
1846};
1847
1848static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
1849	.fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
1850	.ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
1851};
1852
1853static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
1854	.fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
1855	.ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
1856};
1857
1858static const struct xway_gphy_match_data xrx300_gphy_data = {
1859	.fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
1860	.ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
1861};
1862
1863static const struct of_device_id xway_gphy_match[] = {
1864	{ .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
1865	{ .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
1866	{ .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
1867	{ .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
1868	{ .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
1869	{},
1870};
1871
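/* Load one GPHY firmware image: enable the GPHY gate clock, assert the
 * reset line, copy the firmware into a coherent DMA buffer aligned to a
 * 16 kB boundary, write the buffer address into the RCU firmware-address
 * register and finally deassert the reset.
 */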
1872static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
1873{
1874	struct device *dev = priv->dev;
1875	const struct firmware *fw;
1876	void *fw_addr;
1877	dma_addr_t dma_addr;
1878	dma_addr_t dev_addr;
1879	size_t size;
1880	int ret;
1881
1882	ret = clk_prepare_enable(gphy_fw->clk_gate);
1883	if (ret)
1884		return ret;
1885
1886	reset_control_assert(gphy_fw->reset);
1887
1888	/* The vendor BSP uses a 200ms delay after asserting the reset line.
1889	 * Without this delay some users have observed that the PHY does not
1890	 * come up on the MDIO bus.
1891	 */
1892	msleep(200);
1893
1894	ret = request_firmware(&fw, gphy_fw->fw_name, dev);
1895	if (ret) {
1896		dev_err(dev, "failed to load firmware: %s, error: %i\n",
1897			gphy_fw->fw_name, ret);
1898		return ret;
1899	}
1900
1901	/* GPHY cores need the firmware code in a persistent and contiguous
1902	 * memory area whose start address is aligned to a 16 kB boundary.
1903	 */
1904	size = fw->size + XRX200_GPHY_FW_ALIGN;
1905
1906	fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
1907	if (fw_addr) {
1908		fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
1909		dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
1910		memcpy(fw_addr, fw->data, fw->size);
1911	} else {
1912		dev_err(dev, "failed to alloc firmware memory\n");
1913		release_firmware(fw);
1914		return -ENOMEM;
1915	}
1916
1917	release_firmware(fw);
1918
1919	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
1920	if (ret)
1921		return ret;
1922
1923	reset_control_deassert(gphy_fw->reset);
1924
1925	return ret;
1926}
1927
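/* Parse one GPHY firmware child node: look up the "gphy%d" gate clock,
 * read the RCU register offset from "reg" and the optional
 * "lantiq,gphy-mode" property (FE or GE, defaulting to GE) to pick the
 * firmware file, grab the reset line and then load the firmware.
 */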
1928static int gswip_gphy_fw_probe(struct gswip_priv *priv,
1929			       struct gswip_gphy_fw *gphy_fw,
1930			       struct device_node *gphy_fw_np, int i)
1931{
1932	struct device *dev = priv->dev;
1933	u32 gphy_mode;
1934	int ret;
1935	char gphyname[10];
1936
1937	snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
1938
1939	gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
1940	if (IS_ERR(gphy_fw->clk_gate)) {
1941		dev_err(dev, "Failed to lookup gate clock\n");
1942		return PTR_ERR(gphy_fw->clk_gate);
1943	}
1944
1945	ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
1946	if (ret)
1947		return ret;
1948
1949	ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
1950	/* Default to GE mode */
1951	if (ret)
1952		gphy_mode = GPHY_MODE_GE;
1953
1954	switch (gphy_mode) {
1955	case GPHY_MODE_FE:
1956		gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
1957		break;
1958	case GPHY_MODE_GE:
1959		gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
1960		break;
1961	default:
1962		dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
1963		return -EINVAL;
1964	}
1965
1966	gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
1967	if (IS_ERR(gphy_fw->reset)) {
1968		if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
1969			dev_err(dev, "Failed to lookup gphy reset\n");
1970		return PTR_ERR(gphy_fw->reset);
1971	}
1972
1973	return gswip_gphy_fw_load(priv, gphy_fw);
1974}
1975
1976static void gswip_gphy_fw_remove(struct gswip_priv *priv,
1977				 struct gswip_gphy_fw *gphy_fw)
1978{
1979	int ret;
1980
1981	/* check if the device was fully probed */
1982	if (!gphy_fw->fw_name)
1983		return;
1984
1985	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
1986	if (ret)
1987		dev_err(priv->dev, "cannot reset GPHY FW pointer\n");
1988
1989	clk_disable_unprepare(gphy_fw->clk_gate);
1990
1991	reset_control_put(gphy_fw->reset);
1992}
1993
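/* Walk the GPHY firmware container node: pick the firmware set from the
 * node's compatible (and, for "lantiq,xrx200-gphy-fw", from the detected
 * GSWIP version), look up the RCU syscon via "lantiq,rcu" and load one
 * firmware per available child node. Illustrative sketch of such a node
 * (phandle and register offsets are made up):
 *
 *	gphy-fw {
 *		compatible = "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw";
 *		lantiq,rcu = <&rcu0>;
 *
 *		gphy@20 {
 *			reg = <0x20>;
 *			lantiq,gphy-mode = <GPHY_MODE_GE>;
 *		};
 *	};
 */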
1994static int gswip_gphy_fw_list(struct gswip_priv *priv,
1995			      struct device_node *gphy_fw_list_np, u32 version)
1996{
1997	struct device *dev = priv->dev;
1998	struct device_node *gphy_fw_np;
1999	const struct of_device_id *match;
2000	int err;
2001	int i = 0;
2002
2003	/* The VRX200 rev 1.1 uses GSWIP 2.0 and needs the older GPHY
2004	 * firmware, while the VRX200 rev 1.2 uses GSWIP 2.1 and needs a
2005	 * different GPHY firmware.
2006	 */
2007	if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
2008		switch (version) {
2009		case GSWIP_VERSION_2_0:
2010			priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
2011			break;
2012		case GSWIP_VERSION_2_1:
2013			priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
2014			break;
2015		default:
2016			dev_err(dev, "unknown GSWIP version: 0x%x\n", version);
2017			return -ENOENT;
2018		}
2019	}
2020
2021	match = of_match_node(xway_gphy_match, gphy_fw_list_np);
2022	if (match && match->data)
2023		priv->gphy_fw_name_cfg = match->data;
2024
2025	if (!priv->gphy_fw_name_cfg) {
2026		dev_err(dev, "GPHY compatible type not supported\n");
2027		return -ENOENT;
2028	}
2029
2030	priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
2031	if (!priv->num_gphy_fw)
2032		return -ENOENT;
2033
2034	priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
2035							   "lantiq,rcu");
2036	if (IS_ERR(priv->rcu_regmap))
2037		return PTR_ERR(priv->rcu_regmap);
2038
2039	priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
2040					   sizeof(*priv->gphy_fw),
2041					   GFP_KERNEL | __GFP_ZERO);
2042	if (!priv->gphy_fw)
2043		return -ENOMEM;
2044
2045	for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
2046		err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
2047					  gphy_fw_np, i);
2048		if (err)
2049			goto remove_gphy;
2050		i++;
2051	}
2052
2053	/* The standalone PHY11G requires 300ms to be fully
2054	 * initialized and ready for any MDIO communication after being
2055	 * taken out of reset. For the SoC-internal GPHY variant there
2056	 * is no (known) documentation for the minimum time after a
2057	 * reset. Use the same value as for the standalone variant as
2058	 * some users have reported internal PHYs not being detected
2059	 * without any delay.
2060	 */
2061	msleep(300);
2062
2063	return 0;
2064
2065remove_gphy:
2066	for (i = 0; i < priv->num_gphy_fw; i++)
2067		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
2068	return err;
2069}
2070
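/* Probe: map the switch, MDIO and MII register blocks, pick the per-SoC
 * hw_info from the compatible, check that the GSWIP version read from the
 * hardware matches that compatible, load the GPHY firmware and set up the
 * MDIO bus when the corresponding child nodes exist, then register the
 * DSA switch and verify the CPU port.
 */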
2071static int gswip_probe(struct platform_device *pdev)
2072{
2073	struct gswip_priv *priv;
2074	struct device_node *np, *mdio_np, *gphy_fw_np;
2075	struct device *dev = &pdev->dev;
2076	int err;
2077	int i;
2078	u32 version;
2079
2080	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
2081	if (!priv)
2082		return -ENOMEM;
2083
2084	priv->gswip = devm_platform_ioremap_resource(pdev, 0);
2085	if (IS_ERR(priv->gswip))
2086		return PTR_ERR(priv->gswip);
2087
2088	priv->mdio = devm_platform_ioremap_resource(pdev, 1);
2089	if (IS_ERR(priv->mdio))
2090		return PTR_ERR(priv->mdio);
2091
2092	priv->mii = devm_platform_ioremap_resource(pdev, 2);
2093	if (IS_ERR(priv->mii))
2094		return PTR_ERR(priv->mii);
2095
2096	priv->hw_info = of_device_get_match_data(dev);
2097	if (!priv->hw_info)
2098		return -EINVAL;
2099
2100	priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
2101	if (!priv->ds)
2102		return -ENOMEM;
2103
2104	priv->ds->dev = dev;
2105	priv->ds->num_ports = priv->hw_info->max_ports;
2106	priv->ds->priv = priv;
2107	priv->ds->ops = priv->hw_info->ops;
2108	priv->dev = dev;
2109	version = gswip_switch_r(priv, GSWIP_VERSION);
2110
2111	np = dev->of_node;
2112	switch (version) {
2113	case GSWIP_VERSION_2_0:
2114	case GSWIP_VERSION_2_1:
2115		if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
2116			return -EINVAL;
2117		break;
2118	case GSWIP_VERSION_2_2:
2119	case GSWIP_VERSION_2_2_ETC:
2120		if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
2121		    !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
2122			return -EINVAL;
2123		break;
2124	default:
2125		dev_err(dev, "unknown GSWIP version: 0x%x\n", version);
2126		return -ENOENT;
2127	}
2128
2129	/* load the GPHY firmware before bringing up the MDIO bus */
2130	gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
2131	if (gphy_fw_np) {
2132		err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
2133		of_node_put(gphy_fw_np);
2134		if (err) {
2135			dev_err(dev, "gphy fw probe failed\n");
2136			return err;
2137		}
2138	}
2139
2140	/* bring up the mdio bus */
2141	mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
2142	if (mdio_np) {
2143		err = gswip_mdio(priv, mdio_np);
2144		if (err) {
2145			dev_err(dev, "mdio probe failed\n");
2146			goto put_mdio_node;
2147		}
2148	}
2149
2150	err = dsa_register_switch(priv->ds);
2151	if (err) {
2152		dev_err(dev, "dsa switch register failed: %i\n", err);
2153		goto mdio_bus;
2154	}
2155	if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
2156		dev_err(dev, "wrong CPU port defined, HW only supports port: %i\n",
2157			priv->hw_info->cpu_port);
2158		err = -EINVAL;
2159		goto disable_switch;
2160	}
2161
2162	platform_set_drvdata(pdev, priv);
2163
2164	dev_info(dev, "probed GSWIP version %lx mod %lx\n",
2165		 (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
2166		 (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
2167	return 0;
2168
2169disable_switch:
2170	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
2171	dsa_unregister_switch(priv->ds);
2172mdio_bus:
2173	if (mdio_np)
2174		mdiobus_unregister(priv->ds->slave_mii_bus);
2175put_mdio_node:
2176	of_node_put(mdio_np);
2177	for (i = 0; i < priv->num_gphy_fw; i++)
2178		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
2179	return err;
2180}
2181
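/* Tear down in reverse order: disable the switch via the global MDIO
 * register, unregister the DSA switch and the slave MDIO bus, and remove
 * the loaded GPHY firmware instances.
 */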
2182static int gswip_remove(struct platform_device *pdev)
2183{
2184	struct gswip_priv *priv = platform_get_drvdata(pdev);
2185	int i;
2186
2187	/* disable the switch */
2188	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
2189
2190	dsa_unregister_switch(priv->ds);
2191
2192	if (priv->ds->slave_mii_bus) {
2193		mdiobus_unregister(priv->ds->slave_mii_bus);
2194		of_node_put(priv->ds->slave_mii_bus->dev.of_node);
2195	}
2196
2197	for (i = 0; i < priv->num_gphy_fw; i++)
2198		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
2199
2200	return 0;
2201}
2202
2203static const struct gswip_hw_info gswip_xrx200 = {
2204	.max_ports = 7,
2205	.cpu_port = 6,
2206	.ops = &gswip_xrx200_switch_ops,
2207};
2208
2209static const struct gswip_hw_info gswip_xrx300 = {
2210	.max_ports = 7,
2211	.cpu_port = 6,
2212	.ops = &gswip_xrx300_switch_ops,
2213};
2214
2215static const struct of_device_id gswip_of_match[] = {
2216	{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
2217	{ .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
2218	{ .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
2219	{},
2220};
2221MODULE_DEVICE_TABLE(of, gswip_of_match);
2222
2223static struct platform_driver gswip_driver = {
2224	.probe = gswip_probe,
2225	.remove = gswip_remove,
2226	.driver = {
2227		.name = "gswip",
2228		.of_match_table = gswip_of_match,
2229	},
2230};
2231
2232module_platform_driver(gswip_driver);
2233
2234MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
2235MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
2236MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
2237MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
2238MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
2239MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
2240MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
2241MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
2242MODULE_LICENSE("GPL v2");