v4.17 (drivers/net/dsa/bcm_sf2.c)
 
   1/*
   2 * Broadcom Starfighter 2 DSA switch driver
   3 *
   4 * Copyright (C) 2014, Broadcom Corporation
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 */
  11
  12#include <linux/list.h>
  13#include <linux/module.h>
  14#include <linux/netdevice.h>
  15#include <linux/interrupt.h>
  16#include <linux/platform_device.h>
  17#include <linux/phy.h>
  18#include <linux/phy_fixed.h>
  19#include <linux/mii.h>
  20#include <linux/of.h>
  21#include <linux/of_irq.h>
  22#include <linux/of_address.h>
  23#include <linux/of_net.h>
  24#include <linux/of_mdio.h>
  25#include <net/dsa.h>
  26#include <linux/ethtool.h>
  27#include <linux/if_bridge.h>
  28#include <linux/brcmphy.h>
  29#include <linux/etherdevice.h>
  30#include <linux/platform_data/b53.h>
  31
  32#include "bcm_sf2.h"
  33#include "bcm_sf2_regs.h"
  34#include "b53/b53_priv.h"
  35#include "b53/b53_regs.h"
  36
  37static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
  38{
  39	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
  40	unsigned int i;
  41	u32 reg, offset;
  42
  43	if (priv->type == BCM7445_DEVICE_ID)
  44		offset = CORE_STS_OVERRIDE_IMP;
  45	else
  46		offset = CORE_STS_OVERRIDE_IMP2;
  47
  48	/* Enable the port memories */
  49	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
  50	reg &= ~P_TXQ_PSM_VDD(port);
  51	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
  52
  53	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
  54	reg = core_readl(priv, CORE_IMP_CTL);
  55	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
  56	reg &= ~(RX_DIS | TX_DIS);
  57	core_writel(priv, reg, CORE_IMP_CTL);
  58
  59	/* Enable forwarding */
  60	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
  61
  62	/* Enable IMP port in dumb mode */
  63	reg = core_readl(priv, CORE_SWITCH_CTRL);
  64	reg |= MII_DUMB_FWDG_EN;
  65	core_writel(priv, reg, CORE_SWITCH_CTRL);
  66
  67	/* Configure Traffic Class to QoS mapping, allow each priority to map
  68	 * to a different queue number
  69	 */
  70	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
  71	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
  72		reg |= i << (PRT_TO_QID_SHIFT * i);
  73	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
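	/* Illustration (added note, constant values assumed from the bcm_sf2
	 * headers): with SF2_NUM_EGRESS_QUEUES == 8 and PRT_TO_QID_SHIFT == 3,
	 * the loop above ORs an identity traffic-class-to-queue map into reg:
	 *   0 | (1 << 3) | (2 << 6) | ... | (7 << 21) == 0x00fac688
	 * so traffic class N egresses on queue N of the IMP port.
	 */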
  74
  75	b53_brcm_hdr_setup(ds, port);
  76
  77	/* Force link status for IMP port */
  78	reg = core_readl(priv, offset);
  79	reg |= (MII_SW_OR | LINK_STS);
  80	core_writel(priv, reg, offset);
  81}
  82
  83static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
  84{
  85	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
  86	u32 reg;
  87
  88	reg = reg_readl(priv, REG_SPHY_CNTRL);
  89	if (enable) {
  90		reg |= PHY_RESET;
  91		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
  92		reg_writel(priv, reg, REG_SPHY_CNTRL);
  93		udelay(21);
  94		reg = reg_readl(priv, REG_SPHY_CNTRL);
  95		reg &= ~PHY_RESET;
  96	} else {
  97		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
  98		reg_writel(priv, reg, REG_SPHY_CNTRL);
  99		mdelay(1);
 100		reg |= CK25_DIS;
 101	}
 102	reg_writel(priv, reg, REG_SPHY_CNTRL);
 103
 104	/* Use PHY-driven LED signaling */
 105	if (!enable) {
 106		reg = reg_readl(priv, REG_LED_CNTRL(0));
 107		reg |= SPDLNK_SRC_SEL;
 108		reg_writel(priv, reg, REG_LED_CNTRL(0));
 109	}
 110}
 111
 112static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
 113					    int port)
 114{
 115	unsigned int off;
 116
 117	switch (port) {
 118	case 7:
 119		off = P7_IRQ_OFF;
 120		break;
 121	case 0:
 122		/* Port 0 interrupts are located on the first bank */
 123		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
 124		return;
 125	default:
 126		off = P_IRQ_OFF(port);
 127		break;
 128	}
 129
 130	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
 131}
 132
 133static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
 134					     int port)
 135{
 136	unsigned int off;
 137
 138	switch (port) {
 139	case 7:
 140		off = P7_IRQ_OFF;
 141		break;
 142	case 0:
 143		/* Port 0 interrupts are located on the first bank */
 144		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
 145		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
 146		return;
 147	default:
 148		off = P_IRQ_OFF(port);
 149		break;
 150	}
 151
 152	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
 153	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
 154}
 155
 156static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
 157			      struct phy_device *phy)
 158{
 159	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 160	unsigned int i;
 161	u32 reg;
 162
 163	/* Clear the memory power down */
 164	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
 165	reg &= ~P_TXQ_PSM_VDD(port);
 166	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
 167
 168	/* Enable Broadcom tags for that port if requested */
 169	if (priv->brcm_tag_mask & BIT(port))
 170		b53_brcm_hdr_setup(ds, port);
 171
 172	/* Configure Traffic Class to QoS mapping, allow each priority to map
 173	 * to a different queue number
 174	 */
 175	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
 176	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
 177		reg |= i << (PRT_TO_QID_SHIFT * i);
 178	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
 179
 180	/* Re-enable the GPHY and re-apply workarounds */
 181	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
 182		bcm_sf2_gphy_enable_set(ds, true);
 183		if (phy) {
 184			/* if phy_stop() has been called before, phy
 185			 * will be in halted state, and phy_start()
 186			 * will call resume.
 187			 *
 188			 * the resume path does not configure back
 189			 * autoneg settings, and since we hard reset
 190			 * the phy manually here, we need to reset the
 191			 * state machine also.
 192			 */
 193			phy->state = PHY_READY;
 194			phy_init_hw(phy);
 195		}
 196	}
 197
 198	/* Enable MoCA port interrupts to get notified */
 199	if (port == priv->moca_port)
 200		bcm_sf2_port_intr_enable(priv, port);
 201
 202	/* Set per-queue pause threshold to 32 */
 203	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));
 204
 205	/* Set ACB threshold to 24 */
 206	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
 207		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
 208						    SF2_NUM_EGRESS_QUEUES + i));
 209		reg &= ~XOFF_THRESHOLD_MASK;
 210		reg |= 24;
 211		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
 212						    SF2_NUM_EGRESS_QUEUES + i));
 213	}
 214
 215	return b53_enable_port(ds, port, phy);
 216}
 217
 218static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
 219				 struct phy_device *phy)
 220{
 221	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 222	u32 off, reg;
 223
 224	if (priv->wol_ports_mask & (1 << port))
 225		return;
 226
 227	if (port == priv->moca_port)
 228		bcm_sf2_port_intr_disable(priv, port);
 229
 230	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
 231		bcm_sf2_gphy_enable_set(ds, false);
 232
 233	if (dsa_is_cpu_port(ds, port))
 234		off = CORE_IMP_CTL;
 235	else
 236		off = CORE_G_PCTL_PORT(port);
 237
 238	b53_disable_port(ds, port, phy);
 239
 240	/* Power down the port memory */
 241	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
 242	reg |= P_TXQ_PSM_VDD(port);
 243	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
 244}
 245
 246
 247static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
 248			       int regnum, u16 val)
 249{
 250	int ret = 0;
 251	u32 reg;
 252
 253	reg = reg_readl(priv, REG_SWITCH_CNTRL);
 254	reg |= MDIO_MASTER_SEL;
 255	reg_writel(priv, reg, REG_SWITCH_CNTRL);
 256
 257	/* Page << 8 | offset */
 258	reg = 0x70;
 259	reg <<= 2;
 260	core_writel(priv, addr, reg);
 261
 262	/* Page << 8 | offset */
 263	reg = 0x80 << 8 | regnum << 1;
 264	reg <<= 2;
 265
 266	if (op)
 267		ret = core_readl(priv, reg);
 268	else
 269		core_writel(priv, val, reg);
 270
 271	reg = reg_readl(priv, REG_SWITCH_CNTRL);
 272	reg &= ~MDIO_MASTER_SEL;
 273	reg_writel(priv, reg, REG_SWITCH_CNTRL);
 274
 275	return ret & 0xffff;
 276}
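/* Worked example (illustrative only, derived from the arithmetic above):
 * the pseudo-PHY address is written at core offset 0x70 << 2 == 0x1c0, and
 * an access to regnum 2 (MII_PHYSID1) goes through core offset
 * ((0x80 << 8) | (2 << 1)) << 2 == 0x20010, i.e. the same
 * "page << 10 | reg << 2" translation that SF2_PAGE_REG_MKADDR performs
 * further down, with page 0x80 and reg == 2 * regnum.
 */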
 277
 278static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
 279{
 280	struct bcm_sf2_priv *priv = bus->priv;
 281
 282	/* Intercept reads from Broadcom pseudo-PHY address, else, send
 283	 * them to our master MDIO bus controller
 284	 */
 285	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
 286		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
 287	else
 288		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
 289}
 290
 291static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
 292				 u16 val)
 293{
 294	struct bcm_sf2_priv *priv = bus->priv;
 295
 296	/* Intercept writes to the Broadcom pseudo-PHY address, else,
 297	 * send them to our master MDIO bus controller
 298	 */
 299	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
 300		bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
 301	else
 302		mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);
 303
 304	return 0;
 305}
 306
 307static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
 308{
 309	struct bcm_sf2_priv *priv = dev_id;
 310
 311	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
 312				~priv->irq0_mask;
 313	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
 314
 315	return IRQ_HANDLED;
 316}
 317
 318static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
 319{
 320	struct bcm_sf2_priv *priv = dev_id;
 321
 322	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
 323				~priv->irq1_mask;
 324	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 325
 326	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
 327		priv->port_sts[7].link = 1;
 328	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
 329		priv->port_sts[7].link = 0;
 330
 331	return IRQ_HANDLED;
 332}
 333
 334static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
 335{
 336	unsigned int timeout = 1000;
 337	u32 reg;
 338
 339	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
 340	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
 341	core_writel(priv, reg, CORE_WATCHDOG_CTRL);
 342
 343	do {
 344		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
 345		if (!(reg & SOFTWARE_RESET))
 346			break;
 347
 348		usleep_range(1000, 2000);
 349	} while (timeout-- > 0);
 350
 351	if (timeout == 0)
 352		return -ETIMEDOUT;
 353
 354	return 0;
 355}
 356
 357static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
 358{
 359	intrl2_0_mask_set(priv, 0xffffffff);
 360	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
 361	intrl2_1_mask_set(priv, 0xffffffff);
 362	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
 363}
 364
 365static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
 366				   struct device_node *dn)
 367{
 368	struct device_node *port;
 369	int mode;
 370	unsigned int port_num;
 371
 372	priv->moca_port = -1;
 373
 374	for_each_available_child_of_node(dn, port) {
 375		if (of_property_read_u32(port, "reg", &port_num))
 376			continue;
 377
 378		/* Internal PHYs get assigned a specific 'phy-mode' property
 379		 * value: "internal" to help flag them before MDIO probing
 380		 * has completed, since they might be turned off at that
 381		 * time
 382		 */
 383		mode = of_get_phy_mode(port);
 384		if (mode < 0)
 385			continue;
 386
 387		if (mode == PHY_INTERFACE_MODE_INTERNAL)
 388			priv->int_phy_mask |= 1 << port_num;
 389
 390		if (mode == PHY_INTERFACE_MODE_MOCA)
 391			priv->moca_port = port_num;
 392
 393		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
 394			priv->brcm_tag_mask |= 1 << port_num;
 395	}
 396}
 397
 398static int bcm_sf2_mdio_register(struct dsa_switch *ds)
 399{
 400	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 401	struct device_node *dn;
 402	static int index;
 403	int err;
 404
 405	/* Find our integrated MDIO bus node */
 406	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
 407	priv->master_mii_bus = of_mdio_find_bus(dn);
 408	if (!priv->master_mii_bus)
 409		return -EPROBE_DEFER;
 410
 411	get_device(&priv->master_mii_bus->dev);
 412	priv->master_mii_dn = dn;
 413
 414	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
 415	if (!priv->slave_mii_bus)
 416		return -ENOMEM;
 417
 418	priv->slave_mii_bus->priv = priv;
 419	priv->slave_mii_bus->name = "sf2 slave mii";
 420	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
 421	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
 422	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
 423		 index++);
 424	priv->slave_mii_bus->dev.of_node = dn;
 425
 426	/* Include the pseudo-PHY address to divert reads towards our
 427	 * workaround. This is only required for 7445D0, since 7445E0
 428	 * disconnects the internal switch pseudo-PHY such that we can use the
 429	 * regular SWITCH_MDIO master controller instead.
 430	 *
 431	 * Here we flag the pseudo PHY as needing special treatment and would
 432	 * otherwise make all other PHY read/writes go to the master MDIO bus
 433	 * controller that comes with this switch backed by the "mdio-unimac"
 434	 * driver.
 435	 */
 436	if (of_machine_is_compatible("brcm,bcm7445d0"))
 437		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
 438	else
 439		priv->indir_phy_mask = 0;
 440
 441	ds->phys_mii_mask = priv->indir_phy_mask;
 442	ds->slave_mii_bus = priv->slave_mii_bus;
 443	priv->slave_mii_bus->parent = ds->dev->parent;
 444	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
 445
 446	if (dn)
 447		err = of_mdiobus_register(priv->slave_mii_bus, dn);
 448	else
 449		err = mdiobus_register(priv->slave_mii_bus);
 450
 451	if (err)
 452		of_node_put(dn);
 453
 454	return err;
 455}
 456
 457static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
 458{
 459	mdiobus_unregister(priv->slave_mii_bus);
 460	if (priv->master_mii_dn)
 461		of_node_put(priv->master_mii_dn);
 462}
 463
 464static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
 465{
 466	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 467
 468	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
 469	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
 470	 * the REG_PHY_REVISION register layout is.
 471	 */
 472
 473	return priv->hw_params.gphy_rev;
 474}
 475
 476static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
 477				   struct phy_device *phydev)
 478{
 479	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 480	struct ethtool_eee *p = &priv->dev->ports[port].eee;
 481	u32 id_mode_dis = 0, port_mode;
 482	const char *str = NULL;
 483	u32 reg, offset;
 484
 485	if (priv->type == BCM7445_DEVICE_ID)
 486		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
 487	else
 488		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
 489
 490	switch (phydev->interface) {
 491	case PHY_INTERFACE_MODE_RGMII:
 492		str = "RGMII (no delay)";
 493		id_mode_dis = 1;
 494	case PHY_INTERFACE_MODE_RGMII_TXID:
 495		if (!str)
 496			str = "RGMII (TX delay)";
 497		port_mode = EXT_GPHY;
 498		break;
 499	case PHY_INTERFACE_MODE_MII:
 500		str = "MII";
 501		port_mode = EXT_EPHY;
 502		break;
 503	case PHY_INTERFACE_MODE_REVMII:
 504		str = "Reverse MII";
 505		port_mode = EXT_REVMII;
 506		break;
 507	default:
 508		/* All other PHYs: internal and MoCA */
 509		goto force_link;
 510	}
 511
 512	/* If the link is down, just disable the interface to conserve power */
 513	if (!phydev->link) {
 514		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
 515		reg &= ~RGMII_MODE_EN;
 516		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
 517		goto force_link;
 518	}
 519
 520	/* Clear id_mode_dis bit, and the existing port mode, but
 521	 * make sure we enable the RGMII block for data to pass
 522	 */
 523	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
 524	reg &= ~ID_MODE_DIS;
 525	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
 526	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
 527
 528	reg |= port_mode | RGMII_MODE_EN;
 529	if (id_mode_dis)
 530		reg |= ID_MODE_DIS;
 531
 532	if (phydev->pause) {
 533		if (phydev->asym_pause)
 534			reg |= TX_PAUSE_EN;
 535		reg |= RX_PAUSE_EN;
 536	}
 537
 538	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
 539
 540	pr_info("Port %d configured for %s\n", port, str);
 541
 542force_link:
 543	/* Force link settings detected from the PHY */
 544	reg = SW_OVERRIDE;
 545	switch (phydev->speed) {
 546	case SPEED_1000:
 547		reg |= SPDSTS_1000 << SPEED_SHIFT;
 548		break;
 549	case SPEED_100:
 550		reg |= SPDSTS_100 << SPEED_SHIFT;
 551		break;
 552	}
 553
 554	if (phydev->link)
 555		reg |= LINK_STS;
 556	if (phydev->duplex == DUPLEX_FULL)
 557		reg |= DUPLX_MODE;
 558
 559	core_writel(priv, reg, offset);
 560
 561	if (!phydev->is_pseudo_fixed_link)
 562		p->eee_enabled = b53_eee_init(ds, port, phydev);
 563}
 564
 565static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
 566					 struct fixed_phy_status *status)
 567{
 568	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 569	u32 duplex, pause, offset;
 570	u32 reg;
 571
 572	if (priv->type == BCM7445_DEVICE_ID)
 573		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
 574	else
 575		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
 576
 577	duplex = core_readl(priv, CORE_DUPSTS);
 578	pause = core_readl(priv, CORE_PAUSESTS);
 579
 580	status->link = 0;
 581
 582	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
 583	 * which means that we need to force the link at the port override
 584	 * level to get the data to flow. We do use what the interrupt handler
 585	 * did determine before.
 586	 *
 587	 * For the other ports, we just force the link status, since this is
 588	 * a fixed PHY device.
 589	 */
 590	if (port == priv->moca_port) {
 591		status->link = priv->port_sts[port].link;
 592		/* For MoCA interfaces, also force a link down notification
  593		 * since some versions of the user-space daemon (mocad) use
 594		 * cmd->autoneg to force the link, which messes up the PHY
  595		 * state machine and makes it go into the PHY_FORCING state instead.
 596		 */
 597		if (!status->link)
 598			netif_carrier_off(ds->ports[port].slave);
 599		status->duplex = 1;
 600	} else {
 601		status->link = 1;
 602		status->duplex = !!(duplex & (1 << port));
 603	}
 604
 605	reg = core_readl(priv, offset);
 606	reg |= SW_OVERRIDE;
 607	if (status->link)
 608		reg |= LINK_STS;
 609	else
 610		reg &= ~LINK_STS;
 611	core_writel(priv, reg, offset);
 612
 613	if ((pause & (1 << port)) &&
 614	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
 615		status->asym_pause = 1;
 616		status->pause = 1;
 617	}
 618
 619	if (pause & (1 << port))
 620		status->pause = 1;
 621}
 622
 623static void bcm_sf2_enable_acb(struct dsa_switch *ds)
 624{
 625	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 626	u32 reg;
 627
 628	/* Enable ACB globally */
 629	reg = acb_readl(priv, ACB_CONTROL);
 630	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
 631	acb_writel(priv, reg, ACB_CONTROL);
 632	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
 633	reg |= ACB_EN | ACB_ALGORITHM;
 634	acb_writel(priv, reg, ACB_CONTROL);
 635}
 636
 637static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
 638{
 639	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 640	unsigned int port;
 641
 642	bcm_sf2_intr_disable(priv);
 643
 644	/* Disable all ports physically present including the IMP
 645	 * port, the other ones have already been disabled during
 646	 * bcm_sf2_sw_setup
 647	 */
 648	for (port = 0; port < DSA_MAX_PORTS; port++) {
 649		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
 650			bcm_sf2_port_disable(ds, port, NULL);
 651	}
 652
 653	return 0;
 654}
 655
 656static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 657{
 658	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 659	unsigned int port;
 660	int ret;
 661
 662	ret = bcm_sf2_sw_rst(priv);
 663	if (ret) {
 664		pr_err("%s: failed to software reset switch\n", __func__);
 665		return ret;
 666	}
 667
 668	if (priv->hw_params.num_gphy == 1)
 669		bcm_sf2_gphy_enable_set(ds, true);
 670
 671	for (port = 0; port < DSA_MAX_PORTS; port++) {
 672		if (dsa_is_user_port(ds, port))
 673			bcm_sf2_port_setup(ds, port, NULL);
 674		else if (dsa_is_cpu_port(ds, port))
 675			bcm_sf2_imp_setup(ds, port);
 676	}
 677
 678	bcm_sf2_enable_acb(ds);
 679
 680	return 0;
 681}
 682
 683static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
 684			       struct ethtool_wolinfo *wol)
 685{
 686	struct net_device *p = ds->ports[port].cpu_dp->master;
 687	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 688	struct ethtool_wolinfo pwol;
 689
 690	/* Get the parent device WoL settings */
 691	p->ethtool_ops->get_wol(p, &pwol);
 692
 693	/* Advertise the parent device supported settings */
 694	wol->supported = pwol.supported;
 695	memset(&wol->sopass, 0, sizeof(wol->sopass));
 696
 697	if (pwol.wolopts & WAKE_MAGICSECURE)
 698		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
 699
 700	if (priv->wol_ports_mask & (1 << port))
 701		wol->wolopts = pwol.wolopts;
 702	else
 703		wol->wolopts = 0;
 704}
 705
 706static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
 707			      struct ethtool_wolinfo *wol)
 708{
 709	struct net_device *p = ds->ports[port].cpu_dp->master;
 710	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 711	s8 cpu_port = ds->ports[port].cpu_dp->index;
 712	struct ethtool_wolinfo pwol;
 713
 714	p->ethtool_ops->get_wol(p, &pwol);
 715	if (wol->wolopts & ~pwol.supported)
 716		return -EINVAL;
 717
 718	if (wol->wolopts)
 719		priv->wol_ports_mask |= (1 << port);
 720	else
 721		priv->wol_ports_mask &= ~(1 << port);
 722
 723	/* If we have at least one port enabled, make sure the CPU port
 724	 * is also enabled. If the CPU port is the last one enabled, we disable
 725	 * it since this configuration does not make sense.
 726	 */
 727	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
 728		priv->wol_ports_mask |= (1 << cpu_port);
 729	else
 730		priv->wol_ports_mask &= ~(1 << cpu_port);
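	/* Example (added illustration): enabling WoL on user port 1 with CPU
	 * port 8 leaves wol_ports_mask == BIT(1) | BIT(8); once the last user
	 * port is cleared, the mask collapses to 0 rather than keeping only
	 * the CPU port bit set.
	 */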
 731
 732	return p->ethtool_ops->set_wol(p, wol);
 733}
 734
 735static int bcm_sf2_sw_setup(struct dsa_switch *ds)
 736{
 737	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 738	unsigned int port;
 739
 740	/* Enable all valid ports and disable those unused */
 741	for (port = 0; port < priv->hw_params.num_ports; port++) {
 742		/* IMP port receives special treatment */
 743		if (dsa_is_user_port(ds, port))
 744			bcm_sf2_port_setup(ds, port, NULL);
 745		else if (dsa_is_cpu_port(ds, port))
 746			bcm_sf2_imp_setup(ds, port);
 747		else
 748			bcm_sf2_port_disable(ds, port, NULL);
 749	}
 750
 751	b53_configure_vlan(ds);
 752	bcm_sf2_enable_acb(ds);
 753
 754	return 0;
 755}
 756
 757/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 758 * register basis so we need to translate that into an address that the
 759 * bus-glue understands.
 760 */
 761#define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)
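/* Example (illustrative only): B53 page 0x28, register 0x08 maps to
 * SF2_PAGE_REG_MKADDR(0x28, 0x08) == (0x28 << 10) | (0x08 << 2) == 0xa020,
 * i.e. each page occupies a 1 KiB window and registers are 32-bit aligned.
 */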
 762
 763static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
 764			      u8 *val)
 765{
 766	struct bcm_sf2_priv *priv = dev->priv;
 767
 768	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
 769
 770	return 0;
 771}
 772
 773static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
 774			       u16 *val)
 775{
 776	struct bcm_sf2_priv *priv = dev->priv;
 777
 778	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
 779
 780	return 0;
 781}
 782
 783static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
 784			       u32 *val)
 785{
 786	struct bcm_sf2_priv *priv = dev->priv;
 787
 788	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
 789
 790	return 0;
 791}
 792
 793static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
 794			       u64 *val)
 795{
 796	struct bcm_sf2_priv *priv = dev->priv;
 797
 798	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));
 799
 800	return 0;
 801}
 802
 803static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
 804			       u8 value)
 805{
 806	struct bcm_sf2_priv *priv = dev->priv;
 807
 808	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
 809
 810	return 0;
 811}
 812
 813static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
 814				u16 value)
 815{
 816	struct bcm_sf2_priv *priv = dev->priv;
 817
 818	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
 819
 820	return 0;
 821}
 822
 823static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
 824				u32 value)
 825{
 826	struct bcm_sf2_priv *priv = dev->priv;
 827
 828	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
 829
 830	return 0;
 831}
 832
 833static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
 834				u64 value)
 835{
 836	struct bcm_sf2_priv *priv = dev->priv;
 837
 838	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
 839
 840	return 0;
 841}
 842
 843static const struct b53_io_ops bcm_sf2_io_ops = {
 844	.read8	= bcm_sf2_core_read8,
 845	.read16	= bcm_sf2_core_read16,
 846	.read32	= bcm_sf2_core_read32,
 847	.read48	= bcm_sf2_core_read64,
 848	.read64	= bcm_sf2_core_read64,
 849	.write8	= bcm_sf2_core_write8,
 850	.write16 = bcm_sf2_core_write16,
 851	.write32 = bcm_sf2_core_write32,
 852	.write48 = bcm_sf2_core_write64,
 853	.write64 = bcm_sf2_core_write64,
 854};
 855
 856static const struct dsa_switch_ops bcm_sf2_ops = {
 857	.get_tag_protocol	= b53_get_tag_protocol,
 858	.setup			= bcm_sf2_sw_setup,
 859	.get_strings		= b53_get_strings,
 860	.get_ethtool_stats	= b53_get_ethtool_stats,
 861	.get_sset_count		= b53_get_sset_count,
 862	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
 863	.adjust_link		= bcm_sf2_sw_adjust_link,
 864	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
 865	.suspend		= bcm_sf2_sw_suspend,
 866	.resume			= bcm_sf2_sw_resume,
 867	.get_wol		= bcm_sf2_sw_get_wol,
 868	.set_wol		= bcm_sf2_sw_set_wol,
 869	.port_enable		= bcm_sf2_port_setup,
 870	.port_disable		= bcm_sf2_port_disable,
 871	.get_mac_eee		= b53_get_mac_eee,
 872	.set_mac_eee		= b53_set_mac_eee,
 873	.port_bridge_join	= b53_br_join,
 874	.port_bridge_leave	= b53_br_leave,
 875	.port_stp_state_set	= b53_br_set_stp_state,
 876	.port_fast_age		= b53_br_fast_age,
 877	.port_vlan_filtering	= b53_vlan_filtering,
 878	.port_vlan_prepare	= b53_vlan_prepare,
 879	.port_vlan_add		= b53_vlan_add,
 880	.port_vlan_del		= b53_vlan_del,
 881	.port_fdb_dump		= b53_fdb_dump,
 882	.port_fdb_add		= b53_fdb_add,
 883	.port_fdb_del		= b53_fdb_del,
 884	.get_rxnfc		= bcm_sf2_get_rxnfc,
 885	.set_rxnfc		= bcm_sf2_set_rxnfc,
 886	.port_mirror_add	= b53_mirror_add,
 887	.port_mirror_del	= b53_mirror_del,
 888};
 889
 890struct bcm_sf2_of_data {
 891	u32 type;
 892	const u16 *reg_offsets;
 893	unsigned int core_reg_align;
 894	unsigned int num_cfp_rules;
 895};
 896
 897/* Register offsets for the SWITCH_REG_* block */
 898static const u16 bcm_sf2_7445_reg_offsets[] = {
 899	[REG_SWITCH_CNTRL]	= 0x00,
 900	[REG_SWITCH_STATUS]	= 0x04,
 901	[REG_DIR_DATA_WRITE]	= 0x08,
 902	[REG_DIR_DATA_READ]	= 0x0C,
 903	[REG_SWITCH_REVISION]	= 0x18,
 904	[REG_PHY_REVISION]	= 0x1C,
 905	[REG_SPHY_CNTRL]	= 0x2C,
 906	[REG_RGMII_0_CNTRL]	= 0x34,
 907	[REG_RGMII_1_CNTRL]	= 0x40,
 908	[REG_RGMII_2_CNTRL]	= 0x4c,
 909	[REG_LED_0_CNTRL]	= 0x90,
 910	[REG_LED_1_CNTRL]	= 0x94,
 911	[REG_LED_2_CNTRL]	= 0x98,
 912};
 913
 914static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
 915	.type		= BCM7445_DEVICE_ID,
 916	.core_reg_align	= 0,
 917	.reg_offsets	= bcm_sf2_7445_reg_offsets,
 918	.num_cfp_rules	= 256,
 919};
 920
 921static const u16 bcm_sf2_7278_reg_offsets[] = {
 922	[REG_SWITCH_CNTRL]	= 0x00,
 923	[REG_SWITCH_STATUS]	= 0x04,
 924	[REG_DIR_DATA_WRITE]	= 0x08,
 925	[REG_DIR_DATA_READ]	= 0x0c,
 926	[REG_SWITCH_REVISION]	= 0x10,
 927	[REG_PHY_REVISION]	= 0x14,
 928	[REG_SPHY_CNTRL]	= 0x24,
 929	[REG_RGMII_0_CNTRL]	= 0xe0,
 930	[REG_RGMII_1_CNTRL]	= 0xec,
 931	[REG_RGMII_2_CNTRL]	= 0xf8,
 932	[REG_LED_0_CNTRL]	= 0x40,
 933	[REG_LED_1_CNTRL]	= 0x4c,
 934	[REG_LED_2_CNTRL]	= 0x58,
 935};
 936
 937static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
 938	.type		= BCM7278_DEVICE_ID,
 939	.core_reg_align	= 1,
 940	.reg_offsets	= bcm_sf2_7278_reg_offsets,
 941	.num_cfp_rules	= 128,
 942};
 943
 944static const struct of_device_id bcm_sf2_of_match[] = {
 945	{ .compatible = "brcm,bcm7445-switch-v4.0",
 946	  .data = &bcm_sf2_7445_data
 947	},
 948	{ .compatible = "brcm,bcm7278-switch-v4.0",
 949	  .data = &bcm_sf2_7278_data
 950	},
 951	{ .compatible = "brcm,bcm7278-switch-v4.8",
 952	  .data = &bcm_sf2_7278_data
 953	},
 954	{ /* sentinel */ },
 955};
 956MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
 957
 958static int bcm_sf2_sw_probe(struct platform_device *pdev)
 959{
 960	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
 961	struct device_node *dn = pdev->dev.of_node;
 962	const struct of_device_id *of_id = NULL;
 963	const struct bcm_sf2_of_data *data;
 964	struct b53_platform_data *pdata;
 965	struct dsa_switch_ops *ops;
 966	struct bcm_sf2_priv *priv;
 967	struct b53_device *dev;
 968	struct dsa_switch *ds;
 969	void __iomem **base;
 970	struct resource *r;
 971	unsigned int i;
 972	u32 reg, rev;
 973	int ret;
 974
 975	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 976	if (!priv)
 977		return -ENOMEM;
 978
 979	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
 980	if (!ops)
 981		return -ENOMEM;
 982
 983	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
 984	if (!dev)
 985		return -ENOMEM;
 986
 987	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 988	if (!pdata)
 989		return -ENOMEM;
 990
 991	of_id = of_match_node(bcm_sf2_of_match, dn);
 992	if (!of_id || !of_id->data)
 993		return -EINVAL;
 994
 995	data = of_id->data;
 996
 997	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
 998	priv->type = data->type;
 999	priv->reg_offsets = data->reg_offsets;
1000	priv->core_reg_align = data->core_reg_align;
1001	priv->num_cfp_rules = data->num_cfp_rules;
1002
1003	/* Auto-detection using standard registers will not work, so
1004	 * provide an indication of what kind of device we are for
1005	 * b53_common to work with
1006	 */
1007	pdata->chip_id = priv->type;
1008	dev->pdata = pdata;
1009
1010	priv->dev = dev;
1011	ds = dev->ds;
1012	ds->ops = &bcm_sf2_ops;
1013
1014	/* Advertise the 8 egress queues */
1015	ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;
1016
1017	dev_set_drvdata(&pdev->dev, priv);
1018
1019	spin_lock_init(&priv->indir_lock);
1020	mutex_init(&priv->stats_mutex);
1021	mutex_init(&priv->cfp.lock);
1022
1023	/* CFP rule #0 cannot be used for specific classifications, flag it as
1024	 * permanently used
1025	 */
1026	set_bit(0, priv->cfp.used);
1027	set_bit(0, priv->cfp.unique);
1028
1029	bcm_sf2_identify_ports(priv, dn->child);
1030
1031	priv->irq0 = irq_of_parse_and_map(dn, 0);
1032	priv->irq1 = irq_of_parse_and_map(dn, 1);
1033
1034	base = &priv->core;
1035	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
1036		r = platform_get_resource(pdev, IORESOURCE_MEM, i);
1037		*base = devm_ioremap_resource(&pdev->dev, r);
1038		if (IS_ERR(*base)) {
1039			pr_err("unable to find register: %s\n", reg_names[i]);
1040			return PTR_ERR(*base);
1041		}
1042		base++;
1043	}
1044
1045	ret = bcm_sf2_sw_rst(priv);
1046	if (ret) {
1047		pr_err("unable to software reset switch: %d\n", ret);
1048		return ret;
1049	}
1050
1051	ret = bcm_sf2_mdio_register(ds);
1052	if (ret) {
1053		pr_err("failed to register MDIO bus\n");
1054		return ret;
1055	}
1056
1057	ret = bcm_sf2_cfp_rst(priv);
1058	if (ret) {
1059		pr_err("failed to reset CFP\n");
1060		goto out_mdio;
1061	}
1062
1063	/* Disable all interrupts and request them */
1064	bcm_sf2_intr_disable(priv);
1065
1066	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
1067			       "switch_0", priv);
1068	if (ret < 0) {
1069		pr_err("failed to request switch_0 IRQ\n");
1070		goto out_mdio;
1071	}
1072
1073	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
1074			       "switch_1", priv);
1075	if (ret < 0) {
1076		pr_err("failed to request switch_1 IRQ\n");
1077		goto out_mdio;
1078	}
1079
1080	/* Reset the MIB counters */
1081	reg = core_readl(priv, CORE_GMNCFGCFG);
1082	reg |= RST_MIB_CNT;
1083	core_writel(priv, reg, CORE_GMNCFGCFG);
1084	reg &= ~RST_MIB_CNT;
1085	core_writel(priv, reg, CORE_GMNCFGCFG);
1086
1087	/* Get the maximum number of ports for this switch */
1088	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
1089	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
1090		priv->hw_params.num_ports = DSA_MAX_PORTS;
1091
1092	/* Assume a single GPHY setup if we can't read that property */
1093	if (of_property_read_u32(dn, "brcm,num-gphy",
1094				 &priv->hw_params.num_gphy))
1095		priv->hw_params.num_gphy = 1;
1096
1097	rev = reg_readl(priv, REG_SWITCH_REVISION);
1098	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
1099					SWITCH_TOP_REV_MASK;
1100	priv->hw_params.core_rev = (rev & SF2_REV_MASK);
1101
1102	rev = reg_readl(priv, REG_PHY_REVISION);
1103	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
1104
1105	ret = b53_switch_register(dev);
1106	if (ret)
1107		goto out_mdio;
1108
1109	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
1110		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
1111		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
1112		priv->core, priv->irq0, priv->irq1);
1113
1114	return 0;
1115
1116out_mdio:
1117	bcm_sf2_mdio_unregister(priv);
1118	return ret;
1119}
1120
1121static int bcm_sf2_sw_remove(struct platform_device *pdev)
1122{
1123	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1124
1125	/* Disable all ports and interrupts */
1126	priv->wol_ports_mask = 0;
1127	bcm_sf2_sw_suspend(priv->dev->ds);
1128	dsa_unregister_switch(priv->dev->ds);
1129	bcm_sf2_mdio_unregister(priv);
1130
1131	return 0;
1132}
1133
1134static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
1135{
1136	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1137
1138	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
1139	 * successful MDIO bus scan to occur. If we did turn off the GPHY
1140	 * before (e.g: port_disable), this will also power it back on.
1141	 *
1142	 * Do not rely on kexec_in_progress, just power the PHY on.
1143	 */
1144	if (priv->hw_params.num_gphy == 1)
1145		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
1146}
1147
1148#ifdef CONFIG_PM_SLEEP
1149static int bcm_sf2_suspend(struct device *dev)
1150{
1151	struct platform_device *pdev = to_platform_device(dev);
1152	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1153
1154	return dsa_switch_suspend(priv->dev->ds);
1155}
1156
1157static int bcm_sf2_resume(struct device *dev)
1158{
1159	struct platform_device *pdev = to_platform_device(dev);
1160	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1161
1162	return dsa_switch_resume(priv->dev->ds);
1163}
1164#endif /* CONFIG_PM_SLEEP */
1165
1166static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
1167			 bcm_sf2_suspend, bcm_sf2_resume);
1168
1169
1170static struct platform_driver bcm_sf2_driver = {
1171	.probe	= bcm_sf2_sw_probe,
1172	.remove	= bcm_sf2_sw_remove,
1173	.shutdown = bcm_sf2_sw_shutdown,
1174	.driver = {
1175		.name = "brcm-sf2",
1176		.of_match_table = bcm_sf2_of_match,
1177		.pm = &bcm_sf2_pm_ops,
1178	},
1179};
1180module_platform_driver(bcm_sf2_driver);
1181
1182MODULE_AUTHOR("Broadcom Corporation");
1183MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
1184MODULE_LICENSE("GPL");
1185MODULE_ALIAS("platform:brcm-sf2");
v6.13.7 (drivers/net/dsa/bcm_sf2.c)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Broadcom Starfighter 2 DSA switch driver
   4 *
   5 * Copyright (C) 2014, Broadcom Corporation
   6 */
   7
   8#include <linux/list.h>
   9#include <linux/module.h>
  10#include <linux/netdevice.h>
  11#include <linux/interrupt.h>
  12#include <linux/platform_device.h>
  13#include <linux/phy.h>
  14#include <linux/phy_fixed.h>
  15#include <linux/phylink.h>
  16#include <linux/mii.h>
  17#include <linux/clk.h>
  18#include <linux/of.h>
  19#include <linux/of_irq.h>
  20#include <linux/of_address.h>
  21#include <linux/of_net.h>
  22#include <linux/of_mdio.h>
  23#include <net/dsa.h>
  24#include <linux/ethtool.h>
  25#include <linux/if_bridge.h>
  26#include <linux/brcmphy.h>
  27#include <linux/etherdevice.h>
  28#include <linux/platform_data/b53.h>
  29
  30#include "bcm_sf2.h"
  31#include "bcm_sf2_regs.h"
  32#include "b53/b53_priv.h"
  33#include "b53/b53_regs.h"
  34
  35static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
  36{
  37	switch (priv->type) {
  38	case BCM4908_DEVICE_ID:
  39		switch (port) {
  40		case 7:
  41			return REG_RGMII_11_CNTRL;
  42		default:
  43			break;
  44		}
  45		break;
  46	default:
  47		switch (port) {
  48		case 0:
  49			return REG_RGMII_0_CNTRL;
  50		case 1:
  51			return REG_RGMII_1_CNTRL;
  52		case 2:
  53			return REG_RGMII_2_CNTRL;
  54		default:
  55			break;
  56		}
  57	}
  58
  59	WARN_ONCE(1, "Unsupported port %d\n", port);
  60
  61	/* RO fallback reg */
  62	return REG_SWITCH_STATUS;
  63}
  64
  65static u16 bcm_sf2_reg_led_base(struct bcm_sf2_priv *priv, int port)
  66{
  67	switch (port) {
  68	case 0:
  69		return REG_LED_0_CNTRL;
  70	case 1:
  71		return REG_LED_1_CNTRL;
  72	case 2:
  73		return REG_LED_2_CNTRL;
  74	}
  75
  76	switch (priv->type) {
  77	case BCM4908_DEVICE_ID:
  78		switch (port) {
  79		case 3:
  80			return REG_LED_3_CNTRL;
  81		case 7:
  82			return REG_LED_4_CNTRL;
  83		default:
  84			break;
  85		}
  86		break;
  87	default:
  88		break;
  89	}
  90
  91	WARN_ONCE(1, "Unsupported port %d\n", port);
  92
  93	/* RO fallback reg */
  94	return REG_SWITCH_STATUS;
  95}
  96
  97static u32 bcm_sf2_port_override_offset(struct bcm_sf2_priv *priv, int port)
  98{
  99	switch (priv->type) {
 100	case BCM4908_DEVICE_ID:
 101	case BCM7445_DEVICE_ID:
 102		return port == 8 ? CORE_STS_OVERRIDE_IMP :
 103				   CORE_STS_OVERRIDE_GMIIP_PORT(port);
 104	case BCM7278_DEVICE_ID:
 105		return port == 8 ? CORE_STS_OVERRIDE_IMP2 :
 106				   CORE_STS_OVERRIDE_GMIIP2_PORT(port);
 107	default:
 108		WARN_ONCE(1, "Unsupported device: %d\n", priv->type);
 109	}
 110
 111	/* RO fallback register */
 112	return REG_SWITCH_STATUS;
 113}
 114
 115/* Return the number of active ports, not counting the IMP (CPU) port */
 116static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
 117{
 118	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 119	unsigned int port, count = 0;
 120
 121	for (port = 0; port < ds->num_ports; port++) {
 122		if (dsa_is_cpu_port(ds, port))
 123			continue;
 124		if (priv->port_sts[port].enabled)
 125			count++;
 126	}
 127
 128	return count;
 129}
 130
 131static void bcm_sf2_recalc_clock(struct dsa_switch *ds)
 132{
 133	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 134	unsigned long new_rate;
 135	unsigned int ports_active;
  136	/* Frequency in Hz */
 137	static const unsigned long rate_table[] = {
 138		59220000,
 139		60820000,
 140		62500000,
 141		62500000,
 142	};
 143
 144	ports_active = bcm_sf2_num_active_ports(ds);
 145	if (ports_active == 0 || !priv->clk_mdiv)
 146		return;
 147
 148	/* If we overflow our table, just use the recommended operational
 149	 * frequency
 150	 */
 151	if (ports_active > ARRAY_SIZE(rate_table))
 152		new_rate = 90000000;
 153	else
 154		new_rate = rate_table[ports_active - 1];
 155	clk_set_rate(priv->clk_mdiv, new_rate);
 156}
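/* Illustration (added note): with two user ports active this selects
 * rate_table[1] == 60820000 (60.82 MHz); with more than four active ports
 * it falls back to the fixed 90 MHz rate, and with no active ports (or no
 * clk_mdiv) the divider clock is left untouched.
 */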
 157
 158static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 159{
 160	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 161	unsigned int i;
 162	u32 reg;
 163
 164	/* Enable the port memories */
 165	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
 166	reg &= ~P_TXQ_PSM_VDD(port);
 167	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
 168
 169	/* Enable forwarding */
 170	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
 171
 172	/* Enable IMP port in dumb mode */
 173	reg = core_readl(priv, CORE_SWITCH_CTRL);
 174	reg |= MII_DUMB_FWDG_EN;
 175	core_writel(priv, reg, CORE_SWITCH_CTRL);
 176
 177	/* Configure Traffic Class to QoS mapping, allow each priority to map
 178	 * to a different queue number
 179	 */
 180	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
 181	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
 182		reg |= i << (PRT_TO_QID_SHIFT * i);
 183	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
 184
 185	b53_brcm_hdr_setup(ds, port);
 186
 187	if (port == 8) {
 188		/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
 189		reg = core_readl(priv, CORE_IMP_CTL);
 190		reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
 191		reg &= ~(RX_DIS | TX_DIS);
 192		core_writel(priv, reg, CORE_IMP_CTL);
 193	} else {
 194		reg = core_readl(priv, CORE_G_PCTL_PORT(port));
 195		reg &= ~(RX_DIS | TX_DIS);
 196		core_writel(priv, reg, CORE_G_PCTL_PORT(port));
 197	}
 198
 199	priv->port_sts[port].enabled = true;
 200}
 201
 202static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
 203{
 204	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 205	u32 reg;
 206
 207	reg = reg_readl(priv, REG_SPHY_CNTRL);
 208	if (enable) {
 209		reg |= PHY_RESET;
 210		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
 211		reg_writel(priv, reg, REG_SPHY_CNTRL);
 212		udelay(21);
 213		reg = reg_readl(priv, REG_SPHY_CNTRL);
 214		reg &= ~PHY_RESET;
 215	} else {
 216		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
 217		reg_writel(priv, reg, REG_SPHY_CNTRL);
 218		mdelay(1);
 219		reg |= CK25_DIS;
 220	}
 221	reg_writel(priv, reg, REG_SPHY_CNTRL);
 222
 223	/* Use PHY-driven LED signaling */
 224	if (!enable) {
 225		u16 led_ctrl = bcm_sf2_reg_led_base(priv, 0);
 226
 227		if (priv->type == BCM7278_DEVICE_ID ||
 228		    priv->type == BCM7445_DEVICE_ID) {
 229			reg = reg_led_readl(priv, led_ctrl, 0);
 230			reg |= LED_CNTRL_SPDLNK_SRC_SEL;
 231			reg_led_writel(priv, reg, led_ctrl, 0);
 232		}
 233	}
 234}
 235
 236static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
 237					    int port)
 238{
 239	unsigned int off;
 240
 241	switch (port) {
 242	case 7:
 243		off = P7_IRQ_OFF;
 244		break;
 245	case 0:
 246		/* Port 0 interrupts are located on the first bank */
 247		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
 248		return;
 249	default:
 250		off = P_IRQ_OFF(port);
 251		break;
 252	}
 253
 254	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
 255}
 256
 257static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
 258					     int port)
 259{
 260	unsigned int off;
 261
 262	switch (port) {
 263	case 7:
 264		off = P7_IRQ_OFF;
 265		break;
 266	case 0:
 267		/* Port 0 interrupts are located on the first bank */
 268		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
 269		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
 270		return;
 271	default:
 272		off = P_IRQ_OFF(port);
 273		break;
 274	}
 275
 276	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
 277	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
 278}
 279
 280static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
 281			      struct phy_device *phy)
 282{
 283	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 284	unsigned int i;
 285	u32 reg;
 286
 287	if (!dsa_is_user_port(ds, port))
 288		return 0;
 289
 290	priv->port_sts[port].enabled = true;
 291
 292	bcm_sf2_recalc_clock(ds);
 293
 294	/* Clear the memory power down */
 295	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
 296	reg &= ~P_TXQ_PSM_VDD(port);
 297	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
 298
 299	/* Enable Broadcom tags for that port if requested */
 300	if (priv->brcm_tag_mask & BIT(port))
 301		b53_brcm_hdr_setup(ds, port);
 302
 303	/* Configure Traffic Class to QoS mapping, allow each priority to map
 304	 * to a different queue number
 305	 */
 306	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
 307	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
 308		reg |= i << (PRT_TO_QID_SHIFT * i);
 309	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
 310
 311	/* Re-enable the GPHY and re-apply workarounds */
 312	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
 313		bcm_sf2_gphy_enable_set(ds, true);
 314		if (phy) {
 315			/* if phy_stop() has been called before, phy
 316			 * will be in halted state, and phy_start()
 317			 * will call resume.
 318			 *
 319			 * the resume path does not configure back
 320			 * autoneg settings, and since we hard reset
 321			 * the phy manually here, we need to reset the
 322			 * state machine also.
 323			 */
 324			phy->state = PHY_READY;
 325			phy_init_hw(phy);
 326		}
 327	}
 328
 329	/* Enable MoCA port interrupts to get notified */
 330	if (port == priv->moca_port)
 331		bcm_sf2_port_intr_enable(priv, port);
 332
 333	/* Set per-queue pause threshold to 32 */
 334	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));
 335
 336	/* Set ACB threshold to 24 */
 337	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
 338		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
 339						    SF2_NUM_EGRESS_QUEUES + i));
 340		reg &= ~XOFF_THRESHOLD_MASK;
 341		reg |= 24;
 342		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
 343						    SF2_NUM_EGRESS_QUEUES + i));
 344	}
 345
 346	return b53_enable_port(ds, port, phy);
 347}
 348
 349static void bcm_sf2_port_disable(struct dsa_switch *ds, int port)
 350{
 351	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 352	u32 reg;
 353
 354	/* Disable learning while in WoL mode */
 355	if (priv->wol_ports_mask & (1 << port)) {
 356		reg = core_readl(priv, CORE_DIS_LEARN);
 357		reg |= BIT(port);
 358		core_writel(priv, reg, CORE_DIS_LEARN);
 359		return;
 360	}
 361
 362	if (port == priv->moca_port)
 363		bcm_sf2_port_intr_disable(priv, port);
 364
 365	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
 366		bcm_sf2_gphy_enable_set(ds, false);
 367
 368	b53_disable_port(ds, port);
 369
 370	/* Power down the port memory */
 371	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
 372	reg |= P_TXQ_PSM_VDD(port);
 373	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
 374
 375	priv->port_sts[port].enabled = false;
 376
 377	bcm_sf2_recalc_clock(ds);
 378}
 379
 380
 381static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
 382			       int regnum, u16 val)
 383{
 384	int ret = 0;
 385	u32 reg;
 386
 387	reg = reg_readl(priv, REG_SWITCH_CNTRL);
 388	reg |= MDIO_MASTER_SEL;
 389	reg_writel(priv, reg, REG_SWITCH_CNTRL);
 390
 391	/* Page << 8 | offset */
 392	reg = 0x70;
 393	reg <<= 2;
 394	core_writel(priv, addr, reg);
 395
 396	/* Page << 8 | offset */
 397	reg = 0x80 << 8 | regnum << 1;
 398	reg <<= 2;
 399
 400	if (op)
 401		ret = core_readl(priv, reg);
 402	else
 403		core_writel(priv, val, reg);
 404
 405	reg = reg_readl(priv, REG_SWITCH_CNTRL);
 406	reg &= ~MDIO_MASTER_SEL;
 407	reg_writel(priv, reg, REG_SWITCH_CNTRL);
 408
 409	return ret & 0xffff;
 410}
 411
 412static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
 413{
 414	struct bcm_sf2_priv *priv = bus->priv;
 415
 416	/* Intercept reads from Broadcom pseudo-PHY address, else, send
 417	 * them to our master MDIO bus controller
 418	 */
 419	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
 420		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
 421	else
 422		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
 423}
 424
 425static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
 426				 u16 val)
 427{
 428	struct bcm_sf2_priv *priv = bus->priv;
 429
 430	/* Intercept writes to the Broadcom pseudo-PHY address, else,
 431	 * send them to our master MDIO bus controller
 432	 */
 433	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
 434		return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
 435	else
 436		return mdiobus_write_nested(priv->master_mii_bus, addr,
 437				regnum, val);
 438}
 439
 440static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
 441{
 442	struct dsa_switch *ds = dev_id;
 443	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 444
 445	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
 446				~priv->irq0_mask;
 447	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
 448
 449	return IRQ_HANDLED;
 450}
 451
 452static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
 453{
 454	struct dsa_switch *ds = dev_id;
 455	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 456
 457	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
 458				~priv->irq1_mask;
 459	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 460
 461	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
 462		priv->port_sts[7].link = true;
 463		dsa_port_phylink_mac_change(ds, 7, true);
 464	}
 465	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
 466		priv->port_sts[7].link = false;
 467		dsa_port_phylink_mac_change(ds, 7, false);
 468	}
 469
 470	return IRQ_HANDLED;
 471}
 472
 473static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
 474{
 475	unsigned int timeout = 1000;
 476	u32 reg;
 477	int ret;
 478
 479	/* The watchdog reset does not work on 7278, we need to hit the
 480	 * "external" reset line through the reset controller.
 481	 */
 482	if (priv->type == BCM7278_DEVICE_ID) {
 483		ret = reset_control_assert(priv->rcdev);
 484		if (ret)
 485			return ret;
 486
 487		return reset_control_deassert(priv->rcdev);
 488	}
 489
 490	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
 491	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
 492	core_writel(priv, reg, CORE_WATCHDOG_CTRL);
 493
 494	do {
 495		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
 496		if (!(reg & SOFTWARE_RESET))
 497			break;
 498
 499		usleep_range(1000, 2000);
 500	} while (timeout-- > 0);
 501
 502	if (timeout == 0)
 503		return -ETIMEDOUT;
 504
 505	return 0;
 506}
 507
 508static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv)
 509{
 510	struct device *dev = priv->dev->ds->dev;
 511	int shift;
 512	u32 mask;
 513	u32 reg;
 514	int i;
 515
 516	mask = BIT(priv->num_crossbar_ext_bits) - 1;
 517
 518	reg = reg_readl(priv, REG_CROSSBAR);
 519	switch (priv->type) {
 520	case BCM4908_DEVICE_ID:
 521		shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_ext_bits;
 522		reg &= ~(mask << shift);
 523		if (0) /* FIXME */
 524			reg |= CROSSBAR_BCM4908_EXT_SERDES << shift;
 525		else if (priv->int_phy_mask & BIT(7))
 526			reg |= CROSSBAR_BCM4908_EXT_GPHY4 << shift;
 527		else if (phy_interface_mode_is_rgmii(priv->port_sts[7].mode))
 528			reg |= CROSSBAR_BCM4908_EXT_RGMII << shift;
 529		else if (WARN(1, "Invalid port mode\n"))
 530			return;
 531		break;
 532	default:
 533		return;
 534	}
 535	reg_writel(priv, reg, REG_CROSSBAR);
 536
 537	reg = reg_readl(priv, REG_CROSSBAR);
 538	for (i = 0; i < priv->num_crossbar_int_ports; i++) {
 539		shift = i * priv->num_crossbar_ext_bits;
 540
 541		dev_dbg(dev, "crossbar int port #%d - ext port #%d\n", i,
 542			(reg >> shift) & mask);
 543	}
 544}
 545
 546static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
 547{
 548	intrl2_0_mask_set(priv, 0xffffffff);
 549	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
 550	intrl2_1_mask_set(priv, 0xffffffff);
 551	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
 552}
 553
 554static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
 555				   struct device_node *dn)
 556{
 557	struct device *dev = priv->dev->ds->dev;
 558	struct bcm_sf2_port_status *port_st;
 559	struct device_node *port;
 560	unsigned int port_num;
 561	struct property *prop;
 562	int err;
 563
 564	priv->moca_port = -1;
 565
 566	for_each_available_child_of_node(dn, port) {
 567		if (of_property_read_u32(port, "reg", &port_num))
 568			continue;
 569
 570		if (port_num >= DSA_MAX_PORTS) {
 571			dev_err(dev, "Invalid port number %d\n", port_num);
 572			continue;
 573		}
 574
 575		port_st = &priv->port_sts[port_num];
 576
 577		/* Internal PHYs get assigned a specific 'phy-mode' property
 578		 * value: "internal" to help flag them before MDIO probing
 579		 * has completed, since they might be turned off at that
 580		 * time
 581		 */
 582		err = of_get_phy_mode(port, &port_st->mode);
 583		if (err)
 584			continue;
 585
 586		if (port_st->mode == PHY_INTERFACE_MODE_INTERNAL)
 587			priv->int_phy_mask |= 1 << port_num;
 588
 589		if (port_st->mode == PHY_INTERFACE_MODE_MOCA)
 590			priv->moca_port = port_num;
 591
 592		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
 593			priv->brcm_tag_mask |= 1 << port_num;
 594
 595		/* Ensure that port 5 is not picked up as a DSA CPU port
 596		 * flavour but a regular port instead. We should be using
 597		 * devlink to be able to set the port flavour.
 598		 */
 599		if (port_num == 5 && priv->type == BCM7278_DEVICE_ID) {
 600			prop = of_find_property(port, "ethernet", NULL);
 601			if (prop)
 602				of_remove_property(port, prop);
 603		}
 604	}
 605}
 606
 607static int bcm_sf2_mdio_register(struct dsa_switch *ds)
 608{
 609	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 610	struct device_node *dn, *child;
 611	struct phy_device *phydev;
 612	struct property *prop;
 613	static int index;
 614	int err, reg;
 615
 616	/* Find our integrated MDIO bus node */
 617	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
 618	priv->master_mii_bus = of_mdio_find_bus(dn);
 619	if (!priv->master_mii_bus) {
 620		err = -EPROBE_DEFER;
 621		goto err_of_node_put;
 622	}
 623
 624	priv->user_mii_bus = mdiobus_alloc();
 625	if (!priv->user_mii_bus) {
 626		err = -ENOMEM;
 627		goto err_put_master_mii_bus_dev;
 628	}
 629
 630	priv->user_mii_bus->priv = priv;
 631	priv->user_mii_bus->name = "sf2 user mii";
 632	priv->user_mii_bus->read = bcm_sf2_sw_mdio_read;
 633	priv->user_mii_bus->write = bcm_sf2_sw_mdio_write;
 634	snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
 635		 index++);
 636
 637	/* Include the pseudo-PHY address to divert reads towards our
 638	 * workaround. This is only required for 7445D0, since 7445E0
 639	 * disconnects the internal switch pseudo-PHY such that we can use the
 640	 * regular SWITCH_MDIO master controller instead.
 641	 *
 642	 * Here we flag the pseudo PHY as needing special treatment and would
 643	 * otherwise make all other PHY read/writes go to the master MDIO bus
 644	 * controller that comes with this switch backed by the "mdio-unimac"
 645	 * driver.
 646	 */
 647	if (of_machine_is_compatible("brcm,bcm7445d0"))
 648		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0);
 649	else
 650		priv->indir_phy_mask = 0;
 651
 652	ds->phys_mii_mask = priv->indir_phy_mask;
 653	ds->user_mii_bus = priv->user_mii_bus;
 654	priv->user_mii_bus->parent = ds->dev->parent;
 655	priv->user_mii_bus->phy_mask = ~priv->indir_phy_mask;
 656
 657	/* We need to make sure that of_phy_connect() will not work by
 658	 * removing the 'phandle' and 'linux,phandle' properties and
 659	 * unregister the existing PHY device that was already registered.
 660	 */
 661	for_each_available_child_of_node(dn, child) {
 662		if (of_property_read_u32(child, "reg", &reg) ||
 663		    reg >= PHY_MAX_ADDR)
 664			continue;
 665
 666		if (!(priv->indir_phy_mask & BIT(reg)))
 667			continue;
 668
 669		prop = of_find_property(child, "phandle", NULL);
 670		if (prop)
 671			of_remove_property(child, prop);
 672
 673		prop = of_find_property(child, "linux,phandle", NULL);
 674		if (prop)
 675			of_remove_property(child, prop);
 676
 677		phydev = of_phy_find_device(child);
 678		if (phydev) {
 679			phy_device_remove(phydev);
 680			phy_device_free(phydev);
 681		}
 682	}
 683
 684	err = mdiobus_register(priv->user_mii_bus);
 685	if (err)
 686		goto err_free_user_mii_bus;
 687
 688	of_node_put(dn);
 689
 690	return 0;
 691
 692err_free_user_mii_bus:
 693	mdiobus_free(priv->user_mii_bus);
 694err_put_master_mii_bus_dev:
 695	put_device(&priv->master_mii_bus->dev);
 696err_of_node_put:
 697	of_node_put(dn);
 698	return err;
 699}
 700
 701static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
 702{
 703	mdiobus_unregister(priv->user_mii_bus);
 704	mdiobus_free(priv->user_mii_bus);
 705	put_device(&priv->master_mii_bus->dev);
 706}
 707
 708static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
 709{
 710	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 711
 712	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
 713	 * in bits 15:8 and the patch level in bits 7:0, which is exactly
 714	 * the layout of the REG_PHY_REVISION register.
 715	 */
 716	if (priv->int_phy_mask & BIT(port))
 717		return priv->hw_params.gphy_rev;
 718	else
 719		return PHY_BRCM_AUTO_PWRDWN_ENABLE |
 720		       PHY_BRCM_DIS_TXCRXC_NOENRGY |
 721		       PHY_BRCM_IDDQ_SUSPEND;
 722}
 723
 724static void bcm_sf2_sw_get_caps(struct dsa_switch *ds, int port,
 725				struct phylink_config *config)
 726{
 727	unsigned long *interfaces = config->supported_interfaces;
 728	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 729
 730	if (priv->int_phy_mask & BIT(port)) {
 731		__set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
 732	} else if (priv->moca_port == port) {
 733		__set_bit(PHY_INTERFACE_MODE_MOCA, interfaces);
 734	} else {
 735		__set_bit(PHY_INTERFACE_MODE_MII, interfaces);
 736		__set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
 737		__set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
 738		phy_interface_set_rgmii(interfaces);
 739	}
 740
 741	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
 742		MAC_10 | MAC_100 | MAC_1000;
 743}
 744
 745static void bcm_sf2_sw_mac_config(struct phylink_config *config,
 746				  unsigned int mode,
 747				  const struct phylink_link_state *state)
 748{
 749	struct dsa_port *dp = dsa_phylink_to_port(config);
 750	u32 id_mode_dis = 0, port_mode;
 751	struct bcm_sf2_priv *priv;
 752	u32 reg_rgmii_ctrl;
 753	u32 reg;
 754
 755	priv = bcm_sf2_to_priv(dp->ds);
 756
 757	if (dp->index == core_readl(priv, CORE_IMP0_PRT_ID))
 758		return;
 759
 760	switch (state->interface) {
 761	case PHY_INTERFACE_MODE_RGMII:
 762		id_mode_dis = 1;
 763		fallthrough;
 764	case PHY_INTERFACE_MODE_RGMII_TXID:
 765		port_mode = EXT_GPHY;
 766		break;
 767	case PHY_INTERFACE_MODE_MII:
 768		port_mode = EXT_EPHY;
 769		break;
 770	case PHY_INTERFACE_MODE_REVMII:
 771		port_mode = EXT_REVMII;
 772		break;
 773	default:
 774		/* Nothing required for all other PHYs: internal and MoCA */
 775		return;
 776	}
 777
 778	reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, dp->index);
 779
 780	/* Clear id_mode_dis bit, and the existing port mode, let
 781	 * RGMII_MODE_EN be set by mac_link_{up,down}
 782	 */
 783	reg = reg_readl(priv, reg_rgmii_ctrl);
 784	reg &= ~ID_MODE_DIS;
 785	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
 786
 787	reg |= port_mode;
 788	if (id_mode_dis)
 789		reg |= ID_MODE_DIS;
 790
 791	reg_writel(priv, reg, reg_rgmii_ctrl);
 792}
 793
 794static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
 795				    phy_interface_t interface, bool link)
 796{
 797	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 798	u32 reg_rgmii_ctrl;
 799	u32 reg;
 800
 801	if (!phy_interface_mode_is_rgmii(interface) &&
 802	    interface != PHY_INTERFACE_MODE_MII &&
 803	    interface != PHY_INTERFACE_MODE_REVMII)
 804		return;
 805
 806	reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
 807
 808	/* If the link is down, just disable the interface to conserve power */
 809	reg = reg_readl(priv, reg_rgmii_ctrl);
 810	if (link)
 811		reg |= RGMII_MODE_EN;
 812	else
 813		reg &= ~RGMII_MODE_EN;
 814	reg_writel(priv, reg, reg_rgmii_ctrl);
 815}
 816
 817static void bcm_sf2_sw_mac_link_down(struct phylink_config *config,
 818				     unsigned int mode,
 819				     phy_interface_t interface)
 820{
 821	struct dsa_port *dp = dsa_phylink_to_port(config);
 822	struct bcm_sf2_priv *priv;
 823	int port = dp->index;
 824	u32 reg, offset;
 825
 826	priv = bcm_sf2_to_priv(dp->ds);
 827	if (priv->wol_ports_mask & BIT(port))
 828		return;
 829
 830	offset = bcm_sf2_port_override_offset(priv, port);
 831	reg = core_readl(priv, offset);
 832	reg &= ~LINK_STS;
 833	core_writel(priv, reg, offset);
 834
 835	bcm_sf2_sw_mac_link_set(dp->ds, port, interface, false);
 836}
 837
 838static void bcm_sf2_sw_mac_link_up(struct phylink_config *config,
 839				   struct phy_device *phydev,
 840				   unsigned int mode,
 841				   phy_interface_t interface,
 842				   int speed, int duplex,
 843				   bool tx_pause, bool rx_pause)
 844{
 845	struct dsa_port *dp = dsa_phylink_to_port(config);
 846	struct bcm_sf2_priv *priv;
 847	u32 reg_rgmii_ctrl = 0;
 848	struct ethtool_keee *p;
 849	int port = dp->index;
 850	u32 reg, offset;
 851
 852	bcm_sf2_sw_mac_link_set(dp->ds, port, interface, true);
 853
 854	priv = bcm_sf2_to_priv(dp->ds);
 855	offset = bcm_sf2_port_override_offset(priv, port);
 856
 857	if (phy_interface_mode_is_rgmii(interface) ||
 858	    interface == PHY_INTERFACE_MODE_MII ||
 859	    interface == PHY_INTERFACE_MODE_REVMII) {
 860		reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
 861		reg = reg_readl(priv, reg_rgmii_ctrl);
 862		reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
 863
 864		if (tx_pause)
 865			reg |= TX_PAUSE_EN;
 866		if (rx_pause)
 867			reg |= RX_PAUSE_EN;
 868
 869		reg_writel(priv, reg, reg_rgmii_ctrl);
 870	}
 871
 872	reg = LINK_STS;
 873	if (port == 8) {
 874		if (priv->type == BCM4908_DEVICE_ID)
 875			reg |= GMII_SPEED_UP_2G;
 876		reg |= MII_SW_OR;
 877	} else {
 878		reg |= SW_OVERRIDE;
 879	}
 880
 881	switch (speed) {
 882	case SPEED_1000:
 883		reg |= SPDSTS_1000 << SPEED_SHIFT;
 884		break;
 885	case SPEED_100:
 886		reg |= SPDSTS_100 << SPEED_SHIFT;
 887		break;
 888	}
 889
 890	if (duplex == DUPLEX_FULL)
 891		reg |= DUPLX_MODE;
 892
 893	if (tx_pause)
 894		reg |= TXFLOW_CNTL;
 895	if (rx_pause)
 896		reg |= RXFLOW_CNTL;
 897
 898	core_writel(priv, reg, offset);
 899
 900	if (mode == MLO_AN_PHY && phydev) {
 901		p = &priv->dev->ports[port].eee;
 902		p->eee_enabled = b53_eee_init(dp->ds, port, phydev);
 903	}
 904}
 905
 906static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
 907				   struct phylink_link_state *status)
 908{
 909	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 910
 911	status->link = false;
 912
 913	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
 914	 * which means that we need to force the link at the port override
 915	 * level to get the data to flow. We use what the interrupt handler
 916	 * determined earlier.
 917	 *
 918	 * For the other ports, we just force the link status, since this is
 919	 * a fixed PHY device.
 920	 */
 921	if (port == priv->moca_port) {
 922		status->link = priv->port_sts[port].link;
 923		/* For MoCA interfaces, also force a link down notification
 924		 * since some versions of the user-space daemon (mocad) use
 925		 * cmd->autoneg to force the link, which messes up the PHY
 926		 * state machine and makes it go into PHY_FORCING state instead.
 927		 */
 928		if (!status->link)
 929			netif_carrier_off(dsa_to_port(ds, port)->user);
 930		status->duplex = DUPLEX_FULL;
 931	} else {
 932		status->link = true;
 933	}
 934}
 935
 936static void bcm_sf2_enable_acb(struct dsa_switch *ds)
 937{
 938	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 939	u32 reg;
 940
 941	/* Enable ACB globally */
 942	reg = acb_readl(priv, ACB_CONTROL);
 943	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
 944	acb_writel(priv, reg, ACB_CONTROL);
 945	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
 946	reg |= ACB_EN | ACB_ALGORITHM;
 947	acb_writel(priv, reg, ACB_CONTROL);
 948}
 949
 950static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
 951{
 952	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 953	unsigned int port;
 954
 955	bcm_sf2_intr_disable(priv);
 956
 957	/* Disable all physically present ports, including the IMP
 958	 * port; the other ones have already been disabled during
 959	 * bcm_sf2_sw_setup().
 960	 */
 961	for (port = 0; port < ds->num_ports; port++) {
 962		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
 963			bcm_sf2_port_disable(ds, port);
 964	}
 965
 966	if (!priv->wol_ports_mask)
 967		clk_disable_unprepare(priv->clk);
 968
 969	return 0;
 970}
 971
 972static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 973{
 974	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 975	int ret;
 976
 977	if (!priv->wol_ports_mask)
 978		clk_prepare_enable(priv->clk);
 979
 980	ret = bcm_sf2_sw_rst(priv);
 981	if (ret) {
 982		pr_err("%s: failed to software reset switch\n", __func__);
 983		return ret;
 984	}
 985
 986	bcm_sf2_crossbar_setup(priv);
 987
 988	ret = bcm_sf2_cfp_resume(ds);
 989	if (ret)
 990		return ret;
 991
 992	if (priv->hw_params.num_gphy == 1)
 993		bcm_sf2_gphy_enable_set(ds, true);
 994
 995	ds->ops->setup(ds);
 996
 997	return 0;
 998}
 999
1000static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
1001			       struct ethtool_wolinfo *wol)
1002{
1003	struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port));
1004	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
1005	struct ethtool_wolinfo pwol = { };
1006
1007	/* Get the parent device WoL settings */
1008	if (p->ethtool_ops->get_wol)
1009		p->ethtool_ops->get_wol(p, &pwol);
1010
1011	/* Advertise the parent device supported settings */
1012	wol->supported = pwol.supported;
1013	memset(&wol->sopass, 0, sizeof(wol->sopass));
1014
1015	if (pwol.wolopts & WAKE_MAGICSECURE)
1016		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
1017
1018	if (priv->wol_ports_mask & (1 << port))
1019		wol->wolopts = pwol.wolopts;
1020	else
1021		wol->wolopts = 0;
1022}
1023
1024static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
1025			      struct ethtool_wolinfo *wol)
1026{
1027	struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port));
1028	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
1029	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
1030	struct ethtool_wolinfo pwol =  { };
1031
1032	if (p->ethtool_ops->get_wol)
1033		p->ethtool_ops->get_wol(p, &pwol);
1034	if (wol->wolopts & ~pwol.supported)
1035		return -EINVAL;
1036
1037	if (wol->wolopts)
1038		priv->wol_ports_mask |= (1 << port);
1039	else
1040		priv->wol_ports_mask &= ~(1 << port);
1041
1042	/* If we have at least one port enabled, make sure the CPU port
1043	 * is also enabled. If the CPU port is the only one enabled, we
1044	 * disable it since this configuration does not make sense.
1045	 */
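    	/* Illustration (assumed port numbering): user ports 0 and 1 armed
    	 * with CPU port 8 ends up as
    	 * wol_ports_mask == BIT(0) | BIT(1) | BIT(8).
    	 */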
1046	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
1047		priv->wol_ports_mask |= (1 << cpu_port);
1048	else
1049		priv->wol_ports_mask &= ~(1 << cpu_port);
1050
1051	return p->ethtool_ops->set_wol(p, wol);
1052}
1053
1054static int bcm_sf2_sw_setup(struct dsa_switch *ds)
1055{
1056	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
1057	unsigned int port;
1058
1059	/* Enable all valid ports and disable those unused */
1060	for (port = 0; port < priv->hw_params.num_ports; port++) {
1061		/* IMP port receives special treatment */
1062		if (dsa_is_user_port(ds, port))
1063			bcm_sf2_port_setup(ds, port, NULL);
1064		else if (dsa_is_cpu_port(ds, port))
1065			bcm_sf2_imp_setup(ds, port);
1066		else
1067			bcm_sf2_port_disable(ds, port);
1068	}
1069
1070	b53_configure_vlan(ds);
1071	bcm_sf2_enable_acb(ds);
1072
1073	return b53_setup_devlink_resources(ds);
1074}
1075
1076static void bcm_sf2_sw_teardown(struct dsa_switch *ds)
1077{
1078	dsa_devlink_resources_unregister(ds);
1079}
1080
1081/* The SWITCH_CORE register space is managed by b53 but operates on a page +
1082 * register basis so we need to translate that into an address that the
1083 * bus-glue understands.
1084 */
1085#define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)
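    /* For example (illustration only): page 0x42, reg 0x03 translates to
     * (0x42 << 10) | (0x03 << 2) = 0x1080c, i.e. each page is a 1 KiB
     * window and registers sit on 32-bit boundaries within it.
     */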
1086
1087static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
1088			      u8 *val)
1089{
1090	struct bcm_sf2_priv *priv = dev->priv;
1091
1092	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
1093
1094	return 0;
1095}
1096
1097static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
1098			       u16 *val)
1099{
1100	struct bcm_sf2_priv *priv = dev->priv;
1101
1102	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
1103
1104	return 0;
1105}
1106
1107static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
1108			       u32 *val)
1109{
1110	struct bcm_sf2_priv *priv = dev->priv;
1111
1112	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
1113
1114	return 0;
1115}
1116
1117static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
1118			       u64 *val)
1119{
1120	struct bcm_sf2_priv *priv = dev->priv;
1121
1122	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));
1123
1124	return 0;
1125}
1126
1127static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
1128			       u8 value)
1129{
1130	struct bcm_sf2_priv *priv = dev->priv;
1131
1132	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
1133
1134	return 0;
1135}
1136
1137static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
1138				u16 value)
1139{
1140	struct bcm_sf2_priv *priv = dev->priv;
1141
1142	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
1143
1144	return 0;
1145}
1146
1147static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
1148				u32 value)
1149{
1150	struct bcm_sf2_priv *priv = dev->priv;
1151
1152	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
1153
1154	return 0;
1155}
1156
1157static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
1158				u64 value)
1159{
1160	struct bcm_sf2_priv *priv = dev->priv;
1161
1162	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
1163
1164	return 0;
1165}
1166
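    /* SWITCH_CORE is memory-mapped here, so the 8/16/32-bit b53 accessors
     * above all resolve to 32-bit MMIO accesses, and the 48-bit hooks in
     * the ops table below simply reuse the 64-bit helpers.
     */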
1167static const struct b53_io_ops bcm_sf2_io_ops = {
1168	.read8	= bcm_sf2_core_read8,
1169	.read16	= bcm_sf2_core_read16,
1170	.read32	= bcm_sf2_core_read32,
1171	.read48	= bcm_sf2_core_read64,
1172	.read64	= bcm_sf2_core_read64,
1173	.write8	= bcm_sf2_core_write8,
1174	.write16 = bcm_sf2_core_write16,
1175	.write32 = bcm_sf2_core_write32,
1176	.write48 = bcm_sf2_core_write64,
1177	.write64 = bcm_sf2_core_write64,
1178};
1179
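    /* The ethtool strings/statistics exposed for a port are the b53 MIB
     * counters followed by the CFP counters; the wrappers below advance
     * the data pointer by the b53 count so both sets stay contiguous.
     */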
1180static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port,
1181				   u32 stringset, uint8_t *data)
1182{
1183	int cnt = b53_get_sset_count(ds, port, stringset);
1184
1185	b53_get_strings(ds, port, stringset, data);
1186	data += cnt * ETH_GSTRING_LEN;
1187	bcm_sf2_cfp_get_strings(ds, port, stringset, &data);
1188}
1189
1190static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port,
1191					 uint64_t *data)
1192{
1193	int cnt = b53_get_sset_count(ds, port, ETH_SS_STATS);
1194
1195	b53_get_ethtool_stats(ds, port, data);
1196	bcm_sf2_cfp_get_ethtool_stats(ds, port, data + cnt);
1197}
1198
1199static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
1200				     int sset)
1201{
1202	int cnt = b53_get_sset_count(ds, port, sset);
1203
1204	if (cnt < 0)
1205		return cnt;
1206
1207	cnt += bcm_sf2_cfp_get_sset_count(ds, port, sset);
1208
1209	return cnt;
1210}
1211
1212static const struct phylink_mac_ops bcm_sf2_phylink_mac_ops = {
1213	.mac_config	= bcm_sf2_sw_mac_config,
1214	.mac_link_down	= bcm_sf2_sw_mac_link_down,
1215	.mac_link_up	= bcm_sf2_sw_mac_link_up,
1216};
1217
1218static const struct dsa_switch_ops bcm_sf2_ops = {
1219	.get_tag_protocol	= b53_get_tag_protocol,
1220	.setup			= bcm_sf2_sw_setup,
1221	.teardown		= bcm_sf2_sw_teardown,
1222	.get_strings		= bcm_sf2_sw_get_strings,
1223	.get_ethtool_stats	= bcm_sf2_sw_get_ethtool_stats,
1224	.get_sset_count		= bcm_sf2_sw_get_sset_count,
1225	.get_ethtool_phy_stats	= b53_get_ethtool_phy_stats,
1226	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
1227	.phylink_get_caps	= bcm_sf2_sw_get_caps,
1228	.phylink_fixed_state	= bcm_sf2_sw_fixed_state,
1229	.suspend		= bcm_sf2_sw_suspend,
1230	.resume			= bcm_sf2_sw_resume,
1231	.get_wol		= bcm_sf2_sw_get_wol,
1232	.set_wol		= bcm_sf2_sw_set_wol,
1233	.port_enable		= bcm_sf2_port_setup,
1234	.port_disable		= bcm_sf2_port_disable,
1235	.get_mac_eee		= b53_get_mac_eee,
1236	.set_mac_eee		= b53_set_mac_eee,
1237	.port_bridge_join	= b53_br_join,
1238	.port_bridge_leave	= b53_br_leave,
1239	.port_pre_bridge_flags	= b53_br_flags_pre,
1240	.port_bridge_flags	= b53_br_flags,
1241	.port_stp_state_set	= b53_br_set_stp_state,
1242	.port_fast_age		= b53_br_fast_age,
1243	.port_vlan_filtering	= b53_vlan_filtering,
1244	.port_vlan_add		= b53_vlan_add,
1245	.port_vlan_del		= b53_vlan_del,
1246	.port_fdb_dump		= b53_fdb_dump,
1247	.port_fdb_add		= b53_fdb_add,
1248	.port_fdb_del		= b53_fdb_del,
1249	.get_rxnfc		= bcm_sf2_get_rxnfc,
1250	.set_rxnfc		= bcm_sf2_set_rxnfc,
1251	.port_mirror_add	= b53_mirror_add,
1252	.port_mirror_del	= b53_mirror_del,
1253	.port_mdb_add		= b53_mdb_add,
1254	.port_mdb_del		= b53_mdb_del,
1255};
1256
1257struct bcm_sf2_of_data {
1258	u32 type;
1259	const u16 *reg_offsets;
1260	unsigned int core_reg_align;
1261	unsigned int num_cfp_rules;
1262	unsigned int num_crossbar_int_ports;
1263	unsigned int num_crossbar_ext_bits;
1264};
1265
1266static const u16 bcm_sf2_4908_reg_offsets[] = {
1267	[REG_SWITCH_CNTRL]	= 0x00,
1268	[REG_SWITCH_STATUS]	= 0x04,
1269	[REG_DIR_DATA_WRITE]	= 0x08,
1270	[REG_DIR_DATA_READ]	= 0x0c,
1271	[REG_SWITCH_REVISION]	= 0x10,
1272	[REG_PHY_REVISION]	= 0x14,
1273	[REG_SPHY_CNTRL]	= 0x24,
1274	[REG_CROSSBAR]		= 0xc8,
1275	[REG_RGMII_11_CNTRL]	= 0x014c,
1276	[REG_LED_0_CNTRL]		= 0x40,
1277	[REG_LED_1_CNTRL]		= 0x4c,
1278	[REG_LED_2_CNTRL]		= 0x58,
1279	[REG_LED_3_CNTRL]		= 0x64,
1280	[REG_LED_4_CNTRL]		= 0x88,
1281	[REG_LED_5_CNTRL]		= 0xa0,
1282	[REG_LED_AGGREGATE_CTRL]	= 0xb8,
1283
1284};
1285
1286static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
1287	.type		= BCM4908_DEVICE_ID,
1288	.core_reg_align	= 0,
1289	.reg_offsets	= bcm_sf2_4908_reg_offsets,
1290	.num_cfp_rules	= 256,
1291	.num_crossbar_int_ports = 2,
1292	.num_crossbar_ext_bits = 2,
1293};
1294
1295/* Register offsets for the SWITCH_REG_* block */
1296static const u16 bcm_sf2_7445_reg_offsets[] = {
1297	[REG_SWITCH_CNTRL]	= 0x00,
1298	[REG_SWITCH_STATUS]	= 0x04,
1299	[REG_DIR_DATA_WRITE]	= 0x08,
1300	[REG_DIR_DATA_READ]	= 0x0C,
1301	[REG_SWITCH_REVISION]	= 0x18,
1302	[REG_PHY_REVISION]	= 0x1C,
1303	[REG_SPHY_CNTRL]	= 0x2C,
1304	[REG_RGMII_0_CNTRL]	= 0x34,
1305	[REG_RGMII_1_CNTRL]	= 0x40,
1306	[REG_RGMII_2_CNTRL]	= 0x4c,
1307	[REG_LED_0_CNTRL]	= 0x90,
1308	[REG_LED_1_CNTRL]	= 0x94,
1309	[REG_LED_2_CNTRL]	= 0x98,
1310};
1311
1312static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
1313	.type		= BCM7445_DEVICE_ID,
1314	.core_reg_align	= 0,
1315	.reg_offsets	= bcm_sf2_7445_reg_offsets,
1316	.num_cfp_rules	= 256,
1317};
1318
1319static const u16 bcm_sf2_7278_reg_offsets[] = {
1320	[REG_SWITCH_CNTRL]	= 0x00,
1321	[REG_SWITCH_STATUS]	= 0x04,
1322	[REG_DIR_DATA_WRITE]	= 0x08,
1323	[REG_DIR_DATA_READ]	= 0x0c,
1324	[REG_SWITCH_REVISION]	= 0x10,
1325	[REG_PHY_REVISION]	= 0x14,
1326	[REG_SPHY_CNTRL]	= 0x24,
1327	[REG_RGMII_0_CNTRL]	= 0xe0,
1328	[REG_RGMII_1_CNTRL]	= 0xec,
1329	[REG_RGMII_2_CNTRL]	= 0xf8,
1330	[REG_LED_0_CNTRL]	= 0x40,
1331	[REG_LED_1_CNTRL]	= 0x4c,
1332	[REG_LED_2_CNTRL]	= 0x58,
1333};
1334
1335static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
1336	.type		= BCM7278_DEVICE_ID,
1337	.core_reg_align	= 1,
1338	.reg_offsets	= bcm_sf2_7278_reg_offsets,
1339	.num_cfp_rules	= 128,
1340};
1341
1342static const struct of_device_id bcm_sf2_of_match[] = {
1343	{ .compatible = "brcm,bcm4908-switch",
1344	  .data = &bcm_sf2_4908_data
1345	},
1346	{ .compatible = "brcm,bcm7445-switch-v4.0",
1347	  .data = &bcm_sf2_7445_data
1348	},
1349	{ .compatible = "brcm,bcm7278-switch-v4.0",
1350	  .data = &bcm_sf2_7278_data
1351	},
1352	{ .compatible = "brcm,bcm7278-switch-v4.8",
1353	  .data = &bcm_sf2_7278_data
1354	},
1355	{ /* sentinel */ },
1356};
1357MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
1358
1359static int bcm_sf2_sw_probe(struct platform_device *pdev)
1360{
1361	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
1362	struct device_node *dn = pdev->dev.of_node;
1363	const struct of_device_id *of_id = NULL;
1364	const struct bcm_sf2_of_data *data;
1365	struct b53_platform_data *pdata;
1366	struct dsa_switch_ops *ops;
1367	struct device_node *ports;
1368	struct bcm_sf2_priv *priv;
1369	struct b53_device *dev;
1370	struct dsa_switch *ds;
1371	void __iomem **base;
1372	unsigned int i;
1373	u32 reg, rev;
1374	int ret;
1375
1376	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
1377	if (!priv)
1378		return -ENOMEM;
1379
1380	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
1381	if (!ops)
1382		return -ENOMEM;
1383
1384	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
1385	if (!dev)
1386		return -ENOMEM;
1387
1388	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1389	if (!pdata)
1390		return -ENOMEM;
1391
1392	of_id = of_match_node(bcm_sf2_of_match, dn);
1393	if (!of_id || !of_id->data)
1394		return -EINVAL;
1395
1396	data = of_id->data;
1397
1398	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
1399	priv->type = data->type;
1400	priv->reg_offsets = data->reg_offsets;
1401	priv->core_reg_align = data->core_reg_align;
1402	priv->num_cfp_rules = data->num_cfp_rules;
1403	priv->num_crossbar_int_ports = data->num_crossbar_int_ports;
1404	priv->num_crossbar_ext_bits = data->num_crossbar_ext_bits;
1405
1406	priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
1407								"switch");
1408	if (IS_ERR(priv->rcdev))
1409		return PTR_ERR(priv->rcdev);
1410
1411	/* Auto-detection using standard registers will not work, so
1412	 * provide an indication of what kind of device we are for
1413	 * b53_common to work with
1414	 */
1415	pdata->chip_id = priv->type;
1416	dev->pdata = pdata;
1417
1418	priv->dev = dev;
1419	ds = dev->ds;
1420	ds->ops = &bcm_sf2_ops;
1421	ds->phylink_mac_ops = &bcm_sf2_phylink_mac_ops;
1422
1423	/* Advertise the 8 egress queues */
1424	ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;
1425
1426	dev_set_drvdata(&pdev->dev, priv);
1427
1428	spin_lock_init(&priv->indir_lock);
1429	mutex_init(&priv->cfp.lock);
1430	INIT_LIST_HEAD(&priv->cfp.rules_list);
1431
1432	/* CFP rule #0 cannot be used for specific classifications, flag it as
1433	 * permanently used
1434	 */
1435	set_bit(0, priv->cfp.used);
1436	set_bit(0, priv->cfp.unique);
1437
1438	/* Balance of_node_put() done by of_find_node_by_name() */
1439	of_node_get(dn);
1440	ports = of_find_node_by_name(dn, "ports");
1441	if (ports) {
1442		bcm_sf2_identify_ports(priv, ports);
1443		of_node_put(ports);
1444	}
1445
1446	priv->irq0 = irq_of_parse_and_map(dn, 0);
1447	priv->irq1 = irq_of_parse_and_map(dn, 1);
1448
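    	/* The loop below fills priv->core and the __iomem pointers that
    	 * follow it in struct bcm_sf2_priv, one memory resource per slot;
    	 * it relies on those BCM_SF2_REGS_NUM pointers being consecutive.
    	 */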
1449	base = &priv->core;
1450	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
1451		*base = devm_platform_ioremap_resource(pdev, i);
1452		if (IS_ERR(*base)) {
1453			pr_err("unable to find register: %s\n", reg_names[i]);
1454			return PTR_ERR(*base);
1455		}
1456		base++;
1457	}
1458
1459	priv->clk = devm_clk_get_optional(&pdev->dev, "sw_switch");
1460	if (IS_ERR(priv->clk))
1461		return PTR_ERR(priv->clk);
1462
1463	ret = clk_prepare_enable(priv->clk);
1464	if (ret)
1465		return ret;
1466
1467	priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
1468	if (IS_ERR(priv->clk_mdiv)) {
1469		ret = PTR_ERR(priv->clk_mdiv);
1470		goto out_clk;
1471	}
1472
1473	ret = clk_prepare_enable(priv->clk_mdiv);
1474	if (ret)
1475		goto out_clk;
1476
1477	ret = bcm_sf2_sw_rst(priv);
1478	if (ret) {
1479		pr_err("unable to software reset switch: %d\n", ret);
1480		goto out_clk_mdiv;
1481	}
1482
1483	bcm_sf2_crossbar_setup(priv);
1484
1485	bcm_sf2_gphy_enable_set(priv->dev->ds, true);
1486
1487	ret = bcm_sf2_mdio_register(ds);
1488	if (ret) {
1489		pr_err("failed to register MDIO bus\n");
1490		goto out_clk_mdiv;
1491	}
1492
1493	bcm_sf2_gphy_enable_set(priv->dev->ds, false);
1494
1495	ret = bcm_sf2_cfp_rst(priv);
1496	if (ret) {
1497		pr_err("failed to reset CFP\n");
1498		goto out_mdio;
1499	}
1500
1501	/* Disable all interrupts and request them */
1502	bcm_sf2_intr_disable(priv);
1503
1504	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
1505			       "switch_0", ds);
1506	if (ret < 0) {
1507		pr_err("failed to request switch_0 IRQ\n");
1508		goto out_mdio;
1509	}
1510
1511	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
1512			       "switch_1", ds);
1513	if (ret < 0) {
1514		pr_err("failed to request switch_1 IRQ\n");
1515		goto out_mdio;
1516	}
1517
1518	/* Reset the MIB counters */
1519	reg = core_readl(priv, CORE_GMNCFGCFG);
1520	reg |= RST_MIB_CNT;
1521	core_writel(priv, reg, CORE_GMNCFGCFG);
1522	reg &= ~RST_MIB_CNT;
1523	core_writel(priv, reg, CORE_GMNCFGCFG);
1524
1525	/* Get the maximum number of ports for this switch */
1526	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
1527	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
1528		priv->hw_params.num_ports = DSA_MAX_PORTS;
1529
1530	/* Assume a single GPHY setup if we can't read that property */
1531	if (of_property_read_u32(dn, "brcm,num-gphy",
1532				 &priv->hw_params.num_gphy))
1533		priv->hw_params.num_gphy = 1;
1534
1535	rev = reg_readl(priv, REG_SWITCH_REVISION);
1536	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
1537					SWITCH_TOP_REV_MASK;
1538	priv->hw_params.core_rev = (rev & SF2_REV_MASK);
1539
1540	rev = reg_readl(priv, REG_PHY_REVISION);
1541	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
1542
1543	ret = b53_switch_register(dev);
1544	if (ret)
1545		goto out_mdio;
1546
1547	dev_info(&pdev->dev,
1548		 "Starfighter 2 top: %x.%02x, core: %x.%02x, IRQs: %d, %d\n",
1549		 priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
1550		 priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
1551		 priv->irq0, priv->irq1);
1552
1553	return 0;
1554
1555out_mdio:
1556	bcm_sf2_mdio_unregister(priv);
1557out_clk_mdiv:
1558	clk_disable_unprepare(priv->clk_mdiv);
1559out_clk:
1560	clk_disable_unprepare(priv->clk);
1561	return ret;
1562}
1563
1564static void bcm_sf2_sw_remove(struct platform_device *pdev)
1565{
1566	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1567
1568	if (!priv)
1569		return;
1570
1571	priv->wol_ports_mask = 0;
1572	/* Disable interrupts */
1573	bcm_sf2_intr_disable(priv);
1574	dsa_unregister_switch(priv->dev->ds);
1575	bcm_sf2_cfp_exit(priv->dev->ds);
1576	bcm_sf2_mdio_unregister(priv);
1577	clk_disable_unprepare(priv->clk_mdiv);
1578	clk_disable_unprepare(priv->clk);
1579	if (priv->type == BCM7278_DEVICE_ID)
1580		reset_control_assert(priv->rcdev);
1581}
1582
1583static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
1584{
1585	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1586
1587	if (!priv)
1588		return;
1589
1590	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
1591	 * successful MDIO bus scan to occur. If we did turn off the GPHY
1592	 * before (e.g.: port_disable), this will also power it back on.
1593	 *
1594	 * Do not rely on kexec_in_progress, just power the PHY on.
1595	 */
1596	if (priv->hw_params.num_gphy == 1)
1597		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
1598
1599	dsa_switch_shutdown(priv->dev->ds);
1600
1601	platform_set_drvdata(pdev, NULL);
1602}
1603
1604#ifdef CONFIG_PM_SLEEP
1605static int bcm_sf2_suspend(struct device *dev)
1606{
1607	struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
1608
1609	return dsa_switch_suspend(priv->dev->ds);
1610}
1611
1612static int bcm_sf2_resume(struct device *dev)
1613{
1614	struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
1615
1616	return dsa_switch_resume(priv->dev->ds);
1617}
1618#endif /* CONFIG_PM_SLEEP */
1619
1620static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
1621			 bcm_sf2_suspend, bcm_sf2_resume);
1622
1623
1624static struct platform_driver bcm_sf2_driver = {
1625	.probe	= bcm_sf2_sw_probe,
1626	.remove = bcm_sf2_sw_remove,
1627	.shutdown = bcm_sf2_sw_shutdown,
1628	.driver = {
1629		.name = "brcm-sf2",
1630		.of_match_table = bcm_sf2_of_match,
1631		.pm = &bcm_sf2_pm_ops,
1632	},
1633};
1634module_platform_driver(bcm_sf2_driver);
1635
1636MODULE_AUTHOR("Broadcom Corporation");
1637MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
1638MODULE_LICENSE("GPL");
1639MODULE_ALIAS("platform:brcm-sf2");