   1/*
   2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
   3 * Copyright (c) 2008 Marvell Semiconductor
   4 *
   5 * Copyright (c) 2015 CMC Electronics, Inc.
   6 *	Added support for VLAN Table Unit operations
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2 of the License, or
  11 * (at your option) any later version.
  12 */
  13
  14#include <linux/delay.h>
  15#include <linux/etherdevice.h>
  16#include <linux/ethtool.h>
  17#include <linux/if_bridge.h>
  18#include <linux/jiffies.h>
  19#include <linux/list.h>
  20#include <linux/module.h>
  21#include <linux/netdevice.h>
  22#include <linux/gpio/consumer.h>
  23#include <linux/phy.h>
  24#include <net/dsa.h>
  25#include <net/switchdev.h>
  26#include "mv88e6xxx.h"
  27
  28static void assert_smi_lock(struct dsa_switch *ds)
  29{
  30	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  31
  32	if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
  33		dev_err(ds->master_dev, "SMI lock not held!\n");
  34		dump_stack();
  35	}
  36}
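
/* Locking convention: register accessors prefixed with an underscore expect
 * smi_mutex to already be held by the caller (assert_smi_lock() checks this);
 * the non-prefixed wrappers further below take and release the lock.
 */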
  37
  38/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
  39 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
  40 * will be directly accessible on some {device address,register address}
  41 * pair.  If the ADDR[4:0] pins are not strapped to zero, the switch
  42 * will only respond to SMI transactions to that specific address, and
  43 * an indirect addressing mechanism needs to be used to access its
  44 * registers.
  45 */
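
/* Sketch of the indirect access sequence implemented below, assuming the
 * switch is strapped to a non-zero address:
 *
 *	wait for SMI_CMD_BUSY to clear
 *	write SMI_CMD = SMI_CMD_OP_22_READ | (addr << 5) | reg
 *	wait for SMI_CMD_BUSY to clear
 *	read the 16-bit result from SMI_DATA
 *
 * Writes follow the same pattern, loading SMI_DATA first and then issuing
 * SMI_CMD_OP_22_WRITE.
 */
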
  46static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
  47{
  48	int ret;
  49	int i;
  50
  51	for (i = 0; i < 16; i++) {
  52		ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
  53		if (ret < 0)
  54			return ret;
  55
  56		if ((ret & SMI_CMD_BUSY) == 0)
  57			return 0;
  58	}
  59
  60	return -ETIMEDOUT;
  61}
  62
  63static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
  64				int reg)
  65{
  66	int ret;
  67
  68	if (sw_addr == 0)
  69		return mdiobus_read_nested(bus, addr, reg);
  70
  71	/* Wait for the bus to become free. */
  72	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
  73	if (ret < 0)
  74		return ret;
  75
  76	/* Transmit the read command. */
  77	ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
  78				   SMI_CMD_OP_22_READ | (addr << 5) | reg);
  79	if (ret < 0)
  80		return ret;
  81
  82	/* Wait for the read command to complete. */
  83	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
  84	if (ret < 0)
  85		return ret;
  86
  87	/* Read the data. */
  88	ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
  89	if (ret < 0)
  90		return ret;
  91
  92	return ret & 0xffff;
  93}
  94
  95static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
  96{
  97	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
  98	int ret;
  99
 100	assert_smi_lock(ds);
 101
 102	if (bus == NULL)
 103		return -EINVAL;
 104
 105	ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
 106	if (ret < 0)
 107		return ret;
 108
 109	dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
 110		addr, reg, ret);
 111
 112	return ret;
 113}
 114
 115int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
 116{
 117	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 118	int ret;
 119
 120	mutex_lock(&ps->smi_mutex);
 121	ret = _mv88e6xxx_reg_read(ds, addr, reg);
 122	mutex_unlock(&ps->smi_mutex);
 123
 124	return ret;
 125}
 126
 127static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
 128				 int reg, u16 val)
 129{
 130	int ret;
 131
 132	if (sw_addr == 0)
 133		return mdiobus_write_nested(bus, addr, reg, val);
 134
 135	/* Wait for the bus to become free. */
 136	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
 137	if (ret < 0)
 138		return ret;
 139
 140	/* Transmit the data to write. */
 141	ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
 142	if (ret < 0)
 143		return ret;
 144
 145	/* Transmit the write command. */
 146	ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
 147				   SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
 148	if (ret < 0)
 149		return ret;
 150
 151	/* Wait for the write command to complete. */
 152	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
 153	if (ret < 0)
 154		return ret;
 155
 156	return 0;
 157}
 158
 159static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
 160				u16 val)
 161{
 162	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
 163
 164	assert_smi_lock(ds);
 165
 166	if (bus == NULL)
 167		return -EINVAL;
 168
 169	dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
 170		addr, reg, val);
 171
 172	return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
 173}
 174
 175int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
 176{
 177	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 178	int ret;
 179
 180	mutex_lock(&ps->smi_mutex);
 181	ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
 182	mutex_unlock(&ps->smi_mutex);
 183
 184	return ret;
 185}
 186
 187int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
 188{
 189	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
 190	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
 191	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
 192
 193	return 0;
 194}
 195
 196int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
 197{
 198	int i;
 199	int ret;
 200
 201	for (i = 0; i < 6; i++) {
 202		int j;
 203
 204		/* Write the MAC address byte. */
 205		REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
 206			  GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
 207
 208		/* Wait for the write to complete. */
 209		for (j = 0; j < 16; j++) {
 210			ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
 211			if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
 212				break;
 213		}
 214		if (j == 16)
 215			return -ETIMEDOUT;
 216	}
 217
 218	return 0;
 219}
 220
 221static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
 222{
 223	if (addr >= 0)
 224		return _mv88e6xxx_reg_read(ds, addr, regnum);
 225	return 0xffff;
 226}
 227
 228static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
 229				u16 val)
 230{
 231	if (addr >= 0)
 232		return _mv88e6xxx_reg_write(ds, addr, regnum, val);
 233	return 0;
 234}
 235
 236#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
 237static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
 238{
 239	int ret;
 240	unsigned long timeout;
 241
 242	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
 243	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
 244		  ret & ~GLOBAL_CONTROL_PPU_ENABLE);
 245
 246	timeout = jiffies + 1 * HZ;
 247	while (time_before(jiffies, timeout)) {
 248		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
 249		usleep_range(1000, 2000);
 250		if ((ret & GLOBAL_STATUS_PPU_MASK) !=
 251		    GLOBAL_STATUS_PPU_POLLING)
 252			return 0;
 253	}
 254
 255	return -ETIMEDOUT;
 256}
 257
 258static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
 259{
 260	int ret;
 261	unsigned long timeout;
 262
 263	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
 264	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
 265
 266	timeout = jiffies + 1 * HZ;
 267	while (time_before(jiffies, timeout)) {
 268		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
 269		usleep_range(1000, 2000);
 270		if ((ret & GLOBAL_STATUS_PPU_MASK) ==
 271		    GLOBAL_STATUS_PPU_POLLING)
 272			return 0;
 273	}
 274
 275	return -ETIMEDOUT;
 276}
 277
 278static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
 279{
 280	struct mv88e6xxx_priv_state *ps;
 281
 282	ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
 283	if (mutex_trylock(&ps->ppu_mutex)) {
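		/* The private state is assumed to be allocated immediately
		 * after struct dsa_switch (see ds_to_priv()), so stepping
		 * back one struct dsa_switch from ps recovers ds.
		 */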
 284		struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
 285
 286		if (mv88e6xxx_ppu_enable(ds) == 0)
 287			ps->ppu_disabled = 0;
 288		mutex_unlock(&ps->ppu_mutex);
 289	}
 290}
 291
 292static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
 293{
 294	struct mv88e6xxx_priv_state *ps = (void *)_ps;
 295
 296	schedule_work(&ps->ppu_work);
 297}
 298
 299static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
 300{
 301	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 302	int ret;
 303
 304	mutex_lock(&ps->ppu_mutex);
 305
 306	/* If the PHY polling unit is enabled, disable it so that
 307	 * we can access the PHY registers.  If it was already
 308	 * disabled, cancel the timer that is going to re-enable
 309	 * it.
 310	 */
 311	if (!ps->ppu_disabled) {
 312		ret = mv88e6xxx_ppu_disable(ds);
 313		if (ret < 0) {
 314			mutex_unlock(&ps->ppu_mutex);
 315			return ret;
 316		}
 317		ps->ppu_disabled = 1;
 318	} else {
 319		del_timer(&ps->ppu_timer);
 320		ret = 0;
 321	}
 322
 323	return ret;
 324}
 325
 326static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
 327{
 328	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 329
 330	/* Schedule a timer to re-enable the PHY polling unit. */
 331	mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
 332	mutex_unlock(&ps->ppu_mutex);
 333}
 334
 335void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
 336{
 337	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 338
 339	mutex_init(&ps->ppu_mutex);
 340	INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
 341	init_timer(&ps->ppu_timer);
 342	ps->ppu_timer.data = (unsigned long)ps;
 343	ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
 344}
 345
 346int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
 347{
 348	int ret;
 349
 350	ret = mv88e6xxx_ppu_access_get(ds);
 351	if (ret >= 0) {
 352		ret = mv88e6xxx_reg_read(ds, addr, regnum);
 353		mv88e6xxx_ppu_access_put(ds);
 354	}
 355
 356	return ret;
 357}
 358
 359int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
 360			    int regnum, u16 val)
 361{
 362	int ret;
 363
 364	ret = mv88e6xxx_ppu_access_get(ds);
 365	if (ret >= 0) {
 366		ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
 367		mv88e6xxx_ppu_access_put(ds);
 368	}
 369
 370	return ret;
 371}
 372#endif
 373
 374static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
 375{
 376	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 377
 378	switch (ps->id) {
 379	case PORT_SWITCH_ID_6031:
 380	case PORT_SWITCH_ID_6061:
 381	case PORT_SWITCH_ID_6035:
 382	case PORT_SWITCH_ID_6065:
 383		return true;
 384	}
 385	return false;
 386}
 387
 388static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
 389{
 390	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 391
 392	switch (ps->id) {
 393	case PORT_SWITCH_ID_6092:
 394	case PORT_SWITCH_ID_6095:
 395		return true;
 396	}
 397	return false;
 398}
 399
 400static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
 401{
 402	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 403
 404	switch (ps->id) {
 405	case PORT_SWITCH_ID_6046:
 406	case PORT_SWITCH_ID_6085:
 407	case PORT_SWITCH_ID_6096:
 408	case PORT_SWITCH_ID_6097:
 409		return true;
 410	}
 411	return false;
 412}
 413
 414static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
 415{
 416	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 417
 418	switch (ps->id) {
 419	case PORT_SWITCH_ID_6123:
 420	case PORT_SWITCH_ID_6161:
 421	case PORT_SWITCH_ID_6165:
 422		return true;
 423	}
 424	return false;
 425}
 426
 427static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
 428{
 429	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 430
 431	switch (ps->id) {
 432	case PORT_SWITCH_ID_6121:
 433	case PORT_SWITCH_ID_6122:
 434	case PORT_SWITCH_ID_6152:
 435	case PORT_SWITCH_ID_6155:
 436	case PORT_SWITCH_ID_6182:
 437	case PORT_SWITCH_ID_6185:
 438	case PORT_SWITCH_ID_6108:
 439	case PORT_SWITCH_ID_6131:
 440		return true;
 441	}
 442	return false;
 443}
 444
 445static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
 446{
 447	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 448
 449	switch (ps->id) {
 450	case PORT_SWITCH_ID_6320:
 451	case PORT_SWITCH_ID_6321:
 452		return true;
 453	}
 454	return false;
 455}
 456
 457static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
 458{
 459	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 460
 461	switch (ps->id) {
 462	case PORT_SWITCH_ID_6171:
 463	case PORT_SWITCH_ID_6175:
 464	case PORT_SWITCH_ID_6350:
 465	case PORT_SWITCH_ID_6351:
 466		return true;
 467	}
 468	return false;
 469}
 470
 471static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
 472{
 473	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 474
 475	switch (ps->id) {
 476	case PORT_SWITCH_ID_6172:
 477	case PORT_SWITCH_ID_6176:
 478	case PORT_SWITCH_ID_6240:
 479	case PORT_SWITCH_ID_6352:
 480		return true;
 481	}
 482	return false;
 483}
 484
  485/* We expect the switch to perform autonegotiation if there is a real
  486 * PHY. However, in the case of a fixed-link PHY, we force the port
  487 * settings from the fixed-link settings.
  488 */
 489void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
 490			   struct phy_device *phydev)
 491{
 492	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 493	u32 reg;
 494	int ret;
 495
 496	if (!phy_is_pseudo_fixed_link(phydev))
 497		return;
 498
 499	mutex_lock(&ps->smi_mutex);
 500
 501	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
 502	if (ret < 0)
 503		goto out;
 504
 505	reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
 506		      PORT_PCS_CTRL_FORCE_LINK |
 507		      PORT_PCS_CTRL_DUPLEX_FULL |
 508		      PORT_PCS_CTRL_FORCE_DUPLEX |
 509		      PORT_PCS_CTRL_UNFORCED);
 510
 511	reg |= PORT_PCS_CTRL_FORCE_LINK;
 512	if (phydev->link)
  513		reg |= PORT_PCS_CTRL_LINK_UP;
 514
 515	if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
 516		goto out;
 517
 518	switch (phydev->speed) {
 519	case SPEED_1000:
 520		reg |= PORT_PCS_CTRL_1000;
 521		break;
 522	case SPEED_100:
 523		reg |= PORT_PCS_CTRL_100;
 524		break;
 525	case SPEED_10:
 526		reg |= PORT_PCS_CTRL_10;
 527		break;
 528	default:
  529		pr_info("Unknown speed\n");
 530		goto out;
 531	}
 532
 533	reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
 534	if (phydev->duplex == DUPLEX_FULL)
 535		reg |= PORT_PCS_CTRL_DUPLEX_FULL;
 536
 537	if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
 538	    (port >= ps->num_ports - 2)) {
 539		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
 540			reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
 541		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
 542			reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
 543		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
 544			reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
 545				PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
 546	}
 547	_mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
 548
 549out:
 550	mutex_unlock(&ps->smi_mutex);
 551}
 552
 553static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
 554{
 555	int ret;
 556	int i;
 557
 558	for (i = 0; i < 10; i++) {
 559		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
 560		if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
 561			return 0;
 562	}
 563
 564	return -ETIMEDOUT;
 565}
 566
 567static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
 568{
 569	int ret;
 570
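	/* The 6320 and 6352 families expect the one-based port number shifted
	 * into the upper bits of the stats capture operation, hence the
	 * adjustment below.
	 */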
 571	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
 572		port = (port + 1) << 5;
 573
 574	/* Snapshot the hardware statistics counters for this port. */
 575	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
 576				   GLOBAL_STATS_OP_CAPTURE_PORT |
 577				   GLOBAL_STATS_OP_HIST_RX_TX | port);
 578	if (ret < 0)
 579		return ret;
 580
 581	/* Wait for the snapshotting to complete. */
 582	ret = _mv88e6xxx_stats_wait(ds);
 583	if (ret < 0)
 584		return ret;
 585
 586	return 0;
 587}
 588
 589static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
 590{
 591	u32 _val;
 592	int ret;
 593
 594	*val = 0;
 595
 596	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
 597				   GLOBAL_STATS_OP_READ_CAPTURED |
 598				   GLOBAL_STATS_OP_HIST_RX_TX | stat);
 599	if (ret < 0)
 600		return;
 601
 602	ret = _mv88e6xxx_stats_wait(ds);
 603	if (ret < 0)
 604		return;
 605
 606	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
 607	if (ret < 0)
 608		return;
 609
 610	_val = ret << 16;
 611
 612	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
 613	if (ret < 0)
 614		return;
 615
 616	*val = _val | ret;
 617}
 618
 619static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
 620	{ "in_good_octets",	8, 0x00, BANK0, },
 621	{ "in_bad_octets",	4, 0x02, BANK0, },
 622	{ "in_unicast",		4, 0x04, BANK0, },
 623	{ "in_broadcasts",	4, 0x06, BANK0, },
 624	{ "in_multicasts",	4, 0x07, BANK0, },
 625	{ "in_pause",		4, 0x16, BANK0, },
 626	{ "in_undersize",	4, 0x18, BANK0, },
 627	{ "in_fragments",	4, 0x19, BANK0, },
 628	{ "in_oversize",	4, 0x1a, BANK0, },
 629	{ "in_jabber",		4, 0x1b, BANK0, },
 630	{ "in_rx_error",	4, 0x1c, BANK0, },
 631	{ "in_fcs_error",	4, 0x1d, BANK0, },
 632	{ "out_octets",		8, 0x0e, BANK0, },
 633	{ "out_unicast",	4, 0x10, BANK0, },
 634	{ "out_broadcasts",	4, 0x13, BANK0, },
 635	{ "out_multicasts",	4, 0x12, BANK0, },
 636	{ "out_pause",		4, 0x15, BANK0, },
 637	{ "excessive",		4, 0x11, BANK0, },
 638	{ "collisions",		4, 0x1e, BANK0, },
 639	{ "deferred",		4, 0x05, BANK0, },
 640	{ "single",		4, 0x14, BANK0, },
 641	{ "multiple",		4, 0x17, BANK0, },
 642	{ "out_fcs_error",	4, 0x03, BANK0, },
 643	{ "late",		4, 0x1f, BANK0, },
 644	{ "hist_64bytes",	4, 0x08, BANK0, },
 645	{ "hist_65_127bytes",	4, 0x09, BANK0, },
 646	{ "hist_128_255bytes",	4, 0x0a, BANK0, },
 647	{ "hist_256_511bytes",	4, 0x0b, BANK0, },
 648	{ "hist_512_1023bytes", 4, 0x0c, BANK0, },
 649	{ "hist_1024_max_bytes", 4, 0x0d, BANK0, },
 650	{ "sw_in_discards",	4, 0x10, PORT, },
 651	{ "sw_in_filtered",	2, 0x12, PORT, },
 652	{ "sw_out_filtered",	2, 0x13, PORT, },
 653	{ "in_discards",	4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 654	{ "in_filtered",	4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 655	{ "in_accepted",	4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 656	{ "in_bad_accepted",	4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 657	{ "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 658	{ "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 659	{ "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 660	{ "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 661	{ "tcam_counter_0",	4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 662	{ "tcam_counter_1",	4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 663	{ "tcam_counter_2",	4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, },
 664	{ "tcam_counter_3",	4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, },
 665	{ "in_da_unknown",	4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, },
 666	{ "in_management",	4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, },
 667	{ "out_queue_0",	4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 668	{ "out_queue_1",	4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 669	{ "out_queue_2",	4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 670	{ "out_queue_3",	4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 671	{ "out_queue_4",	4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 672	{ "out_queue_5",	4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 673	{ "out_queue_6",	4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 674	{ "out_queue_7",	4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 675	{ "out_cut_through",	4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, },
 676	{ "out_octets_a",	4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, },
 677	{ "out_octets_b",	4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, },
 678	{ "out_management",	4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
 679};
 680
 681static bool mv88e6xxx_has_stat(struct dsa_switch *ds,
 682			       struct mv88e6xxx_hw_stat *stat)
 683{
 684	switch (stat->type) {
 685	case BANK0:
 686		return true;
 687	case BANK1:
 688		return mv88e6xxx_6320_family(ds);
 689	case PORT:
 690		return mv88e6xxx_6095_family(ds) ||
 691			mv88e6xxx_6185_family(ds) ||
 692			mv88e6xxx_6097_family(ds) ||
 693			mv88e6xxx_6165_family(ds) ||
 694			mv88e6xxx_6351_family(ds) ||
 695			mv88e6xxx_6352_family(ds);
 696	}
 697	return false;
 698}
 699
 700static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
 701					    struct mv88e6xxx_hw_stat *s,
 702					    int port)
 703{
 704	u32 low;
 705	u32 high = 0;
 706	int ret;
 707	u64 value;
 708
 709	switch (s->type) {
 710	case PORT:
 711		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), s->reg);
 712		if (ret < 0)
 713			return UINT64_MAX;
 714
 715		low = ret;
 716		if (s->sizeof_stat == 4) {
 717			ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
 718						  s->reg + 1);
 719			if (ret < 0)
 720				return UINT64_MAX;
 721			high = ret;
 722		}
 723		break;
  724	case BANK0:
  725	case BANK1:
  726		_mv88e6xxx_stats_read(ds, s->reg, &low);
  727		if (s->sizeof_stat == 8) {
  728			_mv88e6xxx_stats_read(ds, s->reg + 1, &high);
     			/* Bank counters are 32-bit wide, so combine the two
     			 * halves as a 64-bit value.
     			 */
     			return (((u64)high) << 32) | low;
     		}
  729	}
  730	value = (((u64)high) << 16) | low;
  731	return value;
 732}
 733
 734void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
 735{
 736	struct mv88e6xxx_hw_stat *stat;
 737	int i, j;
 738
 739	for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
 740		stat = &mv88e6xxx_hw_stats[i];
 741		if (mv88e6xxx_has_stat(ds, stat)) {
 742			memcpy(data + j * ETH_GSTRING_LEN, stat->string,
 743			       ETH_GSTRING_LEN);
 744			j++;
 745		}
 746	}
 747}
 748
 749int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
 750{
 751	struct mv88e6xxx_hw_stat *stat;
 752	int i, j;
 753
 754	for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
 755		stat = &mv88e6xxx_hw_stats[i];
 756		if (mv88e6xxx_has_stat(ds, stat))
 757			j++;
 758	}
 759	return j;
 760}
 761
 762void
 763mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
 764			    int port, uint64_t *data)
 765{
 766	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 767	struct mv88e6xxx_hw_stat *stat;
 768	int ret;
 769	int i, j;
 770
 771	mutex_lock(&ps->smi_mutex);
 772
 773	ret = _mv88e6xxx_stats_snapshot(ds, port);
 774	if (ret < 0) {
 775		mutex_unlock(&ps->smi_mutex);
 776		return;
 777	}
 778	for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
 779		stat = &mv88e6xxx_hw_stats[i];
 780		if (mv88e6xxx_has_stat(ds, stat)) {
 781			data[j] = _mv88e6xxx_get_ethtool_stat(ds, stat, port);
 782			j++;
 783		}
 784	}
 785
 786	mutex_unlock(&ps->smi_mutex);
 787}
 788
 789int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
 790{
 791	return 32 * sizeof(u16);
 792}
 793
 794void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
 795			struct ethtool_regs *regs, void *_p)
 796{
 797	u16 *p = _p;
 798	int i;
 799
 800	regs->version = 0;
 801
 802	memset(p, 0xff, 32 * sizeof(u16));
 803
 804	for (i = 0; i < 32; i++) {
 805		int ret;
 806
 807		ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
 808		if (ret >= 0)
 809			p[i] = ret;
 810	}
 811}
 812
 813static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
 814			   u16 mask)
 815{
 816	unsigned long timeout = jiffies + HZ / 10;
 817
 818	while (time_before(jiffies, timeout)) {
 819		int ret;
 820
 821		ret = _mv88e6xxx_reg_read(ds, reg, offset);
 822		if (ret < 0)
 823			return ret;
 824		if (!(ret & mask))
 825			return 0;
 826
 827		usleep_range(1000, 2000);
 828	}
 829	return -ETIMEDOUT;
 830}
 831
 832static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
 833{
 834	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 835	int ret;
 836
 837	mutex_lock(&ps->smi_mutex);
 838	ret = _mv88e6xxx_wait(ds, reg, offset, mask);
 839	mutex_unlock(&ps->smi_mutex);
 840
 841	return ret;
 842}
 843
 844static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
 845{
 846	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
 847			       GLOBAL2_SMI_OP_BUSY);
 848}
 849
 850int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
 851{
 852	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
 853			      GLOBAL2_EEPROM_OP_LOAD);
 854}
 855
 856int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
 857{
 858	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
 859			      GLOBAL2_EEPROM_OP_BUSY);
 860}
 861
 862static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
 863{
 864	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
 865			       GLOBAL_ATU_OP_BUSY);
 866}
 867
 868static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
 869					int regnum)
 870{
 871	int ret;
 872
 873	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
 874				   GLOBAL2_SMI_OP_22_READ | (addr << 5) |
 875				   regnum);
 876	if (ret < 0)
 877		return ret;
 878
 879	ret = _mv88e6xxx_phy_wait(ds);
 880	if (ret < 0)
 881		return ret;
 882
 883	return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
 884}
 885
 886static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
 887					 int regnum, u16 val)
 888{
 889	int ret;
 890
 891	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
 892	if (ret < 0)
 893		return ret;
 894
 895	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
 896				   GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
  897				   regnum);
     	if (ret < 0)
     		return ret;
  898
  899	return _mv88e6xxx_phy_wait(ds);
 900}
 901
 902int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
 903{
 904	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 905	int reg;
 906
 907	mutex_lock(&ps->smi_mutex);
 908
 909	reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
 910	if (reg < 0)
 911		goto out;
 912
 913	e->eee_enabled = !!(reg & 0x0200);
 914	e->tx_lpi_enabled = !!(reg & 0x0100);
 915
 916	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
 917	if (reg < 0)
 918		goto out;
 919
 920	e->eee_active = !!(reg & PORT_STATUS_EEE);
 921	reg = 0;
 922
 923out:
 924	mutex_unlock(&ps->smi_mutex);
 925	return reg;
 926}
 927
 928int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
 929		      struct phy_device *phydev, struct ethtool_eee *e)
 930{
 931	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 932	int reg;
 933	int ret;
 934
 935	mutex_lock(&ps->smi_mutex);
 936
 937	ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
 938	if (ret < 0)
 939		goto out;
 940
 941	reg = ret & ~0x0300;
 942	if (e->eee_enabled)
 943		reg |= 0x0200;
 944	if (e->tx_lpi_enabled)
 945		reg |= 0x0100;
 946
 947	ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
 948out:
 949	mutex_unlock(&ps->smi_mutex);
 950
 951	return ret;
 952}
 953
 954static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
 955{
 956	int ret;
 957
 958	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
 959	if (ret < 0)
 960		return ret;
 961
 962	return _mv88e6xxx_atu_wait(ds);
 963}
 964
 965static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
 966				     struct mv88e6xxx_atu_entry *entry)
 967{
 968	u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
 969
 970	if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
 971		unsigned int mask, shift;
 972
 973		if (entry->trunk) {
 974			data |= GLOBAL_ATU_DATA_TRUNK;
 975			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
 976			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
 977		} else {
 978			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
 979			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
 980		}
 981
 982		data |= (entry->portv_trunkid << shift) & mask;
 983	}
 984
 985	return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
 986}
 987
 988static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
 989				     struct mv88e6xxx_atu_entry *entry,
 990				     bool static_too)
 991{
 992	int op;
 993	int err;
 994
 995	err = _mv88e6xxx_atu_wait(ds);
 996	if (err)
 997		return err;
 998
 999	err = _mv88e6xxx_atu_data_write(ds, entry);
1000	if (err)
1001		return err;
1002
1003	if (entry->fid) {
1004		err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
1005					   entry->fid);
1006		if (err)
1007			return err;
1008
1009		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
1010			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
1011	} else {
1012		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
1013			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
1014	}
1015
1016	return _mv88e6xxx_atu_cmd(ds, op);
1017}
1018
1019static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
1020{
1021	struct mv88e6xxx_atu_entry entry = {
1022		.fid = fid,
1023		.state = 0, /* EntryState bits must be 0 */
1024	};
1025
1026	return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
1027}
1028
1029static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
1030			       int to_port, bool static_too)
1031{
1032	struct mv88e6xxx_atu_entry entry = {
1033		.trunk = false,
1034		.fid = fid,
1035	};
1036
1037	/* EntryState bits must be 0xF */
1038	entry.state = GLOBAL_ATU_DATA_STATE_MASK;
1039
1040	/* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1041	entry.portv_trunkid = (to_port & 0x0f) << 4;
1042	entry.portv_trunkid |= from_port & 0x0f;
1043
1044	return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
1045}
1046
1047static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
1048				 bool static_too)
1049{
1050	/* Destination port 0xF means remove the entries */
1051	return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
1052}
1053
1054static const char * const mv88e6xxx_port_state_names[] = {
1055	[PORT_CONTROL_STATE_DISABLED] = "Disabled",
1056	[PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
1057	[PORT_CONTROL_STATE_LEARNING] = "Learning",
1058	[PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
1059};
1060
1061static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
1062{
1063	int reg, ret = 0;
1064	u8 oldstate;
1065
1066	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
1067	if (reg < 0)
1068		return reg;
1069
1070	oldstate = reg & PORT_CONTROL_STATE_MASK;
1071
1072	if (oldstate != state) {
1073		/* Flush forwarding database if we're moving a port
1074		 * from Learning or Forwarding state to Disabled or
1075		 * Blocking or Listening state.
1076		 */
1077		if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
1078		     oldstate == PORT_CONTROL_STATE_FORWARDING)
1079		    && (state == PORT_CONTROL_STATE_DISABLED ||
1080			state == PORT_CONTROL_STATE_BLOCKING)) {
1081			ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
1082			if (ret)
1083				return ret;
1084		}
1085
1086		reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1087		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
1088					   reg);
1089		if (ret)
1090			return ret;
1091
1092		netdev_dbg(ds->ports[port], "PortState %s (was %s)\n",
1093			   mv88e6xxx_port_state_names[state],
1094			   mv88e6xxx_port_state_names[oldstate]);
1095	}
1096
1097	return ret;
1098}
1099
1100static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
1101{
1102	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1103	struct net_device *bridge = ps->ports[port].bridge_dev;
1104	const u16 mask = (1 << ps->num_ports) - 1;
1105	u16 output_ports = 0;
1106	int reg;
1107	int i;
1108
1109	/* allow CPU port or DSA link(s) to send frames to every port */
1110	if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
1111		output_ports = mask;
1112	} else {
1113		for (i = 0; i < ps->num_ports; ++i) {
1114			/* allow sending frames to every group member */
1115			if (bridge && ps->ports[i].bridge_dev == bridge)
1116				output_ports |= BIT(i);
1117
1118			/* allow sending frames to CPU port and DSA link(s) */
1119			if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
1120				output_ports |= BIT(i);
1121		}
1122	}
1123
1124	/* prevent frames from going back out of the port they came in on */
1125	output_ports &= ~BIT(port);
1126
1127	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
1128	if (reg < 0)
1129		return reg;
1130
1131	reg &= ~mask;
1132	reg |= output_ports & mask;
1133
1134	return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
1135}
1136
1137int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
1138{
1139	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1140	int stp_state;
1141
1142	switch (state) {
1143	case BR_STATE_DISABLED:
1144		stp_state = PORT_CONTROL_STATE_DISABLED;
1145		break;
1146	case BR_STATE_BLOCKING:
1147	case BR_STATE_LISTENING:
1148		stp_state = PORT_CONTROL_STATE_BLOCKING;
1149		break;
1150	case BR_STATE_LEARNING:
1151		stp_state = PORT_CONTROL_STATE_LEARNING;
1152		break;
1153	case BR_STATE_FORWARDING:
1154	default:
1155		stp_state = PORT_CONTROL_STATE_FORWARDING;
1156		break;
1157	}
1158
1159	/* mv88e6xxx_port_stp_update may be called with softirqs disabled,
 1160	 * so we cannot update the port state directly but need to schedule it.
1161	 */
1162	ps->ports[port].state = stp_state;
1163	set_bit(port, ps->port_state_update_mask);
1164	schedule_work(&ps->bridge_work);
1165
1166	return 0;
1167}
1168
1169static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
1170				u16 *old)
1171{
1172	u16 pvid;
1173	int ret;
1174
1175	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
1176	if (ret < 0)
1177		return ret;
1178
1179	pvid = ret & PORT_DEFAULT_VLAN_MASK;
1180
1181	if (new) {
1182		ret &= ~PORT_DEFAULT_VLAN_MASK;
1183		ret |= *new & PORT_DEFAULT_VLAN_MASK;
1184
1185		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1186					   PORT_DEFAULT_VLAN, ret);
1187		if (ret < 0)
1188			return ret;
1189
1190		netdev_dbg(ds->ports[port], "DefaultVID %d (was %d)\n", *new,
1191			   pvid);
1192	}
1193
1194	if (old)
1195		*old = pvid;
1196
1197	return 0;
1198}
1199
1200static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
1201{
1202	return _mv88e6xxx_port_pvid(ds, port, NULL, pvid);
1203}
1204
1205static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
1206{
1207	return _mv88e6xxx_port_pvid(ds, port, &pvid, NULL);
1208}
1209
1210static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
1211{
1212	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
1213			       GLOBAL_VTU_OP_BUSY);
1214}
1215
1216static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
1217{
1218	int ret;
1219
1220	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
1221	if (ret < 0)
1222		return ret;
1223
1224	return _mv88e6xxx_vtu_wait(ds);
1225}
1226
1227static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
1228{
1229	int ret;
1230
1231	ret = _mv88e6xxx_vtu_wait(ds);
1232	if (ret < 0)
1233		return ret;
1234
1235	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
1236}
1237
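/* The VTU/STU data registers pack one nibble per port, four ports per 16-bit
 * register. Callers pass nibble_offset 0 to access the VTU member tags in the
 * low bits of each nibble, and 2 to access the STU port states in the high
 * bits.
 */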
1238static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
1239					struct mv88e6xxx_vtu_stu_entry *entry,
1240					unsigned int nibble_offset)
1241{
1242	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1243	u16 regs[3];
1244	int i;
1245	int ret;
1246
1247	for (i = 0; i < 3; ++i) {
1248		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1249					  GLOBAL_VTU_DATA_0_3 + i);
1250		if (ret < 0)
1251			return ret;
1252
1253		regs[i] = ret;
1254	}
1255
1256	for (i = 0; i < ps->num_ports; ++i) {
1257		unsigned int shift = (i % 4) * 4 + nibble_offset;
1258		u16 reg = regs[i / 4];
1259
1260		entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
1261	}
1262
1263	return 0;
1264}
1265
1266static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
1267					 struct mv88e6xxx_vtu_stu_entry *entry,
1268					 unsigned int nibble_offset)
1269{
1270	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1271	u16 regs[3] = { 0 };
1272	int i;
1273	int ret;
1274
1275	for (i = 0; i < ps->num_ports; ++i) {
1276		unsigned int shift = (i % 4) * 4 + nibble_offset;
1277		u8 data = entry->data[i];
1278
1279		regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
1280	}
1281
1282	for (i = 0; i < 3; ++i) {
1283		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
1284					   GLOBAL_VTU_DATA_0_3 + i, regs[i]);
1285		if (ret < 0)
1286			return ret;
1287	}
1288
1289	return 0;
1290}
1291
1292static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
1293{
1294	return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
1295				    vid & GLOBAL_VTU_VID_MASK);
1296}
1297
1298static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
1299				  struct mv88e6xxx_vtu_stu_entry *entry)
1300{
1301	struct mv88e6xxx_vtu_stu_entry next = { 0 };
1302	int ret;
1303
1304	ret = _mv88e6xxx_vtu_wait(ds);
1305	if (ret < 0)
1306		return ret;
1307
1308	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
1309	if (ret < 0)
1310		return ret;
1311
1312	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
1313	if (ret < 0)
1314		return ret;
1315
1316	next.vid = ret & GLOBAL_VTU_VID_MASK;
1317	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1318
1319	if (next.valid) {
1320		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
1321		if (ret < 0)
1322			return ret;
1323
1324		if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1325		    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1326			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1327						  GLOBAL_VTU_FID);
1328			if (ret < 0)
1329				return ret;
1330
1331			next.fid = ret & GLOBAL_VTU_FID_MASK;
1332
1333			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1334						  GLOBAL_VTU_SID);
1335			if (ret < 0)
1336				return ret;
1337
1338			next.sid = ret & GLOBAL_VTU_SID_MASK;
1339		}
1340	}
1341
1342	*entry = next;
1343	return 0;
1344}
1345
1346int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
1347			     struct switchdev_obj_port_vlan *vlan,
1348			     int (*cb)(struct switchdev_obj *obj))
1349{
1350	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1351	struct mv88e6xxx_vtu_stu_entry next;
1352	u16 pvid;
1353	int err;
1354
1355	mutex_lock(&ps->smi_mutex);
1356
1357	err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
1358	if (err)
1359		goto unlock;
1360
1361	err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
1362	if (err)
1363		goto unlock;
1364
1365	do {
1366		err = _mv88e6xxx_vtu_getnext(ds, &next);
1367		if (err)
1368			break;
1369
1370		if (!next.valid)
1371			break;
1372
1373		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1374			continue;
1375
1376		/* reinit and dump this VLAN obj */
1377		vlan->vid_begin = vlan->vid_end = next.vid;
1378		vlan->flags = 0;
1379
1380		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1381			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1382
1383		if (next.vid == pvid)
1384			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1385
1386		err = cb(&vlan->obj);
1387		if (err)
1388			break;
1389	} while (next.vid < GLOBAL_VTU_VID_MASK);
1390
1391unlock:
1392	mutex_unlock(&ps->smi_mutex);
1393
1394	return err;
1395}
1396
1397static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
1398				    struct mv88e6xxx_vtu_stu_entry *entry)
1399{
1400	u16 reg = 0;
1401	int ret;
1402
1403	ret = _mv88e6xxx_vtu_wait(ds);
1404	if (ret < 0)
1405		return ret;
1406
1407	if (!entry->valid)
1408		goto loadpurge;
1409
1410	/* Write port member tags */
1411	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
1412	if (ret < 0)
1413		return ret;
1414
1415	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1416	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1417		reg = entry->sid & GLOBAL_VTU_SID_MASK;
1418		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1419		if (ret < 0)
1420			return ret;
1421
1422		reg = entry->fid & GLOBAL_VTU_FID_MASK;
1423		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
1424		if (ret < 0)
1425			return ret;
1426	}
1427
1428	reg = GLOBAL_VTU_VID_VALID;
1429loadpurge:
1430	reg |= entry->vid & GLOBAL_VTU_VID_MASK;
1431	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1432	if (ret < 0)
1433		return ret;
1434
1435	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
1436}
1437
1438static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
1439				  struct mv88e6xxx_vtu_stu_entry *entry)
1440{
1441	struct mv88e6xxx_vtu_stu_entry next = { 0 };
1442	int ret;
1443
1444	ret = _mv88e6xxx_vtu_wait(ds);
1445	if (ret < 0)
1446		return ret;
1447
1448	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
1449				   sid & GLOBAL_VTU_SID_MASK);
1450	if (ret < 0)
1451		return ret;
1452
1453	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
1454	if (ret < 0)
1455		return ret;
1456
1457	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
1458	if (ret < 0)
1459		return ret;
1460
1461	next.sid = ret & GLOBAL_VTU_SID_MASK;
1462
1463	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
1464	if (ret < 0)
1465		return ret;
1466
1467	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1468
1469	if (next.valid) {
1470		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
1471		if (ret < 0)
1472			return ret;
1473	}
1474
1475	*entry = next;
1476	return 0;
1477}
1478
1479static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
1480				    struct mv88e6xxx_vtu_stu_entry *entry)
1481{
1482	u16 reg = 0;
1483	int ret;
1484
1485	ret = _mv88e6xxx_vtu_wait(ds);
1486	if (ret < 0)
1487		return ret;
1488
1489	if (!entry->valid)
1490		goto loadpurge;
1491
1492	/* Write port states */
1493	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
1494	if (ret < 0)
1495		return ret;
1496
1497	reg = GLOBAL_VTU_VID_VALID;
1498loadpurge:
1499	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1500	if (ret < 0)
1501		return ret;
1502
1503	reg = entry->sid & GLOBAL_VTU_SID_MASK;
1504	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1505	if (ret < 0)
1506		return ret;
1507
1508	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
1509}
1510
1511static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
1512			       u16 *old)
1513{
1514	u16 fid;
1515	int ret;
1516
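	/* The 12-bit FID is split across two port registers, as described
	 * below; e.g. a (hypothetical) FID of 0x5a3 would store 0x3 in
	 * PORT_BASE_VLAN bits 15:12 and 0x5a in PORT_CONTROL_1 bits 7:0.
	 */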
1517	/* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
1518	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
1519	if (ret < 0)
1520		return ret;
1521
1522	fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
1523
1524	if (new) {
1525		ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
1526		ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
1527
1528		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN,
1529					   ret);
1530		if (ret < 0)
1531			return ret;
1532	}
1533
1534	/* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
1535	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_1);
1536	if (ret < 0)
1537		return ret;
1538
1539	fid |= (ret & PORT_CONTROL_1_FID_11_4_MASK) << 4;
1540
1541	if (new) {
1542		ret &= ~PORT_CONTROL_1_FID_11_4_MASK;
1543		ret |= (*new >> 4) & PORT_CONTROL_1_FID_11_4_MASK;
1544
1545		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1,
1546					   ret);
1547		if (ret < 0)
1548			return ret;
1549
1550		netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid);
1551	}
1552
1553	if (old)
1554		*old = fid;
1555
1556	return 0;
1557}
1558
1559static int _mv88e6xxx_port_fid_get(struct dsa_switch *ds, int port, u16 *fid)
1560{
1561	return _mv88e6xxx_port_fid(ds, port, NULL, fid);
1562}
1563
1564static int _mv88e6xxx_port_fid_set(struct dsa_switch *ds, int port, u16 fid)
1565{
1566	return _mv88e6xxx_port_fid(ds, port, &fid, NULL);
1567}
1568
1569static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
1570{
1571	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1572	DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
1573	struct mv88e6xxx_vtu_stu_entry vlan;
1574	int i, err;
1575
1576	bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
1577
1578	/* Set every FID bit used by the (un)bridged ports */
1579	for (i = 0; i < ps->num_ports; ++i) {
1580		err = _mv88e6xxx_port_fid_get(ds, i, fid);
1581		if (err)
1582			return err;
1583
1584		set_bit(*fid, fid_bitmap);
1585	}
1586
1587	/* Set every FID bit used by the VLAN entries */
1588	err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
1589	if (err)
1590		return err;
1591
1592	do {
1593		err = _mv88e6xxx_vtu_getnext(ds, &vlan);
1594		if (err)
1595			return err;
1596
1597		if (!vlan.valid)
1598			break;
1599
1600		set_bit(vlan.fid, fid_bitmap);
1601	} while (vlan.vid < GLOBAL_VTU_VID_MASK);
1602
1603	/* The reset value 0x000 is used to indicate that multiple address
 1604	 * databases are not needed. Return the next available positive FID.
1605	 */
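	/* For example, if the ports currently use FID 0 and two VLANs use
	 * FIDs 1 and 2, the first zero bit at or above 1 is 3, which becomes
	 * the new database.
	 */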
1606	*fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
1607	if (unlikely(*fid == MV88E6XXX_N_FID))
1608		return -ENOSPC;
1609
1610	/* Clear the database */
1611	return _mv88e6xxx_atu_flush(ds, *fid, true);
1612}
1613
1614static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
1615			      struct mv88e6xxx_vtu_stu_entry *entry)
1616{
1617	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1618	struct mv88e6xxx_vtu_stu_entry vlan = {
1619		.valid = true,
1620		.vid = vid,
1621	};
1622	int i, err;
1623
1624	err = _mv88e6xxx_fid_new(ds, &vlan.fid);
1625	if (err)
1626		return err;
1627
1628	/* exclude all ports except the CPU and DSA ports */
1629	for (i = 0; i < ps->num_ports; ++i)
1630		vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
1631			? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
1632			: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1633
1634	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1635	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1636		struct mv88e6xxx_vtu_stu_entry vstp;
1637
1638		/* Adding a VTU entry requires a valid STU entry. As VSTP is not
1639		 * implemented, only one STU entry is needed to cover all VTU
 1640		 * entries. Thus, validate SID 0.
1641		 */
1642		vlan.sid = 0;
1643		err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
1644		if (err)
1645			return err;
1646
1647		if (vstp.sid != vlan.sid || !vstp.valid) {
1648			memset(&vstp, 0, sizeof(vstp));
1649			vstp.valid = true;
1650			vstp.sid = vlan.sid;
1651
1652			err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
1653			if (err)
1654				return err;
1655		}
1656	}
1657
1658	*entry = vlan;
1659	return 0;
1660}
1661
1662static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
1663			      struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
1664{
1665	int err;
1666
1667	if (!vid)
1668		return -EINVAL;
1669
1670	err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
1671	if (err)
1672		return err;
1673
1674	err = _mv88e6xxx_vtu_getnext(ds, entry);
1675	if (err)
1676		return err;
1677
1678	if (entry->vid != vid || !entry->valid) {
1679		if (!creat)
1680			return -EOPNOTSUPP;
1681		/* -ENOENT would've been more appropriate, but switchdev expects
 1682		 * -EOPNOTSUPP to inform the bridge about a possible software VLAN.
1683		 */
1684
1685		err = _mv88e6xxx_vtu_new(ds, vid, entry);
1686	}
1687
1688	return err;
1689}
1690
1691static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
1692					u16 vid_begin, u16 vid_end)
1693{
1694	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1695	struct mv88e6xxx_vtu_stu_entry vlan;
1696	int i, err;
1697
1698	if (!vid_begin)
1699		return -EOPNOTSUPP;
1700
1701	mutex_lock(&ps->smi_mutex);
1702
1703	err = _mv88e6xxx_vtu_vid_write(ds, vid_begin - 1);
1704	if (err)
1705		goto unlock;
1706
1707	do {
1708		err = _mv88e6xxx_vtu_getnext(ds, &vlan);
1709		if (err)
1710			goto unlock;
1711
1712		if (!vlan.valid)
1713			break;
1714
1715		if (vlan.vid > vid_end)
1716			break;
1717
1718		for (i = 0; i < ps->num_ports; ++i) {
1719			if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
1720				continue;
1721
1722			if (vlan.data[i] ==
1723			    GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1724				continue;
1725
1726			if (ps->ports[i].bridge_dev ==
1727			    ps->ports[port].bridge_dev)
1728				break; /* same bridge, check next VLAN */
1729
1730			netdev_warn(ds->ports[port],
1731				    "hardware VLAN %d already used by %s\n",
1732				    vlan.vid,
1733				    netdev_name(ps->ports[i].bridge_dev));
1734			err = -EOPNOTSUPP;
1735			goto unlock;
1736		}
1737	} while (vlan.vid < vid_end);
1738
1739unlock:
1740	mutex_unlock(&ps->smi_mutex);
1741
1742	return err;
1743}
1744
1745static const char * const mv88e6xxx_port_8021q_mode_names[] = {
1746	[PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
1747	[PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
1748	[PORT_CONTROL_2_8021Q_CHECK] = "Check",
1749	[PORT_CONTROL_2_8021Q_SECURE] = "Secure",
1750};
1751
1752int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
1753				  bool vlan_filtering)
1754{
1755	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1756	u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
1757		PORT_CONTROL_2_8021Q_DISABLED;
1758	int ret;
1759
1760	mutex_lock(&ps->smi_mutex);
1761
1762	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_2);
1763	if (ret < 0)
1764		goto unlock;
1765
1766	old = ret & PORT_CONTROL_2_8021Q_MASK;
1767
1768	if (new != old) {
1769		ret &= ~PORT_CONTROL_2_8021Q_MASK;
1770		ret |= new & PORT_CONTROL_2_8021Q_MASK;
1771
1772		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2,
1773					   ret);
1774		if (ret < 0)
1775			goto unlock;
1776
1777		netdev_dbg(ds->ports[port], "802.1Q Mode %s (was %s)\n",
1778			   mv88e6xxx_port_8021q_mode_names[new],
1779			   mv88e6xxx_port_8021q_mode_names[old]);
1780	}
1781
1782	ret = 0;
1783unlock:
1784	mutex_unlock(&ps->smi_mutex);
1785
1786	return ret;
1787}
1788
1789int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
1790				const struct switchdev_obj_port_vlan *vlan,
1791				struct switchdev_trans *trans)
1792{
1793	int err;
1794
1795	/* If the requested port doesn't belong to the same bridge as the VLAN
 1796	 * members, do not support it (yet) and fall back to a software VLAN.
1797	 */
1798	err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
1799					   vlan->vid_end);
1800	if (err)
1801		return err;
1802
1803	/* We don't need any dynamic resource from the kernel (yet),
1804	 * so skip the prepare phase.
1805	 */
1806	return 0;
1807}
1808
1809static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
1810				    bool untagged)
1811{
1812	struct mv88e6xxx_vtu_stu_entry vlan;
1813	int err;
1814
1815	err = _mv88e6xxx_vtu_get(ds, vid, &vlan, true);
1816	if (err)
1817		return err;
1818
1819	vlan.data[port] = untagged ?
1820		GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
1821		GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
1822
1823	return _mv88e6xxx_vtu_loadpurge(ds, &vlan);
1824}
1825
1826int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
1827			    const struct switchdev_obj_port_vlan *vlan,
1828			    struct switchdev_trans *trans)
1829{
1830	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1831	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1832	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1833	u16 vid;
1834	int err = 0;
1835
1836	mutex_lock(&ps->smi_mutex);
1837
1838	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1839		err = _mv88e6xxx_port_vlan_add(ds, port, vid, untagged);
1840		if (err)
1841			goto unlock;
1842	}
1843
1844	/* no PVID with ranges, otherwise it's a bug */
1845	if (pvid)
1846		err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
1847unlock:
1848	mutex_unlock(&ps->smi_mutex);
1849
1850	return err;
1851}
1852
1853static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
1854{
1855	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1856	struct mv88e6xxx_vtu_stu_entry vlan;
1857	int i, err;
1858
1859	err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
1860	if (err)
1861		return err;
1862
1863	/* Tell switchdev if this VLAN is handled in software */
1864	if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1865		return -EOPNOTSUPP;
1866
1867	vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1868
1869	/* keep the VLAN unless all ports are excluded */
1870	vlan.valid = false;
1871	for (i = 0; i < ps->num_ports; ++i) {
1872		if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
1873			continue;
1874
1875		if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
1876			vlan.valid = true;
1877			break;
1878		}
1879	}
1880
1881	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
1882	if (err)
1883		return err;
1884
1885	return _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
1886}
1887
1888int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
1889			    const struct switchdev_obj_port_vlan *vlan)
1890{
1891	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1892	u16 pvid, vid;
1893	int err = 0;
1894
1895	mutex_lock(&ps->smi_mutex);
1896
1897	err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
1898	if (err)
1899		goto unlock;
1900
1901	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1902		err = _mv88e6xxx_port_vlan_del(ds, port, vid);
1903		if (err)
1904			goto unlock;
1905
1906		if (vid == pvid) {
1907			err = _mv88e6xxx_port_pvid_set(ds, port, 0);
1908			if (err)
1909				goto unlock;
1910		}
1911	}
1912
1913unlock:
1914	mutex_unlock(&ps->smi_mutex);
1915
1916	return err;
1917}
1918
1919static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
1920				    const unsigned char *addr)
1921{
1922	int i, ret;
1923
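	/* The 48-bit address is loaded 16 bits at a time; e.g. (hypothetical)
	 * 00:11:22:33:44:55 is written as 0x0011, 0x2233 and 0x4455 into the
	 * three consecutive registers starting at GLOBAL_ATU_MAC_01.
	 */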
1924	for (i = 0; i < 3; i++) {
1925		ret = _mv88e6xxx_reg_write(
1926			ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
1927			(addr[i * 2] << 8) | addr[i * 2 + 1]);
1928		if (ret < 0)
1929			return ret;
1930	}
1931
1932	return 0;
1933}
1934
1935static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
1936{
1937	int i, ret;
1938
1939	for (i = 0; i < 3; i++) {
1940		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1941					  GLOBAL_ATU_MAC_01 + i);
1942		if (ret < 0)
1943			return ret;
1944		addr[i * 2] = ret >> 8;
1945		addr[i * 2 + 1] = ret & 0xff;
1946	}
1947
1948	return 0;
1949}
1950
1951static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
1952			       struct mv88e6xxx_atu_entry *entry)
1953{
1954	int ret;
1955
1956	ret = _mv88e6xxx_atu_wait(ds);
1957	if (ret < 0)
1958		return ret;
1959
1960	ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
1961	if (ret < 0)
1962		return ret;
1963
1964	ret = _mv88e6xxx_atu_data_write(ds, entry);
1965	if (ret < 0)
1966		return ret;
1967
1968	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
1969	if (ret < 0)
1970		return ret;
1971
1972	return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
1973}
1974
1975static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
1976				    const unsigned char *addr, u16 vid,
1977				    u8 state)
1978{
1979	struct mv88e6xxx_atu_entry entry = { 0 };
1980	struct mv88e6xxx_vtu_stu_entry vlan;
1981	int err;
1982
1983	/* Null VLAN ID corresponds to the port private database */
1984	if (vid == 0)
1985		err = _mv88e6xxx_port_fid_get(ds, port, &vlan.fid);
1986	else
1987		err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
1988	if (err)
1989		return err;
1990
1991	entry.fid = vlan.fid;
1992	entry.state = state;
1993	ether_addr_copy(entry.mac, addr);
1994	if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1995		entry.trunk = false;
1996		entry.portv_trunkid = BIT(port);
1997	}
1998
1999	return _mv88e6xxx_atu_load(ds, &entry);
2000}
2001
2002int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
2003			       const struct switchdev_obj_port_fdb *fdb,
2004			       struct switchdev_trans *trans)
2005{
2006	/* We don't need any dynamic resource from the kernel (yet),
2007	 * so skip the prepare phase.
2008	 */
2009	return 0;
2010}
2011
2012int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
2013			   const struct switchdev_obj_port_fdb *fdb,
2014			   struct switchdev_trans *trans)
2015{
2016	int state = is_multicast_ether_addr(fdb->addr) ?
2017		GLOBAL_ATU_DATA_STATE_MC_STATIC :
2018		GLOBAL_ATU_DATA_STATE_UC_STATIC;
2019	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2020	int ret;
2021
2022	mutex_lock(&ps->smi_mutex);
2023	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
2024	mutex_unlock(&ps->smi_mutex);
2025
2026	return ret;
2027}
2028
2029int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
2030			   const struct switchdev_obj_port_fdb *fdb)
2031{
2032	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2033	int ret;
2034
2035	mutex_lock(&ps->smi_mutex);
2036	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
2037				       GLOBAL_ATU_DATA_STATE_UNUSED);
2038	mutex_unlock(&ps->smi_mutex);
2039
2040	return ret;
2041}
2042
2043static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
2044				  struct mv88e6xxx_atu_entry *entry)
2045{
2046	struct mv88e6xxx_atu_entry next = { 0 };
2047	int ret;
2048
2049	next.fid = fid;
2050
2051	ret = _mv88e6xxx_atu_wait(ds);
2052	if (ret < 0)
2053		return ret;
2054
2055	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
2056	if (ret < 0)
2057		return ret;
2058
2059	ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
2060	if (ret < 0)
2061		return ret;
2062
2063	ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
2064	if (ret < 0)
2065		return ret;
2066
2067	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
2068	if (ret < 0)
2069		return ret;
2070
2071	next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
2072	if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
2073		unsigned int mask, shift;
2074
2075		if (ret & GLOBAL_ATU_DATA_TRUNK) {
2076			next.trunk = true;
2077			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
2078			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
2079		} else {
2080			next.trunk = false;
2081			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
2082			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
2083		}
2084
2085		next.portv_trunkid = (ret & mask) >> shift;
2086	}
2087
2088	*entry = next;
2089	return 0;
2090}
2091
2092static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
2093					int port,
2094					struct switchdev_obj_port_fdb *fdb,
2095					int (*cb)(struct switchdev_obj *obj))
2096{
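	/* Prime the ATU GetNext walk with the broadcast address: GetNext
	 * then starts from the lowest MAC in this FID, and the loop below
	 * stops once the broadcast address is returned again (wrap-around).
	 */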
2097	struct mv88e6xxx_atu_entry addr = {
2098		.mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
2099	};
2100	int err;
2101
2102	err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
2103	if (err)
2104		return err;
2105
2106	do {
2107		err = _mv88e6xxx_atu_getnext(ds, fid, &addr);
2108		if (err)
2109			break;
2110
2111		if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
2112			break;
2113
2114		if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
2115			bool is_static = addr.state ==
2116				(is_multicast_ether_addr(addr.mac) ?
2117				 GLOBAL_ATU_DATA_STATE_MC_STATIC :
2118				 GLOBAL_ATU_DATA_STATE_UC_STATIC);
2119
2120			fdb->vid = vid;
2121			ether_addr_copy(fdb->addr, addr.mac);
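			/* NUD_NOARP marks the entry as static/permanent to
			 * the bridge layer; learned entries are reported as
			 * NUD_REACHABLE.
			 */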
2122			fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
2123
2124			err = cb(&fdb->obj);
2125			if (err)
2126				break;
2127		}
2128	} while (!is_broadcast_ether_addr(addr.mac));
2129
2130	return err;
2131}
2132
2133int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
2134			    struct switchdev_obj_port_fdb *fdb,
2135			    int (*cb)(struct switchdev_obj *obj))
2136{
2137	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2138	struct mv88e6xxx_vtu_stu_entry vlan = {
2139		.vid = GLOBAL_VTU_VID_MASK, /* all ones */
2140	};
2141	u16 fid;
2142	int err;
2143
2144	mutex_lock(&ps->smi_mutex);
2145
2146	/* Dump port's default Filtering Information Database (VLAN ID 0) */
2147	err = _mv88e6xxx_port_fid_get(ds, port, &fid);
2148	if (err)
2149		goto unlock;
2150
2151	err = _mv88e6xxx_port_fdb_dump_one(ds, fid, 0, port, fdb, cb);
2152	if (err)
2153		goto unlock;
2154
2155	/* Dump VLANs' Filtering Information Databases */
2156	err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
2157	if (err)
2158		goto unlock;
2159
2160	do {
2161		err = _mv88e6xxx_vtu_getnext(ds, &vlan);
2162		if (err)
2163			break;
2164
2165		if (!vlan.valid)
2166			break;
2167
2168		err = _mv88e6xxx_port_fdb_dump_one(ds, vlan.fid, vlan.vid, port,
2169						   fdb, cb);
2170		if (err)
2171			break;
2172	} while (vlan.vid < GLOBAL_VTU_VID_MASK);
2173
2174unlock:
2175	mutex_unlock(&ps->smi_mutex);
2176
2177	return err;
2178}
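/* "bridge fdb show" reaches this dump through the switchdev FDB object dump;
 * cb() emits one FDB entry for every matching ATU entry found above.
 */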
2179
2180int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2181			       struct net_device *bridge)
2182{
2183	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2184	int i, err = 0;
2185
2186	mutex_lock(&ps->smi_mutex);
2187
2188	/* Assign the bridge and remap each port's VLANTable */
2189	ps->ports[port].bridge_dev = bridge;
2190
2191	for (i = 0; i < ps->num_ports; ++i) {
2192		if (ps->ports[i].bridge_dev == bridge) {
2193			err = _mv88e6xxx_port_based_vlan_map(ds, i);
2194			if (err)
2195				break;
2196		}
2197	}
2198
2199	mutex_unlock(&ps->smi_mutex);
2200
2201	return err;
2202}
2203
2204void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
2205{
2206	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2207	struct net_device *bridge = ps->ports[port].bridge_dev;
2208	int i;
2209
2210	mutex_lock(&ps->smi_mutex);
2211
2212	/* Unassign the bridge and remap each port's VLANTable */
2213	ps->ports[port].bridge_dev = NULL;
2214
2215	for (i = 0; i < ps->num_ports; ++i)
2216		if (i == port || ps->ports[i].bridge_dev == bridge)
2217			if (_mv88e6xxx_port_based_vlan_map(ds, i))
2218				netdev_warn(ds->ports[i], "failed to remap port-based VLAN map\n");
2219
2220	mutex_unlock(&ps->smi_mutex);
2221}
2222
2223static void mv88e6xxx_bridge_work(struct work_struct *work)
2224{
2225	struct mv88e6xxx_priv_state *ps;
2226	struct dsa_switch *ds;
2227	int port;
2228
2229	ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
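	/* The DSA core allocates the driver private area directly after
	 * struct dsa_switch (see ds_to_priv()), so stepping back one
	 * dsa_switch from the private state recovers the switch pointer.
	 */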
2230	ds = ((struct dsa_switch *)ps) - 1;
2231
2232	mutex_lock(&ps->smi_mutex);
2233
2234	for (port = 0; port < ps->num_ports; ++port)
2235		if (test_and_clear_bit(port, ps->port_state_update_mask) &&
2236		    _mv88e6xxx_port_state(ds, port, ps->ports[port].state))
2237			netdev_warn(ds->ports[port], "failed to update state to %s\n",
2238				    mv88e6xxx_port_state_names[ps->ports[port].state]);
2239
2240	mutex_unlock(&ps->smi_mutex);
2241}
2242
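/* Marvell PHYs switch register pages through register 0x16 (page select).
 * Always restore page 0 on the way out so that subsequent accesses see the
 * default page, even if the paged access itself failed.
 */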
2243static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2244				     int reg, int val)
2245{
2246	int ret;
2247
2248	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2249	if (ret < 0)
2250		goto restore_page_0;
2251
2252	ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
2253restore_page_0:
2254	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2255
2256	return ret;
2257}
2258
2259static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page,
2260				    int reg)
2261{
2262	int ret;
2263
2264	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2265	if (ret < 0)
2266		goto restore_page_0;
2267
2268	ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
2269restore_page_0:
2270	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2271
2272	return ret;
2273}
2274
2275static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
2276{
2277	int ret;
2278
2279	ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
2280				       MII_BMCR);
2281	if (ret < 0)
2282		return ret;
2283
2284	if (ret & BMCR_PDOWN) {
2285		ret &= ~BMCR_PDOWN;
2286		ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES,
2287						PAGE_FIBER_SERDES, MII_BMCR,
2288						ret);
2289	}
2290
2291	return ret;
2292}
2293
2294static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2295{
2296	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2297	int ret;
2298	u16 reg;
2299
2300	mutex_lock(&ps->smi_mutex);
2301
2302	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2303	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2304	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2305	    mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
2306		/* MAC Forcing register: don't force link, speed,
2307		 * duplex or flow control state to any particular
2308		 * values on physical ports, but force the CPU port
2309		 * and all DSA ports to their maximum bandwidth and
2310		 * full duplex.
2311		 */
2312		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
		if (ret < 0)
			goto abort;
		reg = ret;
2313		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
2314			reg &= ~PORT_PCS_CTRL_UNFORCED;
2315			reg |= PORT_PCS_CTRL_FORCE_LINK |
2316				PORT_PCS_CTRL_LINK_UP |
2317				PORT_PCS_CTRL_DUPLEX_FULL |
2318				PORT_PCS_CTRL_FORCE_DUPLEX;
2319			if (mv88e6xxx_6065_family(ds))
2320				reg |= PORT_PCS_CTRL_100;
2321			else
2322				reg |= PORT_PCS_CTRL_1000;
2323		} else {
2324			reg |= PORT_PCS_CTRL_UNFORCED;
2325		}
2326
2327		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2328					   PORT_PCS_CTRL, reg);
2329		if (ret)
2330			goto abort;
2331	}
2332
2333	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2334	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2335	 * tunneling, determine priority by looking at 802.1p and IP
2336	 * priority fields (IP prio has precedence), and set STP state
2337	 * to Forwarding.
2338	 *
2339	 * If this is the CPU link, use DSA or EDSA tagging depending
2340	 * on which tagging mode was configured.
2341	 *
2342	 * If this is a link to another switch, use DSA tagging mode.
2343	 *
2344	 * If this is the upstream port for this switch, enable
2345	 * forwarding of unknown unicasts and multicasts.
2346	 */
2347	reg = 0;
2348	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2349	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2350	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
2351	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
2352		reg = PORT_CONTROL_IGMP_MLD_SNOOP |
2353			PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
2354			PORT_CONTROL_STATE_FORWARDING;
2355	if (dsa_is_cpu_port(ds, port)) {
2356		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
2357			reg |= PORT_CONTROL_DSA_TAG;
2358		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2359		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2360		    mv88e6xxx_6320_family(ds)) {
2361			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2362				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
2363			else
2364				reg |= PORT_CONTROL_FRAME_MODE_DSA;
2365			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2366				PORT_CONTROL_FORWARD_UNKNOWN_MC;
2367		}
2368
2369		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2370		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2371		    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
2372		    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
2373			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2374				reg |= PORT_CONTROL_EGRESS_ADD_TAG;
2375		}
2376	}
2377	if (dsa_is_dsa_port(ds, port)) {
2378		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
2379			reg |= PORT_CONTROL_DSA_TAG;
2380		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2381		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2382		    mv88e6xxx_6320_family(ds)) {
2383			reg |= PORT_CONTROL_FRAME_MODE_DSA;
2384		}
2385
2386		if (port == dsa_upstream_port(ds))
2387			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2388				PORT_CONTROL_FORWARD_UNKNOWN_MC;
2389	}
2390	if (reg) {
2391		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2392					   PORT_CONTROL, reg);
2393		if (ret)
2394			goto abort;
2395	}
2396
2397	/* If this port is connected to a SerDes, make sure the SerDes is not
2398	 * powered down.
2399	 */
2400	if (mv88e6xxx_6352_family(ds)) {
2401		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
2402		if (ret < 0)
2403			goto abort;
2404		ret &= PORT_STATUS_CMODE_MASK;
2405		if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
2406		    (ret == PORT_STATUS_CMODE_1000BASE_X) ||
2407		    (ret == PORT_STATUS_CMODE_SGMII)) {
2408			ret = mv88e6xxx_power_on_serdes(ds);
2409			if (ret < 0)
2410				goto abort;
2411		}
2412	}
2413
2414	/* Port Control 2: don't force a good FCS, set the maximum frame size to
2415	 * 10240 bytes, disable 802.1Q tag checking, don't discard tagged or
2416	 * untagged frames on this port, do a destination address lookup on all
2417	 * received packets as usual, disable ARP mirroring and don't send a
2418	 * copy of all transmitted/received frames on this port to the CPU.
2419	 */
2420	reg = 0;
2421	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2422	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2423	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
2424		reg = PORT_CONTROL_2_MAP_DA;
2425
2426	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2427	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
2428		reg |= PORT_CONTROL_2_JUMBO_10240;
2429
2430	if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
2431		/* Set the upstream port this port should use */
2432		reg |= dsa_upstream_port(ds);
2433		/* enable forwarding of unknown multicast addresses to
2434		 * the upstream port
2435		 */
2436		if (port == dsa_upstream_port(ds))
2437			reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
2438	}
2439
2440	reg |= PORT_CONTROL_2_8021Q_DISABLED;
2441
2442	if (reg) {
2443		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2444					   PORT_CONTROL_2, reg);
2445		if (ret)
2446			goto abort;
2447	}
2448
2449	/* Port Association Vector: when learning source addresses
2450	 * of packets, add the address to the address database using
2451	 * a port bitmap that has only the bit for this port set and
2452	 * the other bits clear.
2453	 */
2454	reg = 1 << port;
2455	/* Disable learning for CPU port */
2456	if (dsa_is_cpu_port(ds, port))
2457		reg = 0;
2458
2459	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
2460	if (ret)
2461		goto abort;
2462
2463	/* Egress rate control 2: disable egress rate control. */
2464	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
2465				   0x0000);
2466	if (ret)
2467		goto abort;
2468
2469	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2470	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2471	    mv88e6xxx_6320_family(ds)) {
2472		/* Do not limit how long this port can be paused by the
2473		 * remote end, or how long this port can pause the
2474		 * remote end.
2475		 */
2476		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2477					   PORT_PAUSE_CTRL, 0x0000);
2478		if (ret)
2479			goto abort;
2480
2481		/* Port ATU control: disable limiting the number of
2482		 * address database entries that this port is allowed
2483		 * to use.
2484		 */
2485		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2486					   PORT_ATU_CONTROL, 0x0000);
		if (ret)
			goto abort;

2487		/* Priority Override: disable DA, SA and VTU priority
2488		 * override.
2489		 */
2490		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2491					   PORT_PRI_OVERRIDE, 0x0000);
2492		if (ret)
2493			goto abort;
2494
2495		/* Port Ethertype: use the Ethertype DSA Ethertype
2496		 * value.
2497		 */
2498		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2499					   PORT_ETH_TYPE, ETH_P_EDSA);
2500		if (ret)
2501			goto abort;

2502		/* Tag Remap: use an identity 802.1p prio -> switch
2503		 * prio mapping.
2504		 */
2505		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2506					   PORT_TAG_REGMAP_0123, 0x3210);
2507		if (ret)
2508			goto abort;
2509
2510		/* Tag Remap 2: use an identity 802.1p prio -> switch
2511		 * prio mapping.
2512		 */
2513		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2514					   PORT_TAG_REGMAP_4567, 0x7654);
2515		if (ret)
2516			goto abort;
2517	}
2518
2519	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2520	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2521	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2522	    mv88e6xxx_6320_family(ds)) {
2523		/* Rate Control: disable ingress rate limiting. */
2524		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2525					   PORT_RATE_CONTROL, 0x0001);
2526		if (ret)
2527			goto abort;
2528	}
2529
2530	/* Port Control 1: disable trunking, disable sending
2531	 * learning messages to this port.
2532	 */
2533	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
2534	if (ret)
2535		goto abort;
2536
2537	/* Port based VLAN map: give each port the same default address
2538	 * database, and allow bidirectional communication between the
2539	 * CPU and DSA port(s), and the other ports.
2540	 */
2541	ret = _mv88e6xxx_port_fid_set(ds, port, 0);
2542	if (ret)
2543		goto abort;
2544
2545	ret = _mv88e6xxx_port_based_vlan_map(ds, port);
2546	if (ret)
2547		goto abort;
2548
2549	/* Default VLAN ID and priority: don't set a default VLAN
2550	 * ID, and set the default packet priority to zero.
2551	 */
2552	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
2553				   0x0000);
2554abort:
2555	mutex_unlock(&ps->smi_mutex);
2556	return ret;
2557}
2558
2559int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2560{
2561	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2562	int ret;
2563	int i;
2564
2565	for (i = 0; i < ps->num_ports; i++) {
2566		ret = mv88e6xxx_setup_port(ds, i);
2567		if (ret < 0)
2568			return ret;
2569	}
2570	return 0;
2571}
2572
2573int mv88e6xxx_setup_common(struct dsa_switch *ds)
2574{
2575	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2576
2577	mutex_init(&ps->smi_mutex);
2578
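	/* Cache the product number only; the low four bits of PORT_SWITCH_ID
	 * hold the revision and are masked off.
	 */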
2579	ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
2580
2581	INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
2582
2583	return 0;
2584}
2585
2586int mv88e6xxx_setup_global(struct dsa_switch *ds)
2587{
2588	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2589	int ret;
2590	int i;
2591
2592	/* Set the default address aging time to 5 minutes, and
2593	 * enable address learn messages to be sent to all message
2594	 * ports.
2595	 */
2596	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
2597		  0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
2598
2599	/* Configure the IP ToS mapping registers. */
2600	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2601	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2602	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2603	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2604	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2605	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2606	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2607	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2608
2609	/* Configure the IEEE 802.1p priority mapping register. */
2610	REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
2611
2612	/* Send all frames with destination addresses matching
2613	 * 01:80:c2:00:00:0x to the CPU port.
2614	 */
2615	REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
2616
2617	/* Ignore removed tag data on doubly tagged packets, disable
2618	 * flow control messages, force flow control priority to the
2619	 * highest, and send all special multicast frames to the CPU
2620	 * port at the highest priority.
2621	 */
2622	REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
2623		  0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
2624		  GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
2625
2626	/* Program the DSA routing table. */
2627	for (i = 0; i < 32; i++) {
2628		int nexthop = 0x1f;
2629
2630		if (ds->pd->rtable &&
2631		    i != ds->index && i < ds->dst->pd->nr_chips)
2632			nexthop = ds->pd->rtable[i] & 0x1f;
2633
2634		REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2635			  GLOBAL2_DEVICE_MAPPING_UPDATE |
2636			  (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
2637			  nexthop);
2638	}
2639
2640	/* Clear all trunk masks. */
2641	for (i = 0; i < 8; i++)
2642		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
2643			  0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
2644			  ((1 << ps->num_ports) - 1));
2645
2646	/* Clear all trunk mappings. */
2647	for (i = 0; i < 16; i++)
2648		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
2649			  GLOBAL2_TRUNK_MAPPING_UPDATE |
2650			  (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
2651
2652	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2653	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2654	    mv88e6xxx_6320_family(ds)) {
2655		/* Send all frames with destination addresses matching
2656		 * 01:80:c2:00:00:2x to the CPU port.
2657		 */
2658		REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
2659
2660		/* Initialise cross-chip port VLAN table to reset
2661		 * defaults.
2662		 */
2663		REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
2664
2665		/* Clear the priority override table. */
2666		for (i = 0; i < 16; i++)
2667			REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
2668				  0x8000 | (i << 8));
2669	}
2670
2671	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2672	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2673	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2674	    mv88e6xxx_6320_family(ds)) {
2675		/* Disable ingress rate limiting by resetting all
2676		 * ingress rate limit registers to their initial
2677		 * state.
2678		 */
2679		for (i = 0; i < ps->num_ports; i++)
2680			REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
2681				  0x9000 | (i << 8));
2682	}
2683
2684	/* Clear the statistics counters for all ports */
2685	REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
2686
2687	/* Wait for the flush to complete. */
2688	mutex_lock(&ps->smi_mutex);
2689	ret = _mv88e6xxx_stats_wait(ds);
2690	if (ret < 0)
2691		goto unlock;
2692
2693	/* Clear all ATU entries */
2694	ret = _mv88e6xxx_atu_flush(ds, 0, true);
2695	if (ret < 0)
2696		goto unlock;
2697
2698	/* Clear all the VTU and STU entries */
2699	ret = _mv88e6xxx_vtu_stu_flush(ds);
2700unlock:
2701	mutex_unlock(&ps->smi_mutex);
2702
2703	return ret;
2704}
2705
2706int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
2707{
2708	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2709	u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2710	struct gpio_desc *gpiod = ds->pd->reset;
2711	unsigned long timeout;
2712	int ret;
2713	int i;
2714
2715	/* Set all ports to the disabled state. */
2716	for (i = 0; i < ps->num_ports; i++) {
2717		ret = REG_READ(REG_PORT(i), PORT_CONTROL);
2718		REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
2719	}
2720
2721	/* Wait for transmit queues to drain. */
2722	usleep_range(2000, 4000);
2723
2724	/* If there is a gpio connected to the reset pin, toggle it */
2725	if (gpiod) {
2726		gpiod_set_value_cansleep(gpiod, 1);
2727		usleep_range(10000, 20000);
2728		gpiod_set_value_cansleep(gpiod, 0);
2729		usleep_range(10000, 20000);
2730	}
2731
2732	/* Reset the switch. Keep the PPU active if requested. The PPU
2733	 * needs to be active to support indirect phy register access
2734	 * through global registers 0x18 and 0x19.
2735	 */
2736	if (ppu_active)
2737		REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
2738	else
2739		REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
2740
2741	/* Wait up to one second for reset to complete. */
2742	timeout = jiffies + 1 * HZ;
2743	while (time_before(jiffies, timeout)) {
2744		ret = REG_READ(REG_GLOBAL, 0x00);
2745		if ((ret & is_reset) == is_reset)
2746			break;
2747		usleep_range(1000, 2000);
2748	}
2749	if (time_after(jiffies, timeout))
2750		return -ETIMEDOUT;
2751
2752	return 0;
2753}
2754
2755int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
2756{
2757	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2758	int ret;
2759
2760	mutex_lock(&ps->smi_mutex);
2761	ret = _mv88e6xxx_phy_page_read(ds, port, page, reg);
2762	mutex_unlock(&ps->smi_mutex);
2763
2764	return ret;
2765}
2766
2767int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2768			     int reg, int val)
2769{
2770	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2771	int ret;
2772
2773	mutex_lock(&ps->smi_mutex);
2774	ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val);
2775	mutex_unlock(&ps->smi_mutex);
2776
2777	return ret;
2778}
2779
2780static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
2781{
2782	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2783
2784	if (port >= 0 && port < ps->num_ports)
2785		return port;
2786	return -EINVAL;
2787}
2788
2789int
2790mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
2791{
2792	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2793	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2794	int ret;
2795
2796	if (addr < 0)
2797		return addr;
2798
2799	mutex_lock(&ps->smi_mutex);
2800	ret = _mv88e6xxx_phy_read(ds, addr, regnum);
2801	mutex_unlock(&ps->smi_mutex);
2802	return ret;
2803}
2804
2805int
2806mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
2807{
2808	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2809	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2810	int ret;
2811
2812	if (addr < 0)
2813		return addr;
2814
2815	mutex_lock(&ps->smi_mutex);
2816	ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
2817	mutex_unlock(&ps->smi_mutex);
2818	return ret;
2819}
2820
2821int
2822mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
2823{
2824	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2825	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2826	int ret;
2827
2828	if (addr < 0)
2829		return addr;
2830
2831	mutex_lock(&ps->smi_mutex);
2832	ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
2833	mutex_unlock(&ps->smi_mutex);
2834	return ret;
2835}
2836
2837int
2838mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
2839			     u16 val)
2840{
2841	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2842	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2843	int ret;
2844
2845	if (addr < 0)
2846		return addr;
2847
2848	mutex_lock(&ps->smi_mutex);
2849	ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
2850	mutex_unlock(&ps->smi_mutex);
2851	return ret;
2852}
2853
2854#ifdef CONFIG_NET_DSA_HWMON
2855
2856static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
2857{
2858	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2859	int ret;
2860	int val;
2861
2862	*temp = 0;
2863
2864	mutex_lock(&ps->smi_mutex);
2865
2866	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
2867	if (ret < 0)
2868		goto error;
2869
2870	/* Enable temperature sensor */
2871	ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2872	if (ret < 0)
2873		goto error;
2874
2875	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
2876	if (ret < 0)
2877		goto error;
2878
2879	/* Wait for temperature to stabilize */
2880	usleep_range(10000, 12000);
2881
2882	val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2883	if (val < 0) {
2884		ret = val;
2885		goto error;
2886	}
2887
2888	/* Disable temperature sensor */
2889	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, val & ~(1 << 5));
2890	if (ret < 0)
2891		goto error;
2892
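	/* The 5-bit reading is in 5 degree C steps with a -25 degree C
	 * offset, e.g. a raw value of 0x0b reads back as 30 degrees C.
	 */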
2893	*temp = ((val & 0x1f) - 5) * 5;
2894
2895error:
2896	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
2897	mutex_unlock(&ps->smi_mutex);
2898	return ret;
2899}
2900
2901static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
2902{
2903	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2904	int ret;
2905
2906	*temp = 0;
2907
2908	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
2909	if (ret < 0)
2910		return ret;
2911
2912	*temp = (ret & 0xff) - 25;
2913
2914	return 0;
2915}
2916
2917int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
2918{
2919	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
2920		return mv88e63xx_get_temp(ds, temp);
2921
2922	return mv88e61xx_get_temp(ds, temp);
2923}
2924
2925int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
2926{
2927	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2928	int ret;
2929
2930	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2931		return -EOPNOTSUPP;
2932
2933	*temp = 0;
2934
2935	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2936	if (ret < 0)
2937		return ret;
2938
2939	*temp = (((ret >> 8) & 0x1f) * 5) - 25;
2940
2941	return 0;
2942}
2943
2944int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
2945{
2946	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2947	int ret;
2948
2949	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2950		return -EOPNOTSUPP;
2951
2952	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2953	if (ret < 0)
2954		return ret;
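	/* The 5-bit limit field encodes the threshold as temp / 5 + 5, the
	 * inverse of the decoding in mv88e6xxx_get_temp_limit(), so the
	 * highest programmable limit is (0x1f - 5) * 5 = 130 degrees C.
	 */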
2955	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
2956	return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
2957					(ret & 0xe0ff) | (temp << 8));
2958}
2959
2960int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
2961{
2962	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2963	int ret;
2964
2965	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2966		return -EOPNOTSUPP;
2967
2968	*alarm = false;
2969
2970	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2971	if (ret < 0)
2972		return ret;
2973
2974	*alarm = !!(ret & 0x40);
2975
2976	return 0;
2977}
2978#endif /* CONFIG_NET_DSA_HWMON */
2979
2980char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr,
2981			    const struct mv88e6xxx_switch_id *table,
2982			    unsigned int num)
2983{
2984	struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
2985	int i, ret;
2986
2987	if (!bus)
2988		return NULL;
2989
2990	ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
2991	if (ret < 0)
2992		return NULL;
2993
2994	/* Look up the exact switch ID */
2995	for (i = 0; i < num; ++i)
2996		if (table[i].id == ret)
2997			return table[i].name;
2998
2999	/* Look up only the product number */
3000	for (i = 0; i < num; ++i) {
3001		if (table[i].id == (ret & PORT_SWITCH_ID_PROD_NUM_MASK)) {
3002			dev_warn(host_dev, "unknown revision %d, using base switch 0x%x\n",
3003				 ret & PORT_SWITCH_ID_REV_MASK,
3004				 ret & PORT_SWITCH_ID_PROD_NUM_MASK);
3005			return table[i].name;
3006		}
3007	}
3008
3009	return NULL;
3010}
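
/* Illustrative sketch only: each sub-driver passes its own ID table from its
 * probe function, roughly along the following lines (the table entries shown
 * are assumptions based on the PORT_SWITCH_ID_* defines in mv88e6xxx.h):
 *
 *	static const struct mv88e6xxx_switch_id mv88e6352_table[] = {
 *		{ PORT_SWITCH_ID_6176, "Marvell 88E6176" },
 *		{ PORT_SWITCH_ID_6352, "Marvell 88E6352" },
 *	};
 *
 *	name = mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6352_table,
 *				     ARRAY_SIZE(mv88e6352_table));
 */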
3011
3012static int __init mv88e6xxx_init(void)
3013{
3014#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
3015	register_switch_driver(&mv88e6131_switch_driver);
3016#endif
3017#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
3018	register_switch_driver(&mv88e6123_switch_driver);
3019#endif
3020#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
3021	register_switch_driver(&mv88e6352_switch_driver);
3022#endif
3023#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
3024	register_switch_driver(&mv88e6171_switch_driver);
3025#endif
3026	return 0;
3027}
3028module_init(mv88e6xxx_init);
3029
3030static void __exit mv88e6xxx_cleanup(void)
3031{
3032#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
3033	unregister_switch_driver(&mv88e6171_switch_driver);
3034#endif
3035#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
3036	unregister_switch_driver(&mv88e6352_switch_driver);
3037#endif
3038#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
3039	unregister_switch_driver(&mv88e6123_switch_driver);
3040#endif
3041#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
3042	unregister_switch_driver(&mv88e6131_switch_driver);
3043#endif
3044}
3045module_exit(mv88e6xxx_cleanup);
3046
3047MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
3048MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
3049MODULE_LICENSE("GPL");