Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2022 Schneider-Electric
   4 *
   5 * Clément Léger <clement.leger@bootlin.com>
   6 */
   7
   8#include <linux/clk.h>
   9#include <linux/etherdevice.h>
  10#include <linux/if_bridge.h>
  11#include <linux/if_ether.h>
  12#include <linux/kernel.h>
  13#include <linux/module.h>
  14#include <linux/of.h>
  15#include <linux/of_mdio.h>
  16#include <net/dsa.h>
  17
  18#include "rzn1_a5psw.h"
  19
/* Mapping between a hardware MIB counter register and the counter name
 * exposed through ethtool -S.
 */
struct a5psw_stats {
	u16 offset;			/* A5PSW_* register offset of the counter */
	const char name[ETH_GSTRING_LEN];	/* ethtool string, same as register name */
};

/* Build an a5psw_stats entry whose ethtool name matches the register macro */
#define STAT_DESC(_offset) {	\
	.offset = A5PSW_##_offset,	\
	.name = __stringify(_offset),	\
}

/* Per-port statistics counters exposed through ethtool -S */
static const struct a5psw_stats a5psw_stats[] = {
	STAT_DESC(aFramesTransmittedOK),
	STAT_DESC(aFramesReceivedOK),
	STAT_DESC(aFrameCheckSequenceErrors),
	STAT_DESC(aAlignmentErrors),
	STAT_DESC(aOctetsTransmittedOK),
	STAT_DESC(aOctetsReceivedOK),
	STAT_DESC(aTxPAUSEMACCtrlFrames),
	STAT_DESC(aRxPAUSEMACCtrlFrames),
	STAT_DESC(ifInErrors),
	STAT_DESC(ifOutErrors),
	STAT_DESC(ifInUcastPkts),
	STAT_DESC(ifInMulticastPkts),
	STAT_DESC(ifInBroadcastPkts),
	STAT_DESC(ifOutDiscards),
	STAT_DESC(ifOutUcastPkts),
	STAT_DESC(ifOutMulticastPkts),
	STAT_DESC(ifOutBroadcastPkts),
	STAT_DESC(etherStatsDropEvents),
	STAT_DESC(etherStatsOctets),
	STAT_DESC(etherStatsPkts),
	STAT_DESC(etherStatsUndersizePkts),
	STAT_DESC(etherStatsOversizePkts),
	STAT_DESC(etherStatsPkts64Octets),
	STAT_DESC(etherStatsPkts65to127Octets),
	STAT_DESC(etherStatsPkts128to255Octets),
	STAT_DESC(etherStatsPkts256to511Octets),
	STAT_DESC(etherStatsPkts1024to1518Octets),
	STAT_DESC(etherStatsPkts1519toXOctets),
	STAT_DESC(etherStatsJabbers),
	STAT_DESC(etherStatsFragments),
	STAT_DESC(VLANReceived),
	STAT_DESC(VLANTransmitted),
	STAT_DESC(aDeferred),
	STAT_DESC(aMultipleCollisions),
	STAT_DESC(aSingleCollisions),
	STAT_DESC(aLateCollisions),
	STAT_DESC(aExcessiveCollisions),
	STAT_DESC(aCarrierSenseErrors),
};
  70
/* Write @value to the switch register at @offset */
static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value)
{
	writel(value, a5psw->base + offset);
}
  75
/* Read the switch register at @offset */
static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset)
{
	return readl(a5psw->base + offset);
}
  80
  81static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val)
  82{
  83	u32 reg;
  84
  85	spin_lock(&a5psw->reg_lock);
  86
  87	reg = a5psw_reg_readl(a5psw, offset);
  88	reg &= ~mask;
  89	reg |= val;
  90	a5psw_reg_writel(a5psw, offset, reg);
  91
  92	spin_unlock(&a5psw->reg_lock);
  93}
  94
/* Report the DSA tagging protocol used by this switch (fixed, per-switch) */
static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_RZN1_A5PSW;
}
 101
 102static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern,
 103				   bool enable)
 104{
 105	u32 rx_match = 0;
 106
 107	if (enable)
 108		rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern);
 109
 110	a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port),
 111		      A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match);
 112}
 113
/* Toggle the "management forward" matcher for @port */
static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
{
	/* Enable "management forward" pattern matching, this will forward
	 * packets from this port only towards the management port and thus
	 * isolate the port.
	 */
	a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
}
 122
 123static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
 124{
 125	u32 port_ena = 0;
 126
 127	if (enable)
 128		port_ena |= A5PSW_PORT_ENA_TX_RX(port);
 129
 130	a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port),
 131		      port_ena);
 132}
 133
/* Write *@ctrl to the lookup-table control register and busy-wait for the
 * command to complete. On success *@ctrl is updated with the final register
 * value (some commands return results in it). Callers are expected to hold
 * lk_lock to serialize lookup-table commands.
 *
 * Returns 0 on success or a negative timeout error.
 */
static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl)
{
	int ret;

	a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl);

	ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl,
				 !(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY),
				 A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
	if (ret)
		dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n");

	return ret;
}
 148
/* Ask the hardware to delete all lookup-table entries owned by @port */
static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port)
{
	u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port);

	mutex_lock(&a5psw->lk_lock);
	a5psw_lk_execute_ctrl(a5psw, &ctrl);
	mutex_unlock(&a5psw->lk_lock);
}
 157
 158static void a5psw_port_authorize_set(struct a5psw *a5psw, int port,
 159				     bool authorize)
 160{
 161	u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port));
 162
 163	if (authorize)
 164		reg |= A5PSW_AUTH_PORT_AUTHORIZED;
 165	else
 166		reg &= ~A5PSW_AUTH_PORT_AUTHORIZED;
 167
 168	a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg);
 169}
 170
/* DSA .port_disable: de-authorize the port and stop its TX/RX paths */
static void a5psw_port_disable(struct dsa_switch *ds, int port)
{
	struct a5psw *a5psw = ds->priv;

	a5psw_port_authorize_set(a5psw, port, false);
	a5psw_port_enable_set(a5psw, port, false);
}
 178
/* DSA .port_enable: authorize the port and enable its TX/RX paths */
static int a5psw_port_enable(struct dsa_switch *ds, int port,
			     struct phy_device *phy)
{
	struct a5psw *a5psw = ds->priv;

	a5psw_port_authorize_set(a5psw, port, true);
	a5psw_port_enable_set(a5psw, port, true);

	return 0;
}
 189
/* DSA .port_change_mtu: program the max frame length for @port.
 * The hardware register counts the full frame, so add the Ethernet header,
 * the switch tag overhead and the FCS on top of the L3 MTU.
 */
static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct a5psw *a5psw = ds->priv;

	new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
	a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);

	return 0;
}
 199
/* DSA .port_max_mtu: largest MTU supported by the hardware on any port */
static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
{
	return A5PSW_MAX_MTU;
}
 204
 205static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
 206				   struct phylink_config *config)
 207{
 208	unsigned long *intf = config->supported_interfaces;
 209
 210	config->mac_capabilities = MAC_1000FD;
 211
 212	if (dsa_is_cpu_port(ds, port)) {
 213		/* GMII is used internally and GMAC2 is connected to the switch
 214		 * using 1000Mbps Full-Duplex mode only (cf ethernet manual)
 215		 */
 216		__set_bit(PHY_INTERFACE_MODE_GMII, intf);
 217	} else {
 218		config->mac_capabilities |= MAC_100 | MAC_10;
 219		phy_interface_set_rgmii(intf);
 220		__set_bit(PHY_INTERFACE_MODE_RMII, intf);
 221		__set_bit(PHY_INTERFACE_MODE_MII, intf);
 222	}
 223}
 224
 225static struct phylink_pcs *
 226a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
 227			     phy_interface_t interface)
 228{
 229	struct dsa_port *dp = dsa_to_port(ds, port);
 230	struct a5psw *a5psw = ds->priv;
 231
 232	if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
 233		return a5psw->pcs[port];
 234
 235	return NULL;
 236}
 237
 238static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
 239					unsigned int mode,
 240					phy_interface_t interface)
 241{
 242	struct a5psw *a5psw = ds->priv;
 243	u32 cmd_cfg;
 244
 245	cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
 246	cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
 247	a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
 248}
 249
/* DSA .phylink_mac_link_up: program the port MAC command/config register for
 * the negotiated link parameters and enable TX/RX.
 */
static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int mode,
				      phy_interface_t interface,
				      struct phy_device *phydev, int speed,
				      int duplex, bool tx_pause, bool rx_pause)
{
	u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
		      A5PSW_CMD_CFG_TX_CRC_APPEND;
	struct a5psw *a5psw = ds->priv;

	if (speed == SPEED_1000)
		cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;

	if (duplex == DUPLEX_HALF)
		cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;

	cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;

	/* NOTE(review): cmd_cfg never has PAUSE_IGNORE set at this point, so
	 * this clear is currently a no-op; presumably the intent is to honor
	 * pause frames when rx_pause is off — confirm against the HW manual.
	 */
	if (!rx_pause)
		cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;

	a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
}
 273
/* DSA .set_ageing_time: convert @msecs to hardware ageing ticks.
 *
 * The hardware ages one table entry per tick period, so one full sweep of
 * the table takes agetime * A5PSW_TABLE_ENTRIES * 1024 clock cycles; the
 * computation below is that relation inverted, with an upper bound derived
 * from the register field width (A5PSW_LK_AGETIME_MASK).
 *
 * Returns 0 on success, -EINVAL when @msecs exceeds what the register can
 * represent.
 */
static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct a5psw *a5psw = ds->priv;
	unsigned long rate;
	u64 max, tmp;
	u32 agetime;

	rate = clk_get_rate(a5psw->clk);
	max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
		       rate) * 1000;
	if (msecs > max)
		return -EINVAL;

	tmp = div_u64(rate, MSEC_PER_SEC);
	agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);

	a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);

	return 0;
}
 294
 295static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
 296					  bool set)
 297{
 298	u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
 299			A5PSW_MCAST_DEF_MASK};
 300	int i;
 301
 302	if (set)
 303		a5psw->bridged_ports |= BIT(port);
 304	else
 305		a5psw->bridged_ports &= ~BIT(port);
 306
 307	for (i = 0; i < ARRAY_SIZE(offsets); i++)
 308		a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
 309}
 310
/* DSA .port_bridge_join: add @port to the (single) offloaded bridge.
 *
 * The port joins the flooding masks and its management-forward isolation is
 * lifted so it can switch traffic in hardware.
 *
 * Returns 0 on success, -EOPNOTSUPP when a second bridge device is used.
 */
static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
				  struct dsa_bridge bridge,
				  bool *tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct a5psw *a5psw = ds->priv;

	/* We only support 1 bridge device */
	if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Forwarding offload supported for a single bridge");
		return -EOPNOTSUPP;
	}

	a5psw->br_dev = bridge.dev;
	a5psw_flooding_set_resolution(a5psw, port, true);
	a5psw_port_mgmtfwd_set(a5psw, port, false);

	return 0;
}
 331
/* DSA .port_bridge_leave: remove @port from the bridge and re-isolate it
 * behind the management-forward matcher.
 */
static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
				    struct dsa_bridge bridge)
{
	struct a5psw *a5psw = ds->priv;

	a5psw_flooding_set_resolution(a5psw, port, false);
	a5psw_port_mgmtfwd_set(a5psw, port, true);

	/* No more ports bridged (only the CPU port is left in the mask) */
	if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
		a5psw->br_dev = NULL;
}
 344
/* DSA .port_stp_state_set: map bridge STP states onto the per-port
 * learning-disable (LEARN_DIS) and traffic-block (LEARN_BLOCK) bits.
 *
 * NOTE(review): in BR_STATE_LISTENING only learning is disabled, while the
 * port is left unblocked — verify this matches the intended STP semantics
 * for this hardware.
 */
static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port);
	struct a5psw *a5psw = ds->priv;
	u32 reg = 0;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		reg |= A5PSW_INPUT_LEARN_DIS(port);
		reg |= A5PSW_INPUT_LEARN_BLOCK(port);
		break;
	case BR_STATE_LISTENING:
		reg |= A5PSW_INPUT_LEARN_DIS(port);
		break;
	case BR_STATE_LEARNING:
		reg |= A5PSW_INPUT_LEARN_BLOCK(port);
		break;
	case BR_STATE_FORWARDING:
	default:
		/* Forwarding: neither learning nor traffic is restricted */
		break;
	}

	a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
}
 370
/* DSA .port_fast_age: flush all FDB entries learned on @port */
static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
{
	struct a5psw *a5psw = ds->priv;

	a5psw_port_fdb_flush(a5psw, port);
}
 377
/* Run a lookup-table search for the entry described by @lk_data (MAC in
 * DATA_LO/DATA_HI). On success *@entry holds the table index reported by the
 * control register. Caller must hold lk_lock.
 *
 * Returns 0 on success or the a5psw_lk_execute_ctrl() timeout error.
 */
static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
				   u16 *entry)
{
	u32 ctrl;
	int ret;

	a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
	a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);

	ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
	ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
	if (ret)
		return ret;

	/* The matching table index is returned in the low address bits */
	*entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;

	return 0;
}
 396
/* DSA .port_fdb_add: install a static FDB entry for @addr on @port.
 *
 * The hardware table is keyed on MAC address only, so @vid is ignored. An
 * existing entry is extended with BIT(port) in its port mask; a brand new
 * entry additionally increments the hardware LEARNCOUNT.
 *
 * Returns 0 on success or a lookup-engine timeout error.
 */
static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid,
			      struct dsa_db db)
{
	struct a5psw *a5psw = ds->priv;
	union lk_data lk_data = {0};
	bool inc_learncount = false;
	int ret = 0;
	u16 entry;
	u32 reg;

	ether_addr_copy(lk_data.entry.mac, addr);
	lk_data.entry.port_mask = BIT(port);

	mutex_lock(&a5psw->lk_lock);

	/* Set the value to be written in the lookup table */
	ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
	if (ret)
		goto lk_unlock;

	/* Fetch the flags/port-mask half of the entry found by the lookup */
	lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
	if (!lk_data.entry.valid) {
		inc_learncount = true;
		/* port_mask set to 0x1f when entry is not valid, clear it */
		lk_data.entry.port_mask = 0;
		lk_data.entry.prio = 0;
	}

	lk_data.entry.port_mask |= BIT(port);
	lk_data.entry.is_static = 1;
	lk_data.entry.valid = 1;

	a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);

	reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
	ret = a5psw_lk_execute_ctrl(a5psw, &reg);
	if (ret)
		goto lk_unlock;

	if (inc_learncount) {
		reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
		a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
	}

lk_unlock:
	mutex_unlock(&a5psw->lk_lock);

	return ret;
}
 447
/* DSA .port_fdb_del: remove @port from the FDB entry for @addr.
 *
 * When the port mask becomes empty the whole entry is cleared and the
 * hardware LEARNCOUNT is decremented.
 *
 * Returns 0 on success (including when no matching entry exists, see the
 * VID note below) or a lookup-engine timeout error.
 */
static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid,
			      struct dsa_db db)
{
	struct a5psw *a5psw = ds->priv;
	union lk_data lk_data = {0};
	bool clear = false;
	u16 entry;
	u32 reg;
	int ret;

	ether_addr_copy(lk_data.entry.mac, addr);

	mutex_lock(&a5psw->lk_lock);

	ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
	if (ret)
		goto lk_unlock;

	/* Fetch the flags/port-mask half of the entry found by the lookup */
	lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);

	/* Our hardware does not associate any VID to the FDB entries so this
	 * means that if two entries were added for the same mac but for
	 * different VID, then, on the deletion of the first one, we would also
	 * delete the second one. Since there is unfortunately nothing we can do
	 * about that, do not return an error...
	 */
	if (!lk_data.entry.valid)
		goto lk_unlock;

	lk_data.entry.port_mask &= ~BIT(port);
	/* If there is no more port in the mask, clear the entry */
	if (lk_data.entry.port_mask == 0)
		clear = true;

	a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);

	reg = entry;
	if (clear)
		reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
	else
		reg |= A5PSW_LK_ADDR_CTRL_WRITE;

	ret = a5psw_lk_execute_ctrl(a5psw, &reg);
	if (ret)
		goto lk_unlock;

	/* Decrement LEARNCOUNT */
	if (clear) {
		reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
		a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
	}

lk_unlock:
	mutex_unlock(&a5psw->lk_lock);

	return ret;
}
 506
/* DSA .port_fdb_dump: walk the whole lookup table and report, via @cb, every
 * valid entry whose port mask includes @port. The hardware stores no VID, so
 * entries are reported with vid 0.
 *
 * Returns 0 on success, or the first error from the lookup engine or @cb.
 */
static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
			       dsa_fdb_dump_cb_t *cb, void *data)
{
	struct a5psw *a5psw = ds->priv;
	union lk_data lk_data;
	int i = 0, ret = 0;
	u32 reg;

	mutex_lock(&a5psw->lk_lock);

	for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
		reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;

		ret = a5psw_lk_execute_ctrl(a5psw, &reg);
		if (ret)
			goto out_unlock;

		lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
		/* If entry is not valid or does not contain the port, skip */
		if (!lk_data.entry.valid ||
		    !(lk_data.entry.port_mask & BIT(port)))
			continue;

		/* Only read the MAC low bytes for entries we will report */
		lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);

		ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
		if (ret)
			goto out_unlock;
	}

out_unlock:
	mutex_unlock(&a5psw->lk_lock);

	return ret;
}
 542
/* Read a 64-bit statistics counter for @port.
 * The low-word read must come first: reading it latches the counter's high
 * word into A5PSW_STATS_HIWORD, which is then read to complete the value.
 */
static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
{
	u32 reg_lo, reg_hi;

	reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
	/* A5PSW_STATS_HIWORD is latched on stat read */
	reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);

	return ((u64)reg_hi << 32) | reg_lo;
}
 553
 554static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
 555			      uint8_t *data)
 556{
 557	unsigned int u;
 558
 559	if (stringset != ETH_SS_STATS)
 560		return;
 561
 562	for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++) {
 563		memcpy(data + u * ETH_GSTRING_LEN, a5psw_stats[u].name,
 564		       ETH_GSTRING_LEN);
 565	}
 566}
 567
 568static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
 569				    uint64_t *data)
 570{
 571	struct a5psw *a5psw = ds->priv;
 572	unsigned int u;
 573
 574	for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
 575		data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
 576}
 577
 578static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
 579{
 580	if (sset != ETH_SS_STATS)
 581		return 0;
 582
 583	return ARRAY_SIZE(a5psw_stats);
 584}
 585
/* DSA .get_eth_mac_stats: fill the standard MAC statistics from the
 * hardware MIB counters.
 */
static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
				    struct ethtool_eth_mac_stats *mac_stats)
{
	struct a5psw *a5psw = ds->priv;

/* Shorthand: read one 64-bit MIB counter for this port */
#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
	mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
	mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
	mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
	mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
	mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
	mac_stats->AlignmentErrors = RD(aAlignmentErrors);
	mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
	mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
	mac_stats->LateCollisions = RD(aLateCollisions);
	mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
	mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
	mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
	mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
	mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
	mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
	mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
	/* NOTE(review): aDeferred is also used for FramesWithDeferredXmissions
	 * above; the hardware seemingly has no separate excessive-deferral
	 * counter — confirm against the MIB register map.
	 */
	mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
	mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
	mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
#undef RD
}
 613
/* Packet-size buckets reported alongside the RMON histogram; one entry per
 * hist[] slot filled in a5psw_get_rmon_stats(), terminated by an empty range.
 */
static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, A5PSW_MAX_MTU },
	{}
};
 624
/* DSA .get_rmon_stats: fill the RMON statistics and size histogram from the
 * hardware etherStats counters.
 */
static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
				 struct ethtool_rmon_stats *rmon_stats,
				 const struct ethtool_rmon_hist_range **ranges)
{
	struct a5psw *a5psw = ds->priv;

/* Shorthand: read one 64-bit MIB counter for this port */
#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
	rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
	rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
	rmon_stats->fragments = RD(etherStatsFragments);
	rmon_stats->jabbers = RD(etherStatsJabbers);
	rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
	rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
	rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
	rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
	rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
	rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
	rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
#undef RD

	*ranges = a5psw_rmon_ranges;
}
 647
 648static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
 649				     struct ethtool_eth_ctrl_stats *ctrl_stats)
 650{
 651	struct a5psw *a5psw = ds->priv;
 652	u64 stat;
 653
 654	stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
 655	ctrl_stats->MACControlFramesTransmitted = stat;
 656	stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
 657	ctrl_stats->MACControlFramesReceived = stat;
 658}
 659
/* DSA .setup: one-time switch initialization.
 *
 * Validates the CPU port, configures the management port and tagging,
 * clears the lookup and VLAN tables, then resets every port MAC and applies
 * the default enable/flooding/mgmt-forward policy per port role.
 *
 * Returns 0 on success or a negative error.
 */
static int a5psw_setup(struct dsa_switch *ds)
{
	struct a5psw *a5psw = ds->priv;
	int port, vlan, ret;
	struct dsa_port *dp;
	u32 reg;

	/* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
	dsa_switch_for_each_cpu_port(dp, ds) {
		if (dp->index != A5PSW_CPU_PORT) {
			dev_err(a5psw->dev, "Invalid CPU port\n");
			return -EINVAL;
		}
	}

	/* Configure management port */
	reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD;
	a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);

	/* Set pattern 0 to forward all frame to mgmt port */
	a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
			 A5PSW_PATTERN_CTRL_MGMTFWD);

	/* Enable port tagging */
	reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
	reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
	a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);

	/* Enable normal switch operation */
	reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
	      A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
	      A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
	a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);

	/* Wait for the hardware to finish wiping the lookup table */
	ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
				 !(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
				 A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
	if (ret) {
		dev_err(a5psw->dev, "Failed to clear lookup table\n");
		return ret;
	}

	/* Reset learn count to 0 */
	reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
	a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);

	/* Clear VLAN resource table */
	reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
	for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
		a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);

	/* Reset all ports */
	dsa_switch_for_each_port(dp, ds) {
		port = dp->index;

		/* Reset the port */
		a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
				 A5PSW_CMD_CFG_SW_RESET);

		/* Enable only CPU port */
		a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));

		if (dsa_port_is_unused(dp))
			continue;

		/* Enable egress flooding for CPU port */
		if (dsa_port_is_cpu(dp))
			a5psw_flooding_set_resolution(a5psw, port, true);

		/* Enable management forward only for user ports */
		if (dsa_port_is_user(dp))
			a5psw_port_mgmtfwd_set(a5psw, port, true);
	}

	return 0;
}
 736
/* DSA switch operations implemented by this driver */
static const struct dsa_switch_ops a5psw_switch_ops = {
	.get_tag_protocol = a5psw_get_tag_protocol,
	.setup = a5psw_setup,
	.port_disable = a5psw_port_disable,
	.port_enable = a5psw_port_enable,
	.phylink_get_caps = a5psw_phylink_get_caps,
	.phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
	.phylink_mac_link_down = a5psw_phylink_mac_link_down,
	.phylink_mac_link_up = a5psw_phylink_mac_link_up,
	.port_change_mtu = a5psw_port_change_mtu,
	.port_max_mtu = a5psw_port_max_mtu,
	.get_sset_count = a5psw_get_sset_count,
	.get_strings = a5psw_get_strings,
	.get_ethtool_stats = a5psw_get_ethtool_stats,
	.get_eth_mac_stats = a5psw_get_eth_mac_stats,
	.get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
	.get_rmon_stats = a5psw_get_rmon_stats,
	.set_ageing_time = a5psw_set_ageing_time,
	.port_bridge_join = a5psw_port_bridge_join,
	.port_bridge_leave = a5psw_port_bridge_leave,
	.port_stp_state_set = a5psw_port_stp_state_set,
	.port_fast_age = a5psw_port_fast_age,
	.port_fdb_add = a5psw_port_fdb_add,
	.port_fdb_del = a5psw_port_fdb_del,
	.port_fdb_dump = a5psw_port_fdb_dump,
};
 763
/* Poll the MDIO status register until the BUSY bit clears (up to 1 s).
 * Returns 0 on success or a negative timeout error.
 */
static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
{
	u32 status;
	int err;

	err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
				 !(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
				 1000 * USEC_PER_MSEC);
	if (err)
		dev_err(a5psw->dev, "MDIO command timeout\n");

	return err;
}
 777
/* MDIO bus .read: issue a Clause-22 read on the internal MDIO controller.
 * Clause-45 access is not supported. Returns the 16-bit register value,
 * -EIO on a hardware-reported read error, or a negative timeout error.
 */
static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
	struct a5psw *a5psw = bus->priv;
	u32 cmd, status;
	int ret;

	if (phy_reg & MII_ADDR_C45)
		return -EOPNOTSUPP;

	cmd = A5PSW_MDIO_COMMAND_READ;
	cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
	cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);

	a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);

	ret = a5psw_mdio_wait_busy(a5psw);
	if (ret)
		return ret;

	ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;

	/* Discard the data if the controller flagged a read error */
	status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
	if (status & A5PSW_MDIO_CFG_STATUS_READERR)
		return -EIO;

	return ret;
}
 805
/* MDIO bus .write: issue a Clause-22 write on the internal MDIO controller.
 * Clause-45 access is not supported. The write starts when the data register
 * is written; completion is awaited via the BUSY bit.
 */
static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
			    u16 phy_data)
{
	struct a5psw *a5psw = bus->priv;
	u32 cmd;

	if (phy_reg & MII_ADDR_C45)
		return -EOPNOTSUPP;

	cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
	cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);

	a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
	a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);

	return a5psw_mdio_wait_busy(a5psw);
}
 823
 824static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
 825{
 826	unsigned long rate;
 827	unsigned long div;
 828	u32 cfgstatus;
 829
 830	rate = clk_get_rate(a5psw->hclk);
 831	div = ((rate / mdio_freq) / 2);
 832	if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
 833	    div < A5PSW_MDIO_CLK_DIV_MIN) {
 834		dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
 835		return -ERANGE;
 836	}
 837
 838	cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);
 839
 840	a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);
 841
 842	return 0;
 843}
 844
/* Configure the internal MDIO controller and register the MDIO bus described
 * by @node. Falls back to A5PSW_MDIO_DEF_FREQ when the devicetree does not
 * specify "clock-frequency". The bus is devm-managed.
 */
static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
{
	struct device *dev = a5psw->dev;
	struct mii_bus *bus;
	u32 mdio_freq;
	int ret;

	if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
		mdio_freq = A5PSW_MDIO_DEF_FREQ;

	ret = a5psw_mdio_config(a5psw, mdio_freq);
	if (ret)
		return ret;

	bus = devm_mdiobus_alloc(dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "a5psw_mdio";
	bus->read = a5psw_mdio_read;
	bus->write = a5psw_mdio_write;
	bus->priv = a5psw;
	bus->parent = dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));

	a5psw->mii_bus = bus;

	return devm_of_mdiobus_register(dev, bus, node);
}
 874
 875static void a5psw_pcs_free(struct a5psw *a5psw)
 876{
 877	int i;
 878
 879	for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
 880		if (a5psw->pcs[i])
 881			miic_destroy(a5psw->pcs[i]);
 882	}
 883}
 884
/* Walk the "ethernet-ports" devicetree node and create a MII-converter PCS
 * for every port that has a "pcs-handle" phandle, storing it in a5psw->pcs[]
 * indexed by the port's "reg" property.
 *
 * On failure all node references are dropped and any PCS already created is
 * destroyed. Returns 0 on success or a negative error.
 */
static int a5psw_pcs_get(struct a5psw *a5psw)
{
	struct device_node *ports, *port, *pcs_node;
	struct phylink_pcs *pcs;
	int ret;
	u32 reg;

	ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
	if (!ports)
		return -EINVAL;

	for_each_available_child_of_node(ports, port) {
		pcs_node = of_parse_phandle(port, "pcs-handle", 0);
		if (!pcs_node)
			continue;

		if (of_property_read_u32(port, "reg", &reg)) {
			ret = -EINVAL;
			goto free_pcs;
		}

		/* "reg" must index into the fixed-size pcs[] array */
		if (reg >= ARRAY_SIZE(a5psw->pcs)) {
			ret = -ENODEV;
			goto free_pcs;
		}

		pcs = miic_create(a5psw->dev, pcs_node);
		if (IS_ERR(pcs)) {
			dev_err(a5psw->dev, "Failed to create PCS for port %d\n",
				reg);
			ret = PTR_ERR(pcs);
			goto free_pcs;
		}

		a5psw->pcs[reg] = pcs;
		of_node_put(pcs_node);
	}
	of_node_put(ports);

	return 0;

free_pcs:
	/* On error also drop the refs the loop iterator still holds */
	of_node_put(pcs_node);
	of_node_put(port);
	of_node_put(ports);
	a5psw_pcs_free(a5psw);

	return ret;
}
 934
 935static int a5psw_probe(struct platform_device *pdev)
 936{
 937	struct device *dev = &pdev->dev;
 938	struct device_node *mdio;
 939	struct dsa_switch *ds;
 940	struct a5psw *a5psw;
 941	int ret;
 942
 943	a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL);
 944	if (!a5psw)
 945		return -ENOMEM;
 946
 947	a5psw->dev = dev;
 948	mutex_init(&a5psw->lk_lock);
 949	spin_lock_init(&a5psw->reg_lock);
 950	a5psw->base = devm_platform_ioremap_resource(pdev, 0);
 951	if (IS_ERR(a5psw->base))
 952		return PTR_ERR(a5psw->base);
 953
 954	ret = a5psw_pcs_get(a5psw);
 955	if (ret)
 956		return ret;
 957
 958	a5psw->hclk = devm_clk_get(dev, "hclk");
 959	if (IS_ERR(a5psw->hclk)) {
 960		dev_err(dev, "failed get hclk clock\n");
 961		ret = PTR_ERR(a5psw->hclk);
 962		goto free_pcs;
 963	}
 964
 965	a5psw->clk = devm_clk_get(dev, "clk");
 966	if (IS_ERR(a5psw->clk)) {
 967		dev_err(dev, "failed get clk_switch clock\n");
 968		ret = PTR_ERR(a5psw->clk);
 969		goto free_pcs;
 970	}
 971
 972	ret = clk_prepare_enable(a5psw->clk);
 973	if (ret)
 974		goto free_pcs;
 975
 976	ret = clk_prepare_enable(a5psw->hclk);
 977	if (ret)
 978		goto clk_disable;
 979
 980	mdio = of_get_child_by_name(dev->of_node, "mdio");
 981	if (of_device_is_available(mdio)) {
 982		ret = a5psw_probe_mdio(a5psw, mdio);
 983		if (ret) {
 984			of_node_put(mdio);
 985			dev_err(dev, "Failed to register MDIO: %d\n", ret);
 986			goto hclk_disable;
 987		}
 988	}
 989
 990	of_node_put(mdio);
 991
 992	ds = &a5psw->ds;
 993	ds->dev = dev;
 994	ds->num_ports = A5PSW_PORTS_NUM;
 995	ds->ops = &a5psw_switch_ops;
 996	ds->priv = a5psw;
 997
 998	ret = dsa_register_switch(ds);
 999	if (ret) {
1000		dev_err(dev, "Failed to register DSA switch: %d\n", ret);
1001		goto hclk_disable;
1002	}
1003
1004	return 0;
1005
1006hclk_disable:
1007	clk_disable_unprepare(a5psw->hclk);
1008clk_disable:
1009	clk_disable_unprepare(a5psw->clk);
1010free_pcs:
1011	a5psw_pcs_free(a5psw);
1012
1013	return ret;
1014}
1015
/* Platform driver remove: unregister the switch and release PCS/clocks.
 * drvdata may be NULL when a5psw_shutdown() already ran and cleared it.
 */
static int a5psw_remove(struct platform_device *pdev)
{
	struct a5psw *a5psw = platform_get_drvdata(pdev);

	if (!a5psw)
		return 0;

	dsa_unregister_switch(&a5psw->ds);
	a5psw_pcs_free(a5psw);
	clk_disable_unprepare(a5psw->hclk);
	clk_disable_unprepare(a5psw->clk);

	return 0;
}
1030
/* Platform driver shutdown: quiesce the switch and clear drvdata so a later
 * remove() becomes a no-op.
 */
static void a5psw_shutdown(struct platform_device *pdev)
{
	struct a5psw *a5psw = platform_get_drvdata(pdev);

	if (!a5psw)
		return;

	dsa_switch_shutdown(&a5psw->ds);

	platform_set_drvdata(pdev, NULL);
}
1042
/* Devicetree match table */
static const struct of_device_id a5psw_of_mtable[] = {
	{ .compatible = "renesas,rzn1-a5psw", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, a5psw_of_mtable);

static struct platform_driver a5psw_driver = {
	.driver = {
		.name	 = "rzn1_a5psw",
		.of_match_table = of_match_ptr(a5psw_of_mtable),
	},
	.probe = a5psw_probe,
	.remove = a5psw_remove,
	.shutdown = a5psw_shutdown,
};
module_platform_driver(a5psw_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver");
MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");