   1// SPDX-License-Identifier: GPL-2.0+
   2/* Microchip Sparx5 Switch driver
   3 *
   4 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/phy/phy.h>
   9#include <net/dcbnl.h>
  10
  11#include "sparx5_main_regs.h"
  12#include "sparx5_main.h"
  13#include "sparx5_port.h"
  14
  15#define SPX5_ETYPE_TAG_C     0x8100
  16#define SPX5_ETYPE_TAG_S     0x88a8
  17
  18#define SPX5_WAIT_US         1000
  19#define SPX5_WAIT_MAX_US     2000
  20
  21enum port_error {
  22	SPX5_PERR_SPEED,
  23	SPX5_PERR_IFTYPE,
  24};
  25
  26#define PAUSE_DISCARD        0xC
  27#define ETH_MAXLEN           (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)
  28
  29static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
  30{
  31	status->an_complete = true;
  32	if (!(lp_abil & LPA_SGMII_LINK)) {
  33		status->link = false;
  34		return;
  35	}
  36
  37	switch (lp_abil & LPA_SGMII_SPD_MASK) {
  38	case LPA_SGMII_10:
  39		status->speed = SPEED_10;
  40		break;
  41	case LPA_SGMII_100:
  42		status->speed = SPEED_100;
  43		break;
  44	case LPA_SGMII_1000:
  45		status->speed = SPEED_1000;
  46		break;
  47	default:
  48		status->link = false;
  49		return;
  50	}
  51	if (lp_abil & LPA_SGMII_FULL_DUPLEX)
  52		status->duplex = DUPLEX_FULL;
  53	else
  54		status->duplex = DUPLEX_HALF;
  55}
  56
  57static void decode_cl37_word(u16 lp_abil, uint16_t ld_abil, struct sparx5_port_status *status)
  58{
  59	status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
  60	status->an_complete = true;
  61	status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
  62		DUPLEX_FULL : DUPLEX_UNKNOWN; // 1G HDX not supported
  63
  64	if ((ld_abil & ADVERTISE_1000XPAUSE) &&
  65	    (lp_abil & ADVERTISE_1000XPAUSE)) {
  66		status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
  67	} else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
  68		   (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
  69		status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
  70			MLO_PAUSE_TX : 0;
  71		status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
  72			MLO_PAUSE_RX : 0;
  73	} else {
  74		status->pause = MLO_PAUSE_NONE;
  75	}
  76}
  77
/* Read the link status of a port handled by the DEV2G5 device
 * (SGMII/QSGMII/1000BaseX/2500BaseX) into @status.
 * Reads the PCS1G sticky, link-status and aneg registers; if aneg has
 * completed, the partner ability word refines speed/duplex/pause.
 * Always returns 0.
 */
static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_status *status)
{
	u32 portno = port->portno;
	u16 lp_adv, ld_adv;
	u32 value;

	/* Get PCS Link down sticky */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
	status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
	if (status->link_down)	/* Clear the sticky */
		spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));

	/* Get both current Link and Sync status */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
	status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
		       DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);

	/* BaseX modes run at a fixed speed */
	if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
		status->speed = SPEED_1000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
		status->speed = SPEED_2500;

	status->duplex = DUPLEX_FULL;

	/* Get PCS ANEG status register */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));

	/* Aneg complete provides more information  */
	if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
		lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
		if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
			decode_sgmii_word(lp_adv, status);
		} else {
			/* Clause-37 resolution needs the local ability too */
			value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
			ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
			decode_cl37_word(lp_adv, ld_adv, status);
		}
	}
	return 0;
}
 120
/* Read the link status of a 5G/10G/25G BaseR (SFI) port into @status.
 * Link state is derived from the MAC TX monitor sticky: the link is up
 * only when exactly the IDLE state sticky is set. Speed is fixed by the
 * configured port mode (no in-band aneg on SFI).
 * Returns -EINVAL if called for a non-BaseR port mode.
 */
static int sparx5_get_sfi_status(struct sparx5 *sparx5,
				 struct sparx5_port *port,
				 struct sparx5_port_status *status)
{
	bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
	u32 portno = port->portno;
	u32 value, dev, tinst;
	void __iomem *inst;

	if (!high_speed_dev) {
		netdev_err(port->ndev, "error: low speed and SFI mode\n");
		return -EINVAL;
	}

	/* Resolve the high speed device instance for this port */
	dev = sparx5_to_high_dev(portno);
	tinst = sparx5_port_dev_index(portno);
	inst = spx5_inst_get(sparx5, dev, tinst);

	value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
		/* The link is or has been down. Clear the sticky bit */
		status->link_down = 1;
		spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
		/* Re-read to get the current state after the clear */
		value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	}
	status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
	status->duplex = DUPLEX_FULL;
	if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
		status->speed = SPEED_5000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
		status->speed = SPEED_10000;
	else
		status->speed = SPEED_25000;

	return 0;
}
 157
 158/* Get link status of 1000Base-X/in-band and SFI ports.
 159 */
 160int sparx5_get_port_status(struct sparx5 *sparx5,
 161			   struct sparx5_port *port,
 162			   struct sparx5_port_status *status)
 163{
 164	memset(status, 0, sizeof(*status));
 165	status->speed = port->conf.speed;
 166	if (port->conf.power_down) {
 167		status->link = false;
 168		return 0;
 169	}
 170	switch (port->conf.portmode) {
 171	case PHY_INTERFACE_MODE_SGMII:
 172	case PHY_INTERFACE_MODE_QSGMII:
 173	case PHY_INTERFACE_MODE_1000BASEX:
 174	case PHY_INTERFACE_MODE_2500BASEX:
 175		return sparx5_get_dev2g5_status(sparx5, port, status);
 176	case PHY_INTERFACE_MODE_5GBASER:
 177	case PHY_INTERFACE_MODE_10GBASER:
 178	case PHY_INTERFACE_MODE_25GBASER:
 179		return sparx5_get_sfi_status(sparx5, port, status);
 180	case PHY_INTERFACE_MODE_NA:
 181		return 0;
 182	default:
 183		netdev_err(port->ndev, "Status not supported");
 184		return -ENODEV;
 185	}
 186	return 0;
 187}
 188
 189static int sparx5_port_error(struct sparx5_port *port,
 190			     struct sparx5_port_config *conf,
 191			     enum port_error errtype)
 192{
 193	switch (errtype) {
 194	case SPX5_PERR_SPEED:
 195		netdev_err(port->ndev,
 196			   "Interface does not support speed: %u: for %s\n",
 197			   conf->speed, phy_modes(conf->portmode));
 198		break;
 199	case SPX5_PERR_IFTYPE:
 200		netdev_err(port->ndev,
 201			   "Switch port does not support interface type: %s\n",
 202			   phy_modes(conf->portmode));
 203		break;
 204	default:
 205		netdev_err(port->ndev,
 206			   "Interface configuration error\n");
 207	}
 208
 209	return -EINVAL;
 210}
 211
 212static int sparx5_port_verify_speed(struct sparx5 *sparx5,
 213				    struct sparx5_port *port,
 214				    struct sparx5_port_config *conf)
 215{
 216	if ((sparx5_port_is_2g5(port->portno) &&
 
 
 217	     conf->speed > SPEED_2500) ||
 218	    (sparx5_port_is_5g(port->portno)  &&
 219	     conf->speed > SPEED_5000) ||
 220	    (sparx5_port_is_10g(port->portno) &&
 221	     conf->speed > SPEED_10000))
 222		return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
 223
 224	switch (conf->portmode) {
 225	case PHY_INTERFACE_MODE_NA:
 226		return -EINVAL;
 227	case PHY_INTERFACE_MODE_1000BASEX:
 228		if (conf->speed != SPEED_1000 ||
 229		    sparx5_port_is_2g5(port->portno))
 230			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
 231		if (sparx5_port_is_2g5(port->portno))
 232			return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
 233		break;
 234	case PHY_INTERFACE_MODE_2500BASEX:
 235		if (conf->speed != SPEED_2500 ||
 236		    sparx5_port_is_2g5(port->portno))
 237			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
 238		break;
 239	case PHY_INTERFACE_MODE_QSGMII:
 240		if (port->portno > 47)
 241			return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
 242		fallthrough;
 243	case PHY_INTERFACE_MODE_SGMII:
 244		if (conf->speed != SPEED_1000 &&
 245		    conf->speed != SPEED_100 &&
 246		    conf->speed != SPEED_10 &&
 247		    conf->speed != SPEED_2500)
 248			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
 249		break;
 250	case PHY_INTERFACE_MODE_5GBASER:
 251	case PHY_INTERFACE_MODE_10GBASER:
 252	case PHY_INTERFACE_MODE_25GBASER:
 253		if ((conf->speed != SPEED_5000 &&
 254		     conf->speed != SPEED_10000 &&
 255		     conf->speed != SPEED_25000))
 256			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
 257		break;
 258	default:
 259		return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
 260	}
 261	return 0;
 262}
 263
 264static bool sparx5_dev_change(struct sparx5 *sparx5,
 265			      struct sparx5_port *port,
 266			      struct sparx5_port_config *conf)
 267{
 268	return sparx5_is_baser(port->conf.portmode) ^
 269		sparx5_is_baser(conf->portmode);
 270}
 271
/* Poll the QRES resource-status counters until all queue resources for
 * @portno have drained. Sleeps between polls; gives up after 2000
 * iterations (~2-4 seconds with SPX5_WAIT_US/SPX5_WAIT_MAX_US).
 * Returns 0 when empty, -EINVAL on timeout.
 */
static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
{
	u32  value, resource, prio, delay_cnt = 0;
	bool poll_src = true;
	char *mem = "";

	/* Resource == 0: Memory tracked per source (SRC-MEM)
	 * Resource == 1: Frame references tracked per source (SRC-REF)
	 * Resource == 2: Memory tracked per destination (DST-MEM)
	 * Resource == 3: Frame references tracked per destination. (DST-REF)
	 */
	while (1) {
		bool empty = true;

		for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
			u32 base;

			/* resource 0 counters start at index 2048 */
			base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
			for (prio = 0; prio < SPX5_PRIOS; prio++) {
				value = spx5_rd(sparx5,
						QRES_RES_STAT(base + prio));
				if (value) {
					/* NOTE(review): labeling resource 0 as
					 * "DST-MEM" appears to contradict the
					 * comment above (resource 0 = SRC-MEM)
					 * — confirm against the QRES layout.
					 */
					mem = resource == 0 ?
						"DST-MEM" : "SRC-MEM";
					empty = false;
				}
			}
		}

		if (empty)
			break;

		if (delay_cnt++ == 2000) {
			dev_err(sparx5->dev,
				"Flush timeout port %u. %s queue not empty\n",
				portno, mem);
			return -EINVAL;
		}

		usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
	}
	return 0;
}
 315
/* Flush and disable a port device following the documented 12-step
 * sequence: reset the PCS Rx domain, stop MAC reception, stop switch
 * forwarding, flush the port's queues, reset the MAC clock domain and
 * finally disable the PCS.
 * @high_spd_dev: true to disable the 5G/10G/25G device, false for the
 *                2G5 device of this port.
 * Returns 0 on success or the error from the queue-flush poll.
 * The step order is mandated by the hardware; do not reorder.
 */
static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
{
	/* High speed devices use their own instance index space */
	u32 tinst = high_spd_dev ?
		    sparx5_port_dev_index(port->portno) : port->portno;
	u32 dev = high_spd_dev ?
		  sparx5_to_high_dev(port->portno) : TARGET_DEV2G5;
	void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
	u32 spd = port->conf.speed;
	u32 spd_prm;
	int err;

	if (high_spd_dev) {
		/* 1: Reset the PCS Rx clock domain  */
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV10G_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV10G_MAC_ENA_CFG(0));
	} else {
		/* 1: Reset the PCS Rx clock domain  */
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV2G5_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV2G5_MAC_ENA_CFG(0));
	}
	/* 3: Disable traffic being sent to or from switch port->portno */
	spx5_rmw(0,
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* 4: Disable dequeuing from the egress queues  */
	spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 5: Disable Flowcontrol */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	/* Scale the drain wait by speed: slower links need longer */
	spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
	/* 6: Wait while the last frame is exiting the queues */
	usleep_range(8 * spd_prm, 10 * spd_prm);

	/* 7: Flush the queues accociated with the port->portno */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_DST |
		 HSCH_FLUSH_CTRL_FLUSH_SRC |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	/* 8: Enable dequeuing from the egress queues */
	spx5_rmw(0,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 9: Wait until flushing is complete */
	err = sparx5_port_flush_poll(sparx5, port->portno);
	if (err)
		return err;

	/* 10: Reset the  MAC clock domain */
	if (high_spd_dev) {
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
			      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

	} else {
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
			      DEV2G5_DEV_RST_CTRL_SPEED_SEL |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
	}
	/* 11: Clear flushing */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	if (high_spd_dev) {
		u32 pcs = sparx5_to_pcs_dev(port->portno);
		void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);

		/* 12: Disable 5G/10G/25 BaseR PCS */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      pcsinst,
			      PCS10G_BR_PCS_CFG(0));

		if (sparx5_port_is_25g(port->portno))
			/* Disable 25G PCS */
			spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
				 DEV25G_PCS25G_CFG_PCS25G_ENA,
				 sparx5,
				 DEV25G_PCS25G_CFG(tinst));
	} else {
		/* 12: Disable 1G PCS */
		spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
			 DEV2G5_PCS1G_CFG_PCS_ENA,
			 sparx5,
			 DEV2G5_PCS1G_CFG(port->portno));
	}

	/* The port is now flushed and disabled  */
	return 0;
}
 456
 457static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
 458			       u32 portno, u32 speed)
 459{
 460	u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
 461	const u32 taxi_dist[SPX5_PORTS_ALL] = {
 462		6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
 463		4, 4, 4, 4,
 464		11, 12, 13, 14, 15, 16, 17, 18,
 465		11, 12, 13, 14, 15, 16, 17, 18,
 466		11, 12, 13, 14, 15, 16, 17, 18,
 467		11, 12, 13, 14, 15, 16, 17, 18,
 468		4, 6, 8, 4, 6, 8, 6, 8,
 469		2, 2, 2, 2, 2, 2, 2, 4, 2
 470	};
 471	u32 mac_per    = 6400, tmp1, tmp2, tmp3;
 472	u32 fifo_width = 16;
 473	u32 mac_width  = 8;
 474	u32 addition   = 0;
 475
 
 
 
 476	switch (speed) {
 477	case SPEED_25000:
 478		return 0;
 479	case SPEED_10000:
 480		mac_per = 6400;
 481		mac_width = 8;
 482		addition = 1;
 483		break;
 484	case SPEED_5000:
 485		mac_per = 12800;
 486		mac_width = 8;
 487		addition = 0;
 488		break;
 489	case SPEED_2500:
 490		mac_per = 3200;
 491		mac_width = 1;
 492		addition = 0;
 493		break;
 494	case SPEED_1000:
 495		mac_per =  8000;
 496		mac_width = 1;
 497		addition = 0;
 498		break;
 499	case SPEED_100:
 500	case SPEED_10:
 501		return 1;
 502	default:
 503		break;
 504	}
 505
 506	tmp1 = 1000 * mac_width / fifo_width;
 507	tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
 508		       * sys_clk / mac_per);
 509	tmp3 = tmp1 * tmp2 / 1000;
 510	return  (tmp3 + 2000 + 999) / 1000 + addition;
 511}
 512
 513/* Configure port muxing:
 514 * QSGMII:     4x2G5 devices
 515 */
 516static int sparx5_port_mux_set(struct sparx5 *sparx5,
 517			       struct sparx5_port *port,
 518			       struct sparx5_port_config *conf)
 519{
 520	u32 portno = port->portno;
 521	u32 inst;
 522
 523	if (port->conf.portmode == conf->portmode)
 524		return 0; /* Nothing to do */
 525
 526	switch (conf->portmode) {
 527	case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q'  */
 528		inst = (portno - portno % 4) / 4;
 529		spx5_rmw(BIT(inst),
 530			 BIT(inst),
 531			 sparx5,
 532			 PORT_CONF_QSGMII_ENA);
 533
 534		if ((portno / 4 % 2) == 0) {
 535			/* Affects d0-d3,d8-d11..d40-d43 */
 536			spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
 537				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
 538				 PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
 539				 PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
 540				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
 541				 PORT_CONF_USGMII_CFG_QUAD_MODE,
 542				 sparx5,
 543				 PORT_CONF_USGMII_CFG((portno / 8)));
 544		}
 545		break;
 546	default:
 547		break;
 548	}
 549	return 0;
 550}
 551
/* Configure VLAN tag awareness for a port: the accepted tag Ethertype
 * and how many VLAN tags (0/1/2) the MAC accounts for in length checks.
 * Programs the DEV2G5 MAC tag registers and, for ports that also have a
 * 5G/10G/25G device, mirrors the setup there. Always returns 0.
 */
static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
				    struct sparx5_port *port)
{
	enum sparx5_port_max_tags max_tags    = port->max_vlan_tags;
	int tag_ct          = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
			      max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
	bool dtag           = max_tags == SPX5_PORT_MAX_TAGS_TWO;
	enum sparx5_vlan_port_type vlan_type  = port->vlan_type;
	bool dotag          = max_tags != SPX5_PORT_MAX_TAGS_NONE;
	u32 dev             = sparx5_to_high_dev(port->portno);
	u32 tinst           = sparx5_port_dev_index(port->portno);
	void __iomem *inst  = spx5_inst_get(sparx5, dev, tinst);
	u32 etype;

	/* Select tag Ethertype: custom, C-tag (0x8100) or S-tag (0x88a8) */
	etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
		 port->custom_etype :
		 vlan_type == SPX5_VLAN_PORT_TYPE_C ?
		 SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);

	spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
		sparx5,
		DEV2G5_MAC_TAGS_CFG(port->portno));

	/* 2G5-only ports have no high speed device to configure */
	if (sparx5_port_is_2g5(port->portno))
		return 0;

	spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
		      DEV10G_MAC_TAGS_CFG_TAG_ID |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA,
		      inst,
		      DEV10G_MAC_TAGS_CFG(0, 0));

	spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
		      DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
		      inst,
		      DEV10G_MAC_NUM_TAGS_CFG(0));

	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
		      inst,
		      DEV10G_MAC_MAXLEN_CFG(0));
	return 0;
}
 599
 600int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
 601{
 602	u32 clk_period_ps = 1600; /* 625Mhz for now */
 603	u32 urg = 672000;
 604
 605	switch (speed) {
 606	case SPEED_10:
 607	case SPEED_100:
 608	case SPEED_1000:
 609		urg = 672000;
 610		break;
 611	case SPEED_2500:
 612		urg = 270000;
 613		break;
 614	case SPEED_5000:
 615		urg = 135000;
 616		break;
 617	case SPEED_10000:
 618		urg = 67200;
 619		break;
 620	case SPEED_25000:
 621		urg = 27000;
 622		break;
 623	}
 624	return urg / clk_period_ps - 1;
 625}
 626
 627static u16 sparx5_wm_enc(u16 value)
 628{
 629	if (value >= 2048)
 630		return 2048 + value / 16;
 631
 632	return value;
 633}
 634
 635static int sparx5_port_fc_setup(struct sparx5 *sparx5,
 636				struct sparx5_port *port,
 637				struct sparx5_port_config *conf)
 638{
 639	bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
 640	u32 pause_stop = 0xFFF - 1; /* FC gen disabled */
 641
 642	if (conf->pause & MLO_PAUSE_TX)
 643		pause_stop = sparx5_wm_enc(4  * (ETH_MAXLEN /
 644						 SPX5_BUFFER_CELL_SZ));
 645
 646	/* Set HDX flowcontrol */
 647	spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
 648		 DSM_MAC_CFG_HDX_BACKPREASSURE,
 649		 sparx5,
 650		 DSM_MAC_CFG(port->portno));
 651
 652	/* Obey flowcontrol  */
 653	spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
 654		 DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
 655		 sparx5,
 656		 DSM_RX_PAUSE_CFG(port->portno));
 657
 658	/* Disable forward pressure */
 659	spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
 660		 QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
 661		 sparx5,
 662		 QSYS_FWD_PRESSURE(port->portno));
 663
 664	/* Generate pause frames */
 665	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
 666		 QSYS_PAUSE_CFG_PAUSE_STOP,
 667		 sparx5,
 668		 QSYS_PAUSE_CFG(port->portno));
 669
 670	return 0;
 671}
 672
 673static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
 674{
 675	if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
 676		return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
 677	else
 678		return 1; /* Enable SGMII Aneg */
 679}
 680
 681int sparx5_serdes_set(struct sparx5 *sparx5,
 682		      struct sparx5_port *port,
 683		      struct sparx5_port_config *conf)
 684{
 685	int portmode, err, speed = conf->speed;
 686
 687	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
 688	    ((port->portno % 4) != 0)) {
 689		return 0;
 690	}
 691	if (sparx5_is_baser(conf->portmode)) {
 692		if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
 693			speed = SPEED_25000;
 694		else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
 695			speed = SPEED_10000;
 696		else
 697			speed = SPEED_5000;
 698	}
 699
 700	err = phy_set_media(port->serdes, conf->media);
 701	if (err)
 702		return err;
 703	if (speed > 0) {
 704		err = phy_set_speed(port->serdes, speed);
 705		if (err)
 706			return err;
 707	}
 708	if (conf->serdes_reset) {
 709		err = phy_reset(port->serdes);
 710		if (err)
 711			return err;
 712	}
 713
 714	/* Configure SerDes with port parameters
 715	 * For BaseR, the serdes driver supports 10GGBASE-R and speed 5G/10G/25G
 716	 */
 717	portmode = conf->portmode;
 718	if (sparx5_is_baser(conf->portmode))
 719		portmode = PHY_INTERFACE_MODE_10GBASER;
 720	err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
 721	if (err)
 722		return err;
 723	conf->serdes_reset = false;
 724	return err;
 725}
 726
/* Bring up the 1G PCS (PCS1G) for SGMII/QSGMII/1000BaseX/2500BaseX
 * modes: select SGMII vs BaseX PCS mode, enable the PCS, program
 * in-band aneg if applicable and take the PCS out of reset.
 * Returns 0 on success, -EINVAL if the serdes setup fails.
 */
static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
				   struct sparx5_port *port,
				   struct sparx5_port_config *conf)
{
	bool sgmii = false, inband_aneg = false;
	int err;

	if (port->conf.inband) {
		if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
		    conf->portmode == PHY_INTERFACE_MODE_QSGMII)
			inband_aneg = true; /* Cisco-SGMII in-band-aneg */
		else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
			 conf->autoneg)
			inband_aneg = true; /* Clause-37 in-band-aneg */

		err = sparx5_serdes_set(sparx5, port, conf);
		if (err)
			return -EINVAL;
	} else {
		sgmii = true; /* Phy is connected to the MAC */
	}

	/* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
	spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
		 DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
		 sparx5,
		 DEV2G5_PCS1G_MODE_CFG(port->portno));

	/* Enable PCS */
	spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
		sparx5,
		DEV2G5_PCS1G_CFG(port->portno));

	if (inband_aneg) {
		u16 abil = sparx5_get_aneg_word(conf);

		/* Enable in-band aneg */
		spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
			DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
			DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
			DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
			sparx5,
			DEV2G5_PCS1G_ANEG_CFG(port->portno));
	} else {
		/* No in-band aneg: clear the whole aneg config */
		spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
	}

	/* Take PCS out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
		 sparx5,
		 DEV2G5_DEV_RST_CTRL(port->portno));

	return 0;
}
 786
/* Bring up the high speed (5G/10G/25G BaseR) PCS and MAC: configure
 * the serdes, enable the appropriate PCS (25G has its own), enable the
 * MAC and take the device out of reset.
 * Returns 0 on success, -EINVAL if the serdes setup fails.
 */
static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_config *conf)
{
	/* SPEED_SEL: 1 selects the 5G clock, 0 the 10G/25G clock */
	u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
	u32 pix = sparx5_port_dev_index(port->portno);
	u32 dev = sparx5_to_high_dev(port->portno);
	u32 pcs = sparx5_to_pcs_dev(port->portno);
	void __iomem *devinst;
	void __iomem *pcsinst;
	int err;

	devinst = spx5_inst_get(sparx5, dev, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/*  SFI : No in-band-aneg. Speeds 5G/10G/25G */
	err = sparx5_serdes_set(sparx5, port, conf);
	if (err)
		return -EINVAL;
	if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
		/* Enable PCS for 25G device, speed 25G */
		spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
			 DEV25G_PCS25G_CFG_PCS25G_ENA,
			 sparx5,
			 DEV25G_PCS25G_CFG(pix));
	} else {
		/* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      pcsinst,
			      PCS10G_BR_PCS_CFG(0));
	}

	/* Enable 5G/10G/25G MAC module */
	spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
		     DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
		     devinst,
		     DEV10G_MAC_ENA_CFG(0));

	/* Take the device out of reset */
	spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
		      DEV10G_DEV_RST_CTRL_PCS_RX_RST |
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST |
		      DEV10G_DEV_RST_CTRL_SPEED_SEL,
		      devinst,
		      DEV10G_DEV_RST_CTRL(0));

	return 0;
}
 842
/* Switch between 1G/2500 and 5G/10G/25G devices */
static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
{
	/* Bit for this port in the per-device-class mode register */
	int bt_indx = BIT(sparx5_port_dev_index(port));

	/* Clearing the bit (hsd) selects the high speed device; setting
	 * it selects the 2G5 device. Each device class (5G/10G/25G) has
	 * its own mode register. Ports with only a 2G5 device match none
	 * of the branches and need no switch.
	 */
	if (sparx5_port_is_5g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV5G_MODES);
	} else if (sparx5_port_is_10g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV10G_MODES);
	} else if (sparx5_port_is_25g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV25G_MODES);
	}
}
 865
/* Configure speed/duplex dependent registers */
static int sparx5_port_config_low_set(struct sparx5 *sparx5,
				      struct sparx5_port *port,
				      struct sparx5_port_config *conf)
{
	u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
	bool fdx = conf->duplex == DUPLEX_FULL;
	int spd = conf->speed;

	/* Derive clock select, gigabit mode and inter-frame gaps from
	 * the configured speed/duplex
	 */
	clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
	gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
	tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
	hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
	hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;

	/* GIG/FDX mode */
	spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
		 DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA,
		 sparx5,
		 DEV2G5_MAC_MODE_CFG(port->portno));

	/* Set MAC IFG Gaps */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	/* Disabling frame aging when in HDX (due to HDX issue) */
	spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
		 HSCH_PORT_MODE_AGE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* Enable MAC module */
	spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
		DEV2G5_MAC_ENA_CFG_TX_ENA,
		sparx5,
		DEV2G5_MAC_ENA_CFG(port->portno));

	/* Select speed and take MAC out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
		 sparx5,
		 DEV2G5_DEV_RST_CTRL(port->portno));

	return 0;
}
 920
/* (Re)configure the PCS of a port: switch between the 2G5 and the
 * high speed device if the new mode requires it, disable/flush the
 * port, then bring up the matching PCS and update the saved config.
 * Returns 0 on success or a negative error.
 */
int sparx5_port_pcs_set(struct sparx5 *sparx5,
			struct sparx5_port *port,
			struct sparx5_port_config *conf)

{
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	int err;

	if (sparx5_dev_change(sparx5, port, conf)) {
		/* switch device */
		sparx5_dev_switch(sparx5, port->portno, high_speed_dev);

		/* Disable the not-in-use device */
		err = sparx5_port_disable(sparx5, port, !high_speed_dev);
		if (err)
			return err;
	}
	/* Disable the port before re-configuring */
	err = sparx5_port_disable(sparx5, port, high_speed_dev);
	if (err)
		return -EINVAL;

	if (high_speed_dev)
		err = sparx5_port_pcs_high_set(sparx5, port, conf);
	else
		err = sparx5_port_pcs_low_set(sparx5, port, conf);

	if (err)
		return -EINVAL;

	if (port->conf.inband) {
		/* Enable/disable 1G counters in ASM */
		spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
			 ASM_PORT_CFG_CSC_STAT_DIS,
			 sparx5,
			 ASM_PORT_CFG(port->portno));

		/* Enable/disable 1G counters in DSM */
		spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
			 DSM_BUF_CFG_CSC_STAT_DIS,
			 sparx5,
			 DSM_BUF_CFG(port->portno));
	}

	/* Remember the applied configuration */
	port->conf = *conf;

	return 0;
}
 969
/* Apply a full port configuration: verify the speed/mode combination,
 * program the speed-dependent low speed device registers, flow control,
 * the DSM stop watermark and finally enable the port in the queue
 * system. Saves @conf into the port on success.
 * Returns 0 on success or a negative error.
 */
int sparx5_port_config(struct sparx5 *sparx5,
		       struct sparx5_port *port,
		       struct sparx5_port_config *conf)
{
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	int err, urgency, stop_wm;

	err = sparx5_port_verify_speed(sparx5, port, conf);
	if (err)
		return err;

	/* high speed device is already configured */
	if (!high_speed_dev)
		sparx5_port_config_low_set(sparx5, port, conf);

	/* Configure flow control */
	err = sparx5_port_fc_setup(sparx5, port, conf);
	if (err)
		return err;

	/* Set the DSM stop watermark */
	stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
	spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
		 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
		 sparx5,
		 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Enable port in queue system */
	urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* Save the new values */
	port->conf = *conf;

	return 0;
}
1011
/* Initialize port config to default.
 *
 * Sets up port muxing, VLAN tag handling, max frame length, signal detect,
 * pause/tail-drop watermarks and PCS defaults for both the low speed
 * (DEV2G5) and, where present, the high speed (DEV5G/10G/25G) device.
 * Returns 0 on success or a negative errno.
 */
int sparx5_port_init(struct sparx5 *sparx5,
		     struct sparx5_port *port,
		     struct sparx5_port_config *conf)
{
	/* Pause generation starts at 6 frames, tail drop at 20 frames of
	 * buffer usage (watermarks are encoded in buffer cells).
	 */
	u32 pause_start = sparx5_wm_enc(6  * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	u32 devhigh = sparx5_to_high_dev(port->portno);
	u32 pix = sparx5_port_dev_index(port->portno);
	u32 pcs = sparx5_to_pcs_dev(port->portno);
	bool sd_pol = port->signd_active_high;
	bool sd_sel = !port->signd_internal;
	bool sd_ena = port->signd_enable;
	u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
	void __iomem *devinst;
	void __iomem *pcsinst;
	int err;

	devinst = spx5_inst_get(sparx5, devhigh, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/* Set the mux port mode  */
	err = sparx5_port_mux_set(sparx5, port, conf);
	if (err)
		return err;

	/* Configure MAC vlan awareness */
	err = sparx5_port_max_tags_set(sparx5, port);
	if (err)
		return err;

	/* Set Max Length */
	spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		 DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
		 sparx5,
		 DEV2G5_MAC_MAXLEN_CFG(port->portno));

	/* 1G/2G5: Signal Detect configuration */
	spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
		DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
		DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
		sparx5,
		DEV2G5_PCS1G_SD_CFG(port->portno));

	/* Set Pause WM hysteresis */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
		 QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
		 QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
		 QSYS_PAUSE_CFG_PAUSE_START |
		 QSYS_PAUSE_CFG_PAUSE_STOP |
		 QSYS_PAUSE_CFG_PAUSE_ENA,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	/* Port ATOP. Frames are tail dropped when this WM is hit */
	spx5_wr(QSYS_ATOP_ATOP_SET(atop),
		sparx5,
		QSYS_ATOP(port->portno));

	/* Discard pause frame 01-80-C2-00-00-01 */
	spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));

	/* Discard SMAC multicast */
	spx5_rmw(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(0),
		 ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
		 sparx5, ANA_CL_FILTER_CTRL(port->portno));

	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
	    conf->portmode == PHY_INTERFACE_MODE_SGMII) {
		err = sparx5_serdes_set(sparx5, port, conf);
		if (err)
			return err;

		if (!sparx5_port_is_2g5(port->portno))
			/* Enable shadow device */
			spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
				 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
				 sparx5,
				 DSM_DEV_TX_STOP_WM_CFG(port->portno));

		/* Connect the switch core to the low speed device */
		sparx5_dev_switch(sparx5, port->portno, false);
	}
	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
		// All ports must be PCS enabled in QSGMII mode
		spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
			 DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
			 sparx5,
			 DEV2G5_DEV_RST_CTRL(port->portno));
	}
	/* Default IFGs for 1G */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	if (sparx5_port_is_2g5(port->portno))
		return 0; /* Low speed device only - return */

	/* Now setup the high speed device */
	if (conf->portmode == PHY_INTERFACE_MODE_NA)
		conf->portmode = PHY_INTERFACE_MODE_10GBASER;

	if (sparx5_is_baser(conf->portmode))
		sparx5_dev_switch(sparx5, port->portno, true);

	/* Set Max Length */
	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
		      devinst,
		      DEV10G_MAC_ENA_CFG(0));

	/* Handle Signal Detect in 10G PCS */
	spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
		     PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
		     PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
		     pcsinst,
		     PCS10G_BR_PCS_SD_CFG(0));

	if (sparx5_port_is_25g(port->portno)) {
		/* Handle Signal Detect in 25G PCS */
		spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
			DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
			DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
			sparx5,
			DEV25G_PCS25G_SD_CFG(pix));
	}

	return 0;
}
1142
/* Enable or disable frame forwarding to/from this port in the queue system */
void sparx5_port_enable(struct sparx5_port *port, bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;

	/* Enable port for frame transfer? */
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));
}
1153
/* Apply the complete QoS configuration for a port: DSCP and PCP based
 * classification, the PCP/DSCP rewrite tables, and the port defaults.
 * Always returns 0.
 */
int sparx5_port_qos_set(struct sparx5_port *port,
			struct sparx5_port_qos *qos)
{
	sparx5_port_qos_dscp_set(port, &qos->dscp);
	sparx5_port_qos_pcp_set(port, &qos->pcp);
	sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
	sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
	sparx5_port_qos_default_set(port, qos);

	return 0;
}
1165
/* Configure egress PCP/DEI rewrite for a port.
 *
 * When @qos->enable is set, the rewriter uses the per-port mapping table
 * (classified QoS class + DP level -> PCP/DEI); otherwise the classified
 * (i.e. frame) PCP/DEI is used unmodified.  Always returns 0.
 */
int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
				 struct sparx5_port_qos_pcp_rewr *qos)
{
	int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED;
	struct sparx5 *sparx5 = port->sparx5;
	u8 pcp, dei;

	/* Use mapping table, with classified QoS as index, to map QoS and DP
	 * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified
	 * PCP. Classified PCP equals frame PCP.
	 */
	if (qos->enable)
		mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;

	spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) |
		 REW_TAG_CTRL_TAG_DEI_CFG_SET(mode),
		 REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG,
		 port->sparx5, REW_TAG_CTRL(port->portno));

	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		/* Extract PCP and DEI */
		pcp = qos->map.map[i];
		/* NOTE(review): values strictly above SPARX5_PORT_QOS_PCP_COUNT
		 * select DEI=1 - confirm '>' (rather than '>=') matches the
		 * intended map encoding.
		 */
		if (pcp > SPARX5_PORT_QOS_PCP_COUNT)
			dei = 1;
		else
			dei = 0;

		/* Rewrite PCP and DEI, for each classified QoS class and DP
		 * level. This table is only used if tag ctrl mode is set to
		 * 'mapped'.
		 *
		 * 0:0nd   - prio=0 and dp:0 => pcp=0 and dei=0
		 * 0:0de   - prio=0 and dp:1 => pcp=0 and dei=1
		 */
		if (dei) {
			spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
				 REW_PCP_MAP_DE1_PCP_DE1, sparx5,
				 REW_PCP_MAP_DE1(port->portno, i));

			spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei),
				 REW_DEI_MAP_DE1_DEI_DE1, port->sparx5,
				 REW_DEI_MAP_DE1(port->portno, i));
		} else {
			spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp),
				 REW_PCP_MAP_DE0_PCP_DE0, sparx5,
				 REW_PCP_MAP_DE0(port->portno, i));

			spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei),
				 REW_DEI_MAP_DE0_DEI_DE0, port->sparx5,
				 REW_DEI_MAP_DE0(port->portno, i));
		}
	}

	return 0;
}
1221
1222int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
1223			    struct sparx5_port_qos_pcp *qos)
1224{
1225	struct sparx5 *sparx5 = port->sparx5;
1226	u8 *pcp_itr = qos->map.map;
1227	u8 pcp, dp;
1228	int i;
1229
1230	/* Enable/disable pcp and dp for qos classification. */
1231	spx5_rmw(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(qos->qos_enable) |
1232		 ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(qos->dp_enable),
1233		 ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA | ANA_CL_QOS_CFG_PCP_DEI_DP_ENA,
1234		 sparx5, ANA_CL_QOS_CFG(port->portno));
1235
1236	/* Map each pcp and dei value to priority and dp */
1237	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1238		pcp = *(pcp_itr + i);
1239		dp = (i < SPARX5_PORT_QOS_PCP_COUNT) ? 0 : 1;
1240		spx5_rmw(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(pcp) |
1241			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(dp),
1242			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL |
1243			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, sparx5,
1244			 ANA_CL_PCP_DEI_MAP_CFG(port->portno, i));
1245	}
1246
1247	return 0;
1248}
1249
/* Select the DSCP rewrite mode for a port (how the classified DSCP used by
 * egress rewriting is derived - see the ANA_CL DSCP_REWR_MODE_SEL field).
 */
void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
					int mode)
{
	spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode),
		 ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5,
		 ANA_CL_QOS_CFG(port->portno));
}
1257
1258int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
1259				  struct sparx5_port_qos_dscp_rewr *qos)
1260{
1261	struct sparx5 *sparx5 = port->sparx5;
1262	bool rewr = false;
1263	u16 dscp;
1264	int i;
1265
1266	/* On egress, rewrite DSCP value to either classified DSCP or frame
1267	 * DSCP. If enabled; classified DSCP, if disabled; frame DSCP.
1268	 */
1269	if (qos->enable)
1270		rewr = true;
1271
1272	spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr),
1273		 REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5,
1274		 REW_DSCP_MAP(port->portno));
1275
1276	/* On ingress, map each classified QoS class and DP to classified DSCP
1277	 * value. This mapping table is global for all ports.
1278	 */
1279	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1280		dscp = qos->map.map[i];
1281		spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
1282			 ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
1283			 ANA_CL_QOS_MAP_CFG(i));
1284	}
1285
1286	return 0;
1287}
1288
1289int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
1290			     struct sparx5_port_qos_dscp *qos)
1291{
1292	struct sparx5 *sparx5 = port->sparx5;
1293	u8 *dscp = qos->map.map;
1294	int i;
1295
1296	/* Enable/disable dscp and dp for qos classification.
1297	 * Disable rewrite of dscp values for now.
1298	 */
1299	spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
1300		 ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
1301		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
1302		 ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
1303		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
1304		 ANA_CL_QOS_CFG(port->portno));
1305
1306	/* Map each dscp value to priority and dp */
1307	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1308		spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
1309			 ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
1310			 ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
1311			 ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
1312			 ANA_CL_DSCP_CFG(i));
1313	}
1314
1315	/* Set per-dscp trust */
1316	for (i = 0; i <  ARRAY_SIZE(qos->map.map); i++) {
1317		if (qos->qos_enable) {
1318			spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
1319				 ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
1320				 ANA_CL_DSCP_CFG(i));
1321		}
1322	}
1323
1324	return 0;
1325}
1326
/* Program the port QoS defaults: the priority/DP used when no classifier
 * matches, and the PCP/DEI inserted for untagged frames.  Always returns 0.
 */
int sparx5_port_qos_default_set(const struct sparx5_port *port,
				const struct sparx5_port_qos *qos)
{
	struct sparx5 *sparx5 = port->sparx5;

	/* Set default prio and dp level */
	spx5_rmw(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(qos->default_prio) |
		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(0),
		 ANA_CL_QOS_CFG_DEFAULT_QOS_VAL |
		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL,
		 sparx5, ANA_CL_QOS_CFG(port->portno));

	/* Set default pcp and dei for untagged frames */
	spx5_rmw(ANA_CL_VLAN_CTRL_PORT_PCP_SET(0) |
		 ANA_CL_VLAN_CTRL_PORT_DEI_SET(0),
		 ANA_CL_VLAN_CTRL_PORT_PCP |
		 ANA_CL_VLAN_CTRL_PORT_DEI,
		 sparx5, ANA_CL_VLAN_CTRL(port->portno));

	return 0;
}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0+
   2/* Microchip Sparx5 Switch driver
   3 *
   4 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/phy/phy.h>
   9#include <net/dcbnl.h>
  10
  11#include "sparx5_main_regs.h"
  12#include "sparx5_main.h"
  13#include "sparx5_port.h"
  14
  15#define SPX5_ETYPE_TAG_C     0x8100
  16#define SPX5_ETYPE_TAG_S     0x88a8
  17
  18#define SPX5_WAIT_US         1000
  19#define SPX5_WAIT_MAX_US     2000
  20
  21enum port_error {
  22	SPX5_PERR_SPEED,
  23	SPX5_PERR_IFTYPE,
  24};
  25
  26#define PAUSE_DISCARD        0xC
  27#define ETH_MAXLEN           (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)
  28
  29static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
  30{
  31	status->an_complete = true;
  32	if (!(lp_abil & LPA_SGMII_LINK)) {
  33		status->link = false;
  34		return;
  35	}
  36
  37	switch (lp_abil & LPA_SGMII_SPD_MASK) {
  38	case LPA_SGMII_10:
  39		status->speed = SPEED_10;
  40		break;
  41	case LPA_SGMII_100:
  42		status->speed = SPEED_100;
  43		break;
  44	case LPA_SGMII_1000:
  45		status->speed = SPEED_1000;
  46		break;
  47	default:
  48		status->link = false;
  49		return;
  50	}
  51	if (lp_abil & LPA_SGMII_FULL_DUPLEX)
  52		status->duplex = DUPLEX_FULL;
  53	else
  54		status->duplex = DUPLEX_HALF;
  55}
  56
  57static void decode_cl37_word(u16 lp_abil, uint16_t ld_abil, struct sparx5_port_status *status)
  58{
  59	status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
  60	status->an_complete = true;
  61	status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
  62		DUPLEX_FULL : DUPLEX_UNKNOWN; // 1G HDX not supported
  63
  64	if ((ld_abil & ADVERTISE_1000XPAUSE) &&
  65	    (lp_abil & ADVERTISE_1000XPAUSE)) {
  66		status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
  67	} else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
  68		   (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
  69		status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
  70			MLO_PAUSE_TX : 0;
  71		status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
  72			MLO_PAUSE_RX : 0;
  73	} else {
  74		status->pause = MLO_PAUSE_NONE;
  75	}
  76}
  77
/* Read link status from the low speed (DEV2G5) device: link-down sticky,
 * current link/sync state, and - when autonegotiation has completed -
 * the negotiated speed, duplex and pause.  Always returns 0.
 */
static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_status *status)
{
	u32 portno = port->portno;
	u16 lp_adv, ld_adv;
	u32 value;

	/* Get PCS Link down sticky */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
	status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
	if (status->link_down)	/* Clear the sticky */
		spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));

	/* Get both current Link and Sync status */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
	status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
		       DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);

	/* Fixed speeds for the non-negotiating BaseX modes */
	if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
		status->speed = SPEED_1000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
		status->speed = SPEED_2500;

	status->duplex = DUPLEX_FULL;

	/* Get PCS ANEG status register */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));

	/* Aneg complete provides more information  */
	if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
		lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
		if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
			decode_sgmii_word(lp_adv, status);
		} else {
			/* Clause 37 needs the local advertisement too */
			value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
			ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
			decode_cl37_word(lp_adv, ld_adv, status);
		}
	}
	return 0;
}
 120
/* Read link status from a high speed (SFI/BaseR) device.  The link state is
 * derived from the MAC TX monitor: the link is considered up only when the
 * monitor reports a clean IDLE state.  Returns 0, or -EINVAL if called for
 * a port not in a BaseR mode.
 */
static int sparx5_get_sfi_status(struct sparx5 *sparx5,
				 struct sparx5_port *port,
				 struct sparx5_port_status *status)
{
	bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
	u32 portno = port->portno;
	u32 value, dev, tinst;
	void __iomem *inst;

	if (!high_speed_dev) {
		netdev_err(port->ndev, "error: low speed and SFI mode\n");
		return -EINVAL;
	}

	dev = sparx5_to_high_dev(sparx5, portno);
	tinst = sparx5_port_dev_index(sparx5, portno);
	inst = spx5_inst_get(sparx5, dev, tinst);

	value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
		/* The link is or has been down. Clear the sticky bit */
		status->link_down = 1;
		spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
		/* Re-read to get the current (post-clear) state */
		value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	}
	status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
	status->duplex = DUPLEX_FULL;
	if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
		status->speed = SPEED_5000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
		status->speed = SPEED_10000;
	else
		status->speed = SPEED_25000;

	return 0;
}
 157
 158/* Get link status of 1000Base-X/in-band and SFI ports.
 159 */
 160int sparx5_get_port_status(struct sparx5 *sparx5,
 161			   struct sparx5_port *port,
 162			   struct sparx5_port_status *status)
 163{
 164	memset(status, 0, sizeof(*status));
 165	status->speed = port->conf.speed;
 166	if (port->conf.power_down) {
 167		status->link = false;
 168		return 0;
 169	}
 170	switch (port->conf.portmode) {
 171	case PHY_INTERFACE_MODE_SGMII:
 172	case PHY_INTERFACE_MODE_QSGMII:
 173	case PHY_INTERFACE_MODE_1000BASEX:
 174	case PHY_INTERFACE_MODE_2500BASEX:
 175		return sparx5_get_dev2g5_status(sparx5, port, status);
 176	case PHY_INTERFACE_MODE_5GBASER:
 177	case PHY_INTERFACE_MODE_10GBASER:
 178	case PHY_INTERFACE_MODE_25GBASER:
 179		return sparx5_get_sfi_status(sparx5, port, status);
 180	case PHY_INTERFACE_MODE_NA:
 181		return 0;
 182	default:
 183		netdev_err(port->ndev, "Status not supported");
 184		return -ENODEV;
 185	}
 186	return 0;
 187}
 188
 189static int sparx5_port_error(struct sparx5_port *port,
 190			     struct sparx5_port_config *conf,
 191			     enum port_error errtype)
 192{
 193	switch (errtype) {
 194	case SPX5_PERR_SPEED:
 195		netdev_err(port->ndev,
 196			   "Interface does not support speed: %u: for %s\n",
 197			   conf->speed, phy_modes(conf->portmode));
 198		break;
 199	case SPX5_PERR_IFTYPE:
 200		netdev_err(port->ndev,
 201			   "Switch port does not support interface type: %s\n",
 202			   phy_modes(conf->portmode));
 203		break;
 204	default:
 205		netdev_err(port->ndev,
 206			   "Interface configuration error\n");
 207	}
 208
 209	return -EINVAL;
 210}
 211
 212static int sparx5_port_verify_speed(struct sparx5 *sparx5,
 213				    struct sparx5_port *port,
 214				    struct sparx5_port_config *conf)
 215{
 216	const struct sparx5_ops *ops = sparx5->data->ops;
 217
 218	if ((ops->is_port_2g5(port->portno) &&
 219	     conf->speed > SPEED_2500) ||
 220	    (ops->is_port_5g(port->portno)  &&
 221	     conf->speed > SPEED_5000) ||
 222	    (ops->is_port_10g(port->portno) &&
 223	     conf->speed > SPEED_10000))
 224		return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
 225
 226	switch (conf->portmode) {
 227	case PHY_INTERFACE_MODE_NA:
 228		return -EINVAL;
 229	case PHY_INTERFACE_MODE_1000BASEX:
 230		if (conf->speed != SPEED_1000 ||
 231		    ops->is_port_2g5(port->portno))
 232			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
 233		if (ops->is_port_2g5(port->portno))
 234			return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
 235		break;
 236	case PHY_INTERFACE_MODE_2500BASEX:
 237		if (conf->speed != SPEED_2500 ||
 238		    ops->is_port_2g5(port->portno))
 239			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
 240		break;
 241	case PHY_INTERFACE_MODE_QSGMII:
 242		if (port->portno > 47)
 243			return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
 244		fallthrough;
 245	case PHY_INTERFACE_MODE_SGMII:
 246		if (conf->speed != SPEED_1000 &&
 247		    conf->speed != SPEED_100 &&
 248		    conf->speed != SPEED_10 &&
 249		    conf->speed != SPEED_2500)
 250			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
 251		break;
 252	case PHY_INTERFACE_MODE_5GBASER:
 253	case PHY_INTERFACE_MODE_10GBASER:
 254	case PHY_INTERFACE_MODE_25GBASER:
 255		if ((conf->speed != SPEED_5000 &&
 256		     conf->speed != SPEED_10000 &&
 257		     conf->speed != SPEED_25000))
 258			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
 259		break;
 260	default:
 261		return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
 262	}
 263	return 0;
 264}
 265
 266static bool sparx5_dev_change(struct sparx5 *sparx5,
 267			      struct sparx5_port *port,
 268			      struct sparx5_port_config *conf)
 269{
 270	return sparx5_is_baser(port->conf.portmode) ^
 271		sparx5_is_baser(conf->portmode);
 272}
 273
/* Poll the queue system resource counters until all memory and frame
 * references owned by @portno have drained, or time out after ~2 seconds
 * (2000 iterations of 1-2ms sleeps).  Returns 0 when empty, -EINVAL on
 * timeout.
 */
static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
{
	u32  value, resource, prio, delay_cnt = 0;
	bool poll_src = true;
	char *mem = "";

	/* Resource == 0: Memory tracked per source (SRC-MEM)
	 * Resource == 1: Frame references tracked per source (SRC-REF)
	 * Resource == 2: Memory tracked per destination (DST-MEM)
	 * Resource == 3: Frame references tracked per destination. (DST-REF)
	 */
	while (1) {
		bool empty = true;

		for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
			u32 base;

			/* Counter layout: 2048 entries per resource group,
			 * SPX5_PRIOS consecutive counters per port.
			 */
			base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
			for (prio = 0; prio < SPX5_PRIOS; prio++) {
				value = spx5_rd(sparx5,
						QRES_RES_STAT(base + prio));
				if (value) {
					/* NOTE(review): label looks swapped
					 * vs the resource table above
					 * (resource 0 is SRC) - confirm.
					 */
					mem = resource == 0 ?
						"DST-MEM" : "SRC-MEM";
					empty = false;
				}
			}
		}

		if (empty)
			break;

		if (delay_cnt++ == 2000) {
			dev_err(sparx5->dev,
				"Flush timeout port %u. %s queue not empty\n",
				portno, mem);
			return -EINVAL;
		}

		usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
	}
	return 0;
}
 317
/* Disable and flush a port following the hardware's 12-step shutdown
 * sequence (reset PCS Rx, stop MAC Rx, stop forwarding, flush and drain
 * the egress queues, reset the MAC clock domain and finally disable the
 * PCS).  @high_spd_dev selects the 5G/10G/25G device registers instead of
 * the DEV2G5 ones.  The step ordering is mandated by the hardware; do not
 * reorder.  Returns 0 on success or the sparx5_port_flush_poll() error.
 */
static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
{
	u32 tinst = high_spd_dev ?
		    sparx5_port_dev_index(sparx5, port->portno) : port->portno;
	u32 dev = high_spd_dev ?
		  sparx5_to_high_dev(sparx5, port->portno) : TARGET_DEV2G5;
	void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 spd = port->conf.speed;
	u32 spd_prm;
	int err;

	if (high_spd_dev) {
		/* 1: Reset the PCS Rx clock domain  */
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV10G_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV10G_MAC_ENA_CFG(0));
	} else {
		/* 1: Reset the PCS Rx clock domain  */
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV2G5_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV2G5_MAC_ENA_CFG(0));
	}
	/* 3: Disable traffic being sent to or from switch port->portno */
	spx5_rmw(0,
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* 4: Disable dequeuing from the egress queues  */
	spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 5: Disable Flowcontrol */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	/* Slower link speeds need a longer drain time */
	spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
	/* 6: Wait while the last frame is exiting the queues */
	usleep_range(8 * spd_prm, 10 * spd_prm);

	/* 7: Flush the queues associated with the port->portno */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_DST |
		 HSCH_FLUSH_CTRL_FLUSH_SRC |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	/* 8: Enable dequeuing from the egress queues */
	spx5_rmw(0,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 9: Wait until flushing is complete */
	err = sparx5_port_flush_poll(sparx5, port->portno);
	if (err)
		return err;

	/* 10: Reset the  MAC clock domain */
	if (high_spd_dev) {
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
			      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

	} else {
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
			      DEV2G5_DEV_RST_CTRL_SPEED_SEL |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
	}
	/* 11: Clear flushing */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	if (high_spd_dev) {
		u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
		void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);

		/* 12: Disable 5G/10G/25 BaseR PCS */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      pcsinst,
			      PCS10G_BR_PCS_CFG(0));

		if (ops->is_port_25g(port->portno))
			/* Disable 25G PCS */
			spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
				 DEV25G_PCS25G_CFG_PCS25G_ENA,
				 sparx5,
				 DEV25G_PCS25G_CFG(tinst));
	} else {
		/* 12: Disable 1G PCS */
		spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
			 DEV2G5_PCS1G_CFG_PCS_ENA,
			 sparx5,
			 DEV2G5_PCS1G_CFG(port->portno));
	}

	/* The port is now flushed and disabled  */
	return 0;
}
 459
/* Compute the DSM TX stop watermark (FIFO size) for a port at a given
 * speed.  The value depends on the port's taxi-bus distance, the core
 * clock period and the MAC clock period.
 * NOTE(review): the 6400/12800/... constants are MAC clock periods in
 * picoseconds per the surrounding math - confirm against the datasheet.
 */
static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
			       u32 portno, u32 speed)
{
	u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
	/* Per-port distance on the internal taxi bus */
	const u32 taxi_dist[SPX5_PORTS_ALL] = {
		6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
		4, 4, 4, 4,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		4, 6, 8, 4, 6, 8, 6, 8,
		2, 2, 2, 2, 2, 2, 2, 4, 2
	};
	u32 mac_per    = 6400, tmp1, tmp2, tmp3;
	u32 fifo_width = 16;
	u32 mac_width  = 8;
	u32 addition   = 0;

	/* Only Sparx5 itself needs a computed watermark */
	if (!is_sparx5(sparx5))
		return 0;

	switch (speed) {
	case SPEED_25000:
		return 0;
	case SPEED_10000:
		mac_per = 6400;
		mac_width = 8;
		addition = 1;
		break;
	case SPEED_5000:
		mac_per = 12800;
		mac_width = 8;
		addition = 0;
		break;
	case SPEED_2500:
		mac_per = 3200;
		mac_width = 1;
		addition = 0;
		break;
	case SPEED_1000:
		mac_per =  8000;
		mac_width = 1;
		addition = 0;
		break;
	case SPEED_100:
	case SPEED_10:
		return 1;
	default:
		break;
	}

	/* Scale by bus width ratio and round the result up (ceil) */
	tmp1 = 1000 * mac_width / fifo_width;
	tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
		       * sys_clk / mac_per);
	tmp3 = tmp1 * tmp2 / 1000;
	return  (tmp3 + 2000 + 999) / 1000 + addition;
}
 518
/* Configure port muxing:
 * QSGMII:     4x2G5 devices
 * Only acts when the portmode actually changes; returns 0 in all cases.
 */
int sparx5_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
			struct sparx5_port_config *conf)
{
	u32 portno = port->portno;
	u32 inst;

	if (port->conf.portmode == conf->portmode)
		return 0; /* Nothing to do */

	switch (conf->portmode) {
	case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q'  */
		/* One QSGMII controller serves each group of 4 ports */
		inst = (portno - portno % 4) / 4;
		spx5_rmw(BIT(inst),
			 BIT(inst),
			 sparx5,
			 PORT_CONF_QSGMII_ENA);

		if ((portno / 4 % 2) == 0) {
			/* Affects d0-d3,d8-d11..d40-d43 */
			spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
				 PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
				 PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
				 PORT_CONF_USGMII_CFG_QUAD_MODE,
				 sparx5,
				 PORT_CONF_USGMII_CFG((portno / 8)));
		}
		break;
	default:
		break;
	}
	return 0;
}
 556
/* Program VLAN tag awareness on the port MACs: the tag ethertype, whether
 * single or double tagging is recognised, and tag-aware max length
 * checking.  The DEV10G registers are skipped for 2G5-only ports.
 * Always returns 0.
 */
static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
				    struct sparx5_port *port)
{
	enum sparx5_port_max_tags max_tags    = port->max_vlan_tags;
	int tag_ct          = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
			      max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
	bool dtag           = max_tags == SPX5_PORT_MAX_TAGS_TWO;
	enum sparx5_vlan_port_type vlan_type  = port->vlan_type;
	bool dotag          = max_tags != SPX5_PORT_MAX_TAGS_NONE;
	u32 dev             = sparx5_to_high_dev(sparx5, port->portno);
	u32 tinst           = sparx5_port_dev_index(sparx5, port->portno);
	void __iomem *inst  = spx5_inst_get(sparx5, dev, tinst);
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 etype;

	/* Tag ethertype: custom, C-tag (0x8100) or S-tag (0x88a8) */
	etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
		 port->custom_etype :
		 vlan_type == SPX5_VLAN_PORT_TYPE_C ?
		 SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);

	spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
		sparx5,
		DEV2G5_MAC_TAGS_CFG(port->portno));

	/* Ports without a high speed device are done */
	if (ops->is_port_2g5(port->portno))
		return 0;

	spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
		      DEV10G_MAC_TAGS_CFG_TAG_ID |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA,
		      inst,
		      DEV10G_MAC_TAGS_CFG(0, 0));

	spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
		      DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
		      inst,
		      DEV10G_MAC_NUM_TAGS_CFG(0));

	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
		      inst,
		      DEV10G_MAC_MAXLEN_CFG(0));
	return 0;
}
 605
 606int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
 607{
 608	u32 clk_period_ps = 1600; /* 625Mhz for now */
 609	u32 urg = 672000;
 610
 611	switch (speed) {
 612	case SPEED_10:
 613	case SPEED_100:
 614	case SPEED_1000:
 615		urg = 672000;
 616		break;
 617	case SPEED_2500:
 618		urg = 270000;
 619		break;
 620	case SPEED_5000:
 621		urg = 135000;
 622		break;
 623	case SPEED_10000:
 624		urg = 67200;
 625		break;
 626	case SPEED_25000:
 627		urg = 27000;
 628		break;
 629	}
 630	return urg / clk_period_ps - 1;
 631}
 632
 633static u16 sparx5_wm_enc(u16 value)
 634{
 635	if (value >= 2048)
 636		return 2048 + value / 16;
 637
 638	return value;
 639}
 640
/* Configure pause flow control for a port: half-duplex backpressure,
 * obeying received pause frames (MLO_PAUSE_RX) and generating pause
 * frames (MLO_PAUSE_TX, via the pause-stop watermark).  Always returns 0.
 */
static int sparx5_port_fc_setup(struct sparx5 *sparx5,
				struct sparx5_port *port,
				struct sparx5_port_config *conf)
{
	bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
	u32 pause_stop = 0xFFF - 1; /* FC gen disabled */

	/* Enable generation: stop pause at 4 max-size frames of buffer use */
	if (conf->pause & MLO_PAUSE_TX)
		pause_stop = sparx5_wm_enc(4  * (ETH_MAXLEN /
						 SPX5_BUFFER_CELL_SZ));

	/* Set HDX flowcontrol */
	spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
		 DSM_MAC_CFG_HDX_BACKPREASSURE,
		 sparx5,
		 DSM_MAC_CFG(port->portno));

	/* Obey flowcontrol  */
	spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
		 DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
		 sparx5,
		 DSM_RX_PAUSE_CFG(port->portno));

	/* Disable forward pressure */
	spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
		 QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
		 sparx5,
		 QSYS_FWD_PRESSURE(port->portno));

	/* Generate pause frames */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	return 0;
}
 678
 679static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
 680{
 681	if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
 682		return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
 683	else
 684		return 1; /* Enable SGMII Aneg */
 685}
 686
/* Configure the port's serdes (media, speed, mode) through the generic PHY
 * framework, optionally resetting it first.
 * Returns 0 on success or a negative error code from the phy_* calls.
 */
int sparx5_serdes_set(struct sparx5 *sparx5,
		      struct sparx5_port *port,
		      struct sparx5_port_config *conf)
{
	int portmode, err, speed = conf->speed;

	/* In QSGMII mode only the first port of each group of four owns the
	 * serdes; the other three have nothing to configure.
	 */
	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
	    ((port->portno % 4) != 0)) {
		return 0;
	}
	/* For BaseR modes the serdes speed is fixed by the portmode */
	if (sparx5_is_baser(conf->portmode)) {
		if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
			speed = SPEED_25000;
		else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
			speed = SPEED_10000;
		else
			speed = SPEED_5000;
	}

	err = phy_set_media(port->serdes, conf->media);
	if (err)
		return err;
	if (speed > 0) {
		err = phy_set_speed(port->serdes, speed);
		if (err)
			return err;
	}
	if (conf->serdes_reset) {
		err = phy_reset(port->serdes);
		if (err)
			return err;
	}

	/* Configure SerDes with port parameters
	 * For BaseR, the serdes driver supports 10GGBASE-R and speed 5G/10G/25G
	 */
	portmode = conf->portmode;
	if (sparx5_is_baser(conf->portmode))
		portmode = PHY_INTERFACE_MODE_10GBASER;
	err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
	if (err)
		return err;
	/* Reset has been performed (if requested); do not repeat it on the
	 * next reconfiguration unless asked again.
	 */
	conf->serdes_reset = false;
	return err;
}
 732
 733static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
 734				   struct sparx5_port *port,
 735				   struct sparx5_port_config *conf)
 736{
 737	bool sgmii = false, inband_aneg = false;
 738	int err;
 739
 740	if (conf->inband) {
 741		if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
 742		    conf->portmode == PHY_INTERFACE_MODE_QSGMII)
 743			inband_aneg = true; /* Cisco-SGMII in-band-aneg */
 744		else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
 745			 conf->autoneg)
 746			inband_aneg = true; /* Clause-37 in-band-aneg */
 747
 748		err = sparx5_serdes_set(sparx5, port, conf);
 749		if (err)
 750			return -EINVAL;
 751	} else {
 752		sgmii = true; /* Phy is connected to the MAC */
 753	}
 754
 755	/* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
 756	spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
 757		 DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
 758		 sparx5,
 759		 DEV2G5_PCS1G_MODE_CFG(port->portno));
 760
 761	/* Enable PCS */
 762	spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
 763		sparx5,
 764		DEV2G5_PCS1G_CFG(port->portno));
 765
 766	if (inband_aneg) {
 767		u16 abil = sparx5_get_aneg_word(conf);
 768
 769		/* Enable in-band aneg */
 770		spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
 771			DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
 772			DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
 773			DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
 774			sparx5,
 775			DEV2G5_PCS1G_ANEG_CFG(port->portno));
 776	} else {
 777		spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
 778	}
 779
 780	/* Take PCS out of reset */
 781	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
 782		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
 783		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
 784		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
 785		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
 786		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
 787		 sparx5,
 788		 DEV2G5_DEV_RST_CTRL(port->portno));
 789
 790	return 0;
 791}
 792
 793static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
 794				    struct sparx5_port *port,
 795				    struct sparx5_port_config *conf)
 796{
 797	u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
 798	u32 pix = sparx5_port_dev_index(sparx5, port->portno);
 799	u32 dev = sparx5_to_high_dev(sparx5, port->portno);
 800	u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
 801	void __iomem *devinst;
 802	void __iomem *pcsinst;
 803	int err;
 804
 805	devinst = spx5_inst_get(sparx5, dev, pix);
 806	pcsinst = spx5_inst_get(sparx5, pcs, pix);
 807
 808	/*  SFI : No in-band-aneg. Speeds 5G/10G/25G */
 809	err = sparx5_serdes_set(sparx5, port, conf);
 810	if (err)
 811		return -EINVAL;
 812	if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
 813		/* Enable PCS for 25G device, speed 25G */
 814		spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
 815			 DEV25G_PCS25G_CFG_PCS25G_ENA,
 816			 sparx5,
 817			 DEV25G_PCS25G_CFG(pix));
 818	} else {
 819		/* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
 820		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
 821			      PCS10G_BR_PCS_CFG_PCS_ENA,
 822			      pcsinst,
 823			      PCS10G_BR_PCS_CFG(0));
 824	}
 825
 826	/* Enable 5G/10G/25G MAC module */
 827	spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
 828		     DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
 829		     devinst,
 830		     DEV10G_MAC_ENA_CFG(0));
 831
 832	/* Take the device out of reset */
 833	spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
 834		      DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
 835		      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
 836		      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
 837		      DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
 838		      DEV10G_DEV_RST_CTRL_PCS_RX_RST |
 839		      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
 840		      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
 841		      DEV10G_DEV_RST_CTRL_MAC_TX_RST |
 842		      DEV10G_DEV_RST_CTRL_SPEED_SEL,
 843		      devinst,
 844		      DEV10G_DEV_RST_CTRL(0));
 845
 846	return 0;
 847}
 848
 849/* Switch between 1G/2500 and 5G/10G/25G devices */
 850static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
 851{
 852	const struct sparx5_ops *ops = sparx5->data->ops;
 853	int bt_indx;
 854
 855	bt_indx = BIT(ops->get_port_dev_bit(sparx5, port));
 856
 857	if (ops->is_port_5g(port)) {
 858		spx5_rmw(hsd ? 0 : bt_indx,
 859			 bt_indx,
 860			 sparx5,
 861			 PORT_CONF_DEV5G_MODES);
 862	} else if (ops->is_port_10g(port)) {
 863		spx5_rmw(hsd ? 0 : bt_indx,
 864			 bt_indx,
 865			 sparx5,
 866			 PORT_CONF_DEV10G_MODES);
 867	} else if (ops->is_port_25g(port)) {
 868		spx5_rmw(hsd ? 0 : bt_indx,
 869			 bt_indx,
 870			 sparx5,
 871			 PORT_CONF_DEV25G_MODES);
 872	}
 873}
 874
/* Configure speed/duplex dependent registers */
static int sparx5_port_config_low_set(struct sparx5 *sparx5,
				      struct sparx5_port *port,
				      struct sparx5_port_config *conf)
{
	u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
	bool fdx = conf->duplex == DUPLEX_FULL;
	int spd = conf->speed;

	/* Speed select encoding: 10M -> 0, 100M -> 1, anything else -> 2 */
	clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
	gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
	/* Inter-frame gaps depend on speed and duplex */
	tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
	hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
	hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;

	/* GIG/FDX mode */
	spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
		 DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA,
		 sparx5,
		 DEV2G5_MAC_MODE_CFG(port->portno));

	/* Set MAC IFG Gaps */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	/* Disabling frame aging when in HDX (due to HDX issue) */
	spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
		 HSCH_PORT_MODE_AGE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* Enable MAC module (write the RX/TX enable mask bits directly) */
	spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
		DEV2G5_MAC_ENA_CFG_TX_ENA,
		sparx5,
		DEV2G5_MAC_ENA_CFG(port->portno));

	/* Select speed and take MAC out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
		 sparx5,
		 DEV2G5_DEV_RST_CTRL(port->portno));

	/* Enable PHAD_CTRL for better timestamping (non-Sparx5 variants) */
	if (!is_sparx5(sparx5)) {
		for (int i = 0; i < 2; ++i) {
			/* Divide the port clock by three for the two
			 * phase detection registers.
			 */
			spx5_rmw(DEV2G5_PHAD_CTRL_DIV_CFG_SET(3) |
				 DEV2G5_PHAD_CTRL_PHAD_ENA_SET(1),
				 DEV2G5_PHAD_CTRL_DIV_CFG |
				 DEV2G5_PHAD_CTRL_PHAD_ENA,
				 sparx5, DEV2G5_PHAD_CTRL(port->portno, i));
		}
	}

	return 0;
}
 943
 944int sparx5_port_pcs_set(struct sparx5 *sparx5,
 945			struct sparx5_port *port,
 946			struct sparx5_port_config *conf)
 947
 948{
 949	bool high_speed_dev = sparx5_is_baser(conf->portmode);
 950	int err;
 951
 952	if (sparx5_dev_change(sparx5, port, conf)) {
 953		/* switch device */
 954		sparx5_dev_switch(sparx5, port->portno, high_speed_dev);
 955
 956		/* Disable the not-in-use device */
 957		err = sparx5_port_disable(sparx5, port, !high_speed_dev);
 958		if (err)
 959			return err;
 960	}
 961	/* Disable the port before re-configuring */
 962	err = sparx5_port_disable(sparx5, port, high_speed_dev);
 963	if (err)
 964		return -EINVAL;
 965
 966	if (high_speed_dev)
 967		err = sparx5_port_pcs_high_set(sparx5, port, conf);
 968	else
 969		err = sparx5_port_pcs_low_set(sparx5, port, conf);
 970
 971	if (err)
 972		return -EINVAL;
 973
 974	if (conf->inband) {
 975		/* Enable/disable 1G counters in ASM */
 976		spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
 977			 ASM_PORT_CFG_CSC_STAT_DIS,
 978			 sparx5,
 979			 ASM_PORT_CFG(port->portno));
 980
 981		/* Enable/disable 1G counters in DSM */
 982		spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
 983			 DSM_BUF_CFG_CSC_STAT_DIS,
 984			 sparx5,
 985			 DSM_BUF_CFG(port->portno));
 986	}
 987
 988	port->conf = *conf;
 989
 990	return 0;
 991}
 992
/* Apply a full port configuration: speed/duplex registers (low speed
 * devices only), flow control, DSM stop watermark, and queue-system
 * forwarding parameters. Saves the config in the port on success.
 * Returns 0 on success or a negative error code.
 */
int sparx5_port_config(struct sparx5 *sparx5,
		       struct sparx5_port *port,
		       struct sparx5_port_config *conf)
{
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	const struct sparx5_ops *ops = sparx5->data->ops;
	int err, urgency, stop_wm;

	err = sparx5_port_verify_speed(sparx5, port, conf);
	if (err)
		return err;

	/* high speed device is already configured */
	if (!high_speed_dev)
		sparx5_port_config_low_set(sparx5, port, conf);

	/* Configure flow control */
	err = sparx5_port_fc_setup(sparx5, port, conf);
	if (err)
		return err;

	/* Non-Sparx5 variants: a 10G port running below 10G uses the shadow
	 * (2G5) device, so enable the shadow stop watermark.
	 */
	if (!is_sparx5(sparx5) && ops->is_port_10g(port->portno) &&
	    conf->speed < SPEED_10000)
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Set the DSM stop watermark */
	stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
	spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
		 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
		 sparx5,
		 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Enable port in queue system */
	urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* Save the new values */
	port->conf = *conf;

	return 0;
}
1042
/* Initialize port config to default */
int sparx5_port_init(struct sparx5 *sparx5,
		     struct sparx5_port *port,
		     struct sparx5_port_config *conf)
{
	/* Pause generation starts at 6 max frames of buffer fill; tail drop
	 * (ATOP) at 20 max frames.
	 */
	u32 pause_start = sparx5_wm_enc(6  * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 devhigh = sparx5_to_high_dev(sparx5, port->portno);
	u32 pix = sparx5_port_dev_index(sparx5, port->portno);
	u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
	bool sd_pol = port->signd_active_high;
	bool sd_sel = !port->signd_internal;
	bool sd_ena = port->signd_enable;
	u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
	void __iomem *devinst;
	void __iomem *pcsinst;
	int err;

	devinst = spx5_inst_get(sparx5, devhigh, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/* Set the mux port mode  */
	err = ops->set_port_mux(sparx5, port, conf);
	if (err)
		return err;

	/* Configure MAC vlan awareness */
	err = sparx5_port_max_tags_set(sparx5, port);
	if (err)
		return err;

	/* Set Max Length */
	spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		 DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
		 sparx5,
		 DEV2G5_MAC_MAXLEN_CFG(port->portno));

	/* 1G/2G5: Signal Detect configuration */
	spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
		DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
		DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
		sparx5,
		DEV2G5_PCS1G_SD_CFG(port->portno));

	/* Set Pause WM hysteresis */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
		 QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
		 QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
		 QSYS_PAUSE_CFG_PAUSE_START |
		 QSYS_PAUSE_CFG_PAUSE_STOP |
		 QSYS_PAUSE_CFG_PAUSE_ENA,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	/* Port ATOP. Frames are tail dropped when this WM is hit */
	spx5_wr(QSYS_ATOP_ATOP_SET(atop),
		sparx5,
		QSYS_ATOP(port->portno));

	/* Discard pause frame 01-80-C2-00-00-01 */
	spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));

	/* Discard SMAC multicast */
	spx5_rmw(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(0),
		 ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
		 sparx5, ANA_CL_FILTER_CTRL(port->portno));

	/* (Q)SGMII modes need the serdes configured and the low speed
	 * device selected up front.
	 */
	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
	    conf->portmode == PHY_INTERFACE_MODE_SGMII) {
		err = sparx5_serdes_set(sparx5, port, conf);
		if (err)
			return err;

		if (!ops->is_port_2g5(port->portno))
			/* Enable shadow device */
			spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
				 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
				 sparx5,
				 DSM_DEV_TX_STOP_WM_CFG(port->portno));

		sparx5_dev_switch(sparx5, port->portno, false);
	}
	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
		// All ports must be PCS enabled in QSGMII mode
		spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
			 DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
			 sparx5,
			 DEV2G5_DEV_RST_CTRL(port->portno));
	}
	/* Default IFGs for 1G */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	if (ops->is_port_2g5(port->portno))
		return 0; /* Low speed device only - return */

	/* Now setup the high speed device */
	if (conf->portmode == PHY_INTERFACE_MODE_NA)
		conf->portmode = PHY_INTERFACE_MODE_10GBASER;

	if (sparx5_is_baser(conf->portmode))
		sparx5_dev_switch(sparx5, port->portno, true);

	/* Set Max Length */
	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
		      devinst,
		      DEV10G_MAC_MAXLEN_CFG(0));

	/* Handle Signal Detect in 10G PCS */
	spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
		     PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
		     PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
		     pcsinst,
		     PCS10G_BR_PCS_SD_CFG(0));

	if (ops->is_port_25g(port->portno)) {
		/* Handle Signal Detect in 25G PCS */
		spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
			DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
			DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
			sparx5,
			DEV25G_PCS25G_SD_CFG(pix));
	}

	/* Non-Sparx5 variants: program the PTP stamper config.
	 * NOTE(review): the meaning of value 5 is not visible here, and
	 * these inst-relative writes index by port->portno while other
	 * inst-relative accesses above use index 0 - verify against the
	 * register layout.
	 */
	if (!is_sparx5(sparx5)) {
		void __iomem *inst;
		u32 dev, tinst;

		if (ops->is_port_10g(port->portno)) {
			dev = sparx5_to_high_dev(sparx5, port->portno);
			tinst = sparx5_port_dev_index(sparx5, port->portno);
			inst = spx5_inst_get(sparx5, dev, tinst);

			spx5_inst_wr(5, inst,
				     DEV10G_PTP_STAMPER_CFG(port->portno));
		} else if (ops->is_port_5g(port->portno)) {
			dev = sparx5_to_high_dev(sparx5, port->portno);
			tinst = sparx5_port_dev_index(sparx5, port->portno);
			inst = spx5_inst_get(sparx5, dev, tinst);

			spx5_inst_wr(5, inst,
				     DEV5G_PTP_STAMPER_CFG(port->portno));
		}
	}

	return 0;
}
1195
1196void sparx5_port_enable(struct sparx5_port *port, bool enable)
1197{
1198	struct sparx5 *sparx5 = port->sparx5;
1199
1200	/* Enable port for frame transfer? */
1201	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
1202		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
1203		 sparx5,
1204		 QFWD_SWITCH_PORT_MODE(port->portno));
1205}
1206
/* Apply the complete QoS configuration for a port: DSCP and PCP
 * classification, rewrite tables, and the port defaults.
 * Always returns 0 (the individual setters cannot fail).
 */
int sparx5_port_qos_set(struct sparx5_port *port,
			struct sparx5_port_qos *qos)
{
	sparx5_port_qos_dscp_set(port, &qos->dscp);
	sparx5_port_qos_pcp_set(port, &qos->pcp);
	sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
	sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
	sparx5_port_qos_default_set(port, qos);

	return 0;
}
1218
1219int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
1220				 struct sparx5_port_qos_pcp_rewr *qos)
1221{
1222	int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED;
1223	struct sparx5 *sparx5 = port->sparx5;
1224	u8 pcp, dei;
1225
1226	/* Use mapping table, with classified QoS as index, to map QoS and DP
1227	 * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified
1228	 * PCP. Classified PCP equals frame PCP.
1229	 */
1230	if (qos->enable)
1231		mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;
1232
1233	spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) |
1234		 REW_TAG_CTRL_TAG_DEI_CFG_SET(mode),
1235		 REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG,
1236		 port->sparx5, REW_TAG_CTRL(port->portno));
1237
1238	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1239		/* Extract PCP and DEI */
1240		pcp = qos->map.map[i];
1241		if (pcp > SPARX5_PORT_QOS_PCP_COUNT)
1242			dei = 1;
1243		else
1244			dei = 0;
1245
1246		/* Rewrite PCP and DEI, for each classified QoS class and DP
1247		 * level. This table is only used if tag ctrl mode is set to
1248		 * 'mapped'.
1249		 *
1250		 * 0:0nd   - prio=0 and dp:0 => pcp=0 and dei=0
1251		 * 0:0de   - prio=0 and dp:1 => pcp=0 and dei=1
1252		 */
1253		if (dei) {
1254			spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
1255				 REW_PCP_MAP_DE1_PCP_DE1, sparx5,
1256				 REW_PCP_MAP_DE1(port->portno, i));
1257
1258			spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei),
1259				 REW_DEI_MAP_DE1_DEI_DE1, port->sparx5,
1260				 REW_DEI_MAP_DE1(port->portno, i));
1261		} else {
1262			spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp),
1263				 REW_PCP_MAP_DE0_PCP_DE0, sparx5,
1264				 REW_PCP_MAP_DE0(port->portno, i));
1265
1266			spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei),
1267				 REW_DEI_MAP_DE0_DEI_DE0, port->sparx5,
1268				 REW_DEI_MAP_DE0(port->portno, i));
1269		}
1270	}
1271
1272	return 0;
1273}
1274
1275int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
1276			    struct sparx5_port_qos_pcp *qos)
1277{
1278	struct sparx5 *sparx5 = port->sparx5;
1279	u8 *pcp_itr = qos->map.map;
1280	u8 pcp, dp;
1281	int i;
1282
1283	/* Enable/disable pcp and dp for qos classification. */
1284	spx5_rmw(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(qos->qos_enable) |
1285		 ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(qos->dp_enable),
1286		 ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA | ANA_CL_QOS_CFG_PCP_DEI_DP_ENA,
1287		 sparx5, ANA_CL_QOS_CFG(port->portno));
1288
1289	/* Map each pcp and dei value to priority and dp */
1290	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1291		pcp = *(pcp_itr + i);
1292		dp = (i < SPARX5_PORT_QOS_PCP_COUNT) ? 0 : 1;
1293		spx5_rmw(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(pcp) |
1294			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(dp),
1295			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL |
1296			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, sparx5,
1297			 ANA_CL_PCP_DEI_MAP_CFG(port->portno, i));
1298	}
1299
1300	return 0;
1301}
1302
/* Select the DSCP rewrite mode for a port (mode is one of the
 * ANA_CL DSCP_REWR_MODE_SEL encodings).
 */
void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
					int mode)
{
	spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode),
		 ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5,
		 ANA_CL_QOS_CFG(port->portno));
}
1310
1311int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
1312				  struct sparx5_port_qos_dscp_rewr *qos)
1313{
1314	struct sparx5 *sparx5 = port->sparx5;
1315	bool rewr = false;
1316	u16 dscp;
1317	int i;
1318
1319	/* On egress, rewrite DSCP value to either classified DSCP or frame
1320	 * DSCP. If enabled; classified DSCP, if disabled; frame DSCP.
1321	 */
1322	if (qos->enable)
1323		rewr = true;
1324
1325	spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr),
1326		 REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5,
1327		 REW_DSCP_MAP(port->portno));
1328
1329	/* On ingress, map each classified QoS class and DP to classified DSCP
1330	 * value. This mapping table is global for all ports.
1331	 */
1332	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1333		dscp = qos->map.map[i];
1334		spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
1335			 ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
1336			 ANA_CL_QOS_MAP_CFG(i));
1337	}
1338
1339	return 0;
1340}
1341
1342int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
1343			     struct sparx5_port_qos_dscp *qos)
1344{
1345	struct sparx5 *sparx5 = port->sparx5;
1346	u8 *dscp = qos->map.map;
1347	int i;
1348
1349	/* Enable/disable dscp and dp for qos classification.
1350	 * Disable rewrite of dscp values for now.
1351	 */
1352	spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
1353		 ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
1354		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
1355		 ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
1356		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
1357		 ANA_CL_QOS_CFG(port->portno));
1358
1359	/* Map each dscp value to priority and dp */
1360	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1361		spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
1362			 ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
1363			 ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
1364			 ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
1365			 ANA_CL_DSCP_CFG(i));
1366	}
1367
1368	/* Set per-dscp trust */
1369	for (i = 0; i <  ARRAY_SIZE(qos->map.map); i++) {
1370		if (qos->qos_enable) {
1371			spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
1372				 ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
1373				 ANA_CL_DSCP_CFG(i));
1374		}
1375	}
1376
1377	return 0;
1378}
1379
/* Program the port QoS defaults: the default priority/DP used when no
 * classification applies, and pcp=0/dei=0 for untagged frames.
 * Always returns 0.
 */
int sparx5_port_qos_default_set(const struct sparx5_port *port,
				const struct sparx5_port_qos *qos)
{
	struct sparx5 *sparx5 = port->sparx5;

	/* Set default prio and dp level */
	spx5_rmw(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(qos->default_prio) |
		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(0),
		 ANA_CL_QOS_CFG_DEFAULT_QOS_VAL |
		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL,
		 sparx5, ANA_CL_QOS_CFG(port->portno));

	/* Set default pcp and dei for untagged frames */
	spx5_rmw(ANA_CL_VLAN_CTRL_PORT_PCP_SET(0) |
		 ANA_CL_VLAN_CTRL_PORT_DEI_SET(0),
		 ANA_CL_VLAN_CTRL_PORT_PCP |
		 ANA_CL_VLAN_CTRL_PORT_DEI,
		 sparx5, ANA_CL_VLAN_CTRL(port->portno));

	return 0;
}
1401
/* Map an internal port index to its chip port number: internal ports are
 * numbered after the front ports (n_ports).
 */
int sparx5_get_internal_port(struct sparx5 *sparx5, int port)
{
	return sparx5->data->consts->n_ports + port;
}