Linux Audio

Check our new training course

Loading...
Note: File does not exist in v4.10.11.
   1// SPDX-License-Identifier: (GPL-2.0 OR MIT)
   2/*
   3 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
   4 * stmmac XGMAC support.
   5 */
   6
   7#include <linux/bitrev.h>
   8#include <linux/crc32.h>
   9#include <linux/iopoll.h>
  10#include "stmmac.h"
  11#include "stmmac_ptp.h"
  12#include "dwxlgmac2.h"
  13#include "dwxgmac2.h"
  14
/* Program the XGMAC TX/RX configuration registers with the default
 * initialization bits and unmask the default MAC interrupts.  When a
 * PHY-less setup is in use (hw->ps holds a forced speed), the TX path
 * is enabled and its speed-selection bits are forced to that rate.
 */
static void dwxgmac2_core_init(struct mac_device_info *hw,
			       struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 tx, rx;

	tx = readl(ioaddr + XGMAC_TX_CONFIG);
	rx = readl(ioaddr + XGMAC_RX_CONFIG);

	tx |= XGMAC_CORE_INIT_TX;
	rx |= XGMAC_CORE_INIT_RX;

	/* hw->ps != 0 means fixed-speed, PHY-less operation */
	if (hw->ps) {
		tx |= XGMAC_CONFIG_TE;
		tx &= ~hw->link.speed_mask;

		switch (hw->ps) {
		case SPEED_10000:
			tx |= hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			tx |= hw->link.speed2500;
			break;
		case SPEED_1000:
		default:
			/* Any unrecognized speed falls back to 1G */
			tx |= hw->link.speed1000;
			break;
		}
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
	/* Unmask the default set of MAC interrupt sources */
	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
  49
  50static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
  51{
  52	priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
  53						 MAC_10000FD | MAC_25000FD |
  54						 MAC_40000FD | MAC_50000FD |
  55						 MAC_100000FD;
  56}
  57
  58static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
  59{
  60	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
  61	u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
  62
  63	if (enable) {
  64		tx |= XGMAC_CONFIG_TE;
  65		rx |= XGMAC_CONFIG_RE;
  66	} else {
  67		tx &= ~XGMAC_CONFIG_TE;
  68		rx &= ~XGMAC_CONFIG_RE;
  69	}
  70
  71	writel(tx, ioaddr + XGMAC_TX_CONFIG);
  72	writel(rx, ioaddr + XGMAC_RX_CONFIG);
  73}
  74
  75static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
  76{
  77	void __iomem *ioaddr = hw->pcsr;
  78	u32 value;
  79
  80	value = readl(ioaddr + XGMAC_RX_CONFIG);
  81	if (hw->rx_csum)
  82		value |= XGMAC_CONFIG_IPC;
  83	else
  84		value &= ~XGMAC_CONFIG_IPC;
  85	writel(value, ioaddr + XGMAC_RX_CONFIG);
  86
  87	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
  88}
  89
  90static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
  91				     u32 queue)
  92{
  93	void __iomem *ioaddr = hw->pcsr;
  94	u32 value;
  95
  96	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
  97	if (mode == MTL_QUEUE_AVB)
  98		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
  99	else if (mode == MTL_QUEUE_DCB)
 100		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
 101	writel(value, ioaddr + XGMAC_RXQ_CTRL0);
 102}
 103
/* Program the priority bitmap for RX @queue.  Queues 0-3 live in
 * RXQ_CTRL2 and queues 4-7 in RXQ_CTRL3, four fields per register.
 */
static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;	/* field index within the selected register */

	value = readl(ioaddr + reg);
	value &= ~XGMAC_PSRQ(queue);
	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);

	writel(value, ioaddr + reg);
}
 120
/* Program the priority map for TX traffic class @queue.  Classes 0-3
 * live in TC_PRTY_MAP0 and classes 4-7 in TC_PRTY_MAP1.
 */
static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;	/* field index within the selected register */

	value = readl(ioaddr + reg);
	value &= ~XGMAC_PSTC(queue);
	value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);

	writel(value, ioaddr + reg);
}
 137
 138static void dwxgmac2_rx_queue_routing(struct mac_device_info *hw,
 139				      u8 packet, u32 queue)
 140{
 141	void __iomem *ioaddr = hw->pcsr;
 142	u32 value;
 143
 144	static const struct stmmac_rx_routing dwxgmac2_route_possibilities[] = {
 145		{ XGMAC_AVCPQ, XGMAC_AVCPQ_SHIFT },
 146		{ XGMAC_PTPQ, XGMAC_PTPQ_SHIFT },
 147		{ XGMAC_DCBCPQ, XGMAC_DCBCPQ_SHIFT },
 148		{ XGMAC_UPQ, XGMAC_UPQ_SHIFT },
 149		{ XGMAC_MCBCQ, XGMAC_MCBCQ_SHIFT },
 150	};
 151
 152	value = readl(ioaddr + XGMAC_RXQ_CTRL1);
 153
 154	/* routing configuration */
 155	value &= ~dwxgmac2_route_possibilities[packet - 1].reg_mask;
 156	value |= (queue << dwxgmac2_route_possibilities[packet - 1].reg_shift) &
 157		 dwxgmac2_route_possibilities[packet - 1].reg_mask;
 158
 159	/* some packets require extra ops */
 160	if (packet == PACKET_AVCPQ)
 161		value |= FIELD_PREP(XGMAC_TACPQE, 1);
 162	else if (packet == PACKET_MCBCQ)
 163		value |= FIELD_PREP(XGMAC_MCBCQEN, 1);
 164
 165	writel(value, ioaddr + XGMAC_RXQ_CTRL1);
 166}
 167
 168static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
 169					    u32 rx_alg)
 170{
 171	void __iomem *ioaddr = hw->pcsr;
 172	u32 value;
 173
 174	value = readl(ioaddr + XGMAC_MTL_OPMODE);
 175	value &= ~XGMAC_RAA;
 176
 177	switch (rx_alg) {
 178	case MTL_RX_ALGORITHM_SP:
 179		break;
 180	case MTL_RX_ALGORITHM_WSP:
 181		value |= XGMAC_RAA;
 182		break;
 183	default:
 184		break;
 185	}
 186
 187	writel(value, ioaddr + XGMAC_MTL_OPMODE);
 188}
 189
/* Select the MTL TX scheduling algorithm.  For the weighted
 * algorithms (WRR/WFQ/DWRR) every traffic class is additionally
 * switched to the ETS transmission selection algorithm; for strict
 * priority (or unknown values) the TSA field is just cleared.
 */
static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					    u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	bool ets = true;
	u32 value;
	int i;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_ETSALG;

	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= XGMAC_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= XGMAC_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= XGMAC_DWRR;
		break;
	default:
		/* Strict priority: no ETS on the traffic classes */
		ets = false;
		break;
	}

	writel(value, ioaddr + XGMAC_MTL_OPMODE);

	/* Set ETS if desired */
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
		value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
		value &= ~XGMAC_TSA;
		if (ets)
			value |= XGMAC_ETS;
		writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
	}
}
 227
/* Program the quantum/weight used by the weighted TX scheduling
 * algorithms (WRR/WFQ/DWRR) for traffic class @queue.
 * (@priv is unused here; kept for the stmmac ops signature.)
 */
static void dwxgmac2_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
					     struct mac_device_info *hw,
					     u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}
 236
/* Map MTL RX @queue onto DMA channel @chan.  Queues 0-3 are
 * configured in MTL_RXQ_DMA_MAP0 and queues 4-7 in MTL_RXQ_DMA_MAP1.
 */
static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
				    u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
	if (queue >= 4)
		queue -= 4;	/* field index within the selected register */

	value = readl(ioaddr + reg);
	value &= ~XGMAC_QxMDMACH(queue);
	value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);

	writel(value, ioaddr + reg);
}
 253
/* Configure the Credit-Based Shaper parameters for TX @queue and
 * switch the queue's transmission selection algorithm to CBS with
 * credit control enabled.
 * (@priv is unused here; kept for the stmmac ops signature.)
 */
static void dwxgmac2_config_cbs(struct stmmac_priv *priv,
				struct mac_device_info *hw,
				u32 send_slope, u32 idle_slope,
				u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
	/* The idle slope shares the quantum/weight register */
	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));

	value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
	value &= ~XGMAC_TSA;
	/* Enable credit control and select the CBS algorithm */
	value |= XGMAC_CC | XGMAC_CBS;
	writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}
 272
 273static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
 274{
 275	void __iomem *ioaddr = hw->pcsr;
 276	int i;
 277
 278	for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
 279		reg_space[i] = readl(ioaddr + i * 4);
 280}
 281
/* Handle MAC-level interrupts (PMT wake-up and LPI state changes),
 * updating the extra-stats counters as events are seen.
 *
 * Returns a CORE_IRQ_* bitmask describing TX LPI entry/exit so the
 * caller can react; RX LPI events are only counted.
 */
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
				    struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 stat, en;
	int ret = 0;

	en = readl(ioaddr + XGMAC_INT_EN);
	stat = readl(ioaddr + XGMAC_INT_STATUS);

	/* Only consider sources that are actually enabled */
	stat &= en;

	if (stat & XGMAC_PMTIS) {
		x->irq_receive_pmt_irq_n++;
		/* Read PMT to acknowledge the wake-up event; the value
		 * itself is not needed.
		 */
		readl(ioaddr + XGMAC_PMT);
	}

	if (stat & XGMAC_LPIIS) {
		/* Reading LPI_CTRL exposes the entry/exit event flags */
		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);

		if (lpi & XGMAC_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (lpi & XGMAC_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (lpi & XGMAC_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (lpi & XGMAC_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	return ret;
}
 318
/* Handle MTL queue interrupts for @chan: check the per-queue status,
 * report RX FIFO overflow to the caller and acknowledge everything.
 * (@priv is unused here; kept for the stmmac ops signature.)
 *
 * Returns CORE_IRQ_MTL_RX_OVERFLOW if an RX overflow was signalled,
 * 0 otherwise.
 */
static int dwxgmac2_host_mtl_irq_status(struct stmmac_priv *priv,
					struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	int ret = 0;
	u32 status;

	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
	if (status & BIT(chan)) {
		u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));

		if (chan_status & XGMAC_RXOVFIS)
			ret |= CORE_IRQ_MTL_RX_OVERFLOW;

		/* Write-1-to-clear: ack all pending queue events */
		writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
	}

	return ret;
}
 338
/* Program pause-frame flow control.  RX flow control is a single
 * enable; TX flow control is enabled per queue, with the pause time
 * only programmed in full-duplex mode.
 */
static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			       unsigned int fc, unsigned int pause_time,
			       u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 i;

	if (fc & FLOW_RX)
		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
	if (fc & FLOW_TX) {
		for (i = 0; i < tx_cnt; i++) {
			u32 value = XGMAC_TFE;

			/* Pause time is only meaningful in full duplex */
			if (duplex)
				value |= pause_time << XGMAC_PT_SHIFT;

			writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
		}
	}
}
 359
/* Configure PMT (Wake-on-LAN).  @mode may combine WAKE_MAGIC and
 * WAKE_UCAST; passing 0 disarms wake-up entirely.  When any wake mode
 * is armed the receiver is re-enabled so wake packets can be seen
 * while the MAC is in power-down.
 */
static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 val = 0x0;

	if (mode & WAKE_MAGIC)
		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
	if (mode & WAKE_UCAST)
		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
	if (val) {
		/* Make sure the receiver is running while powered down */
		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
		cfg |= XGMAC_CONFIG_RE;
		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
	}

	writel(val, ioaddr + XGMAC_PMT);
}
 377
 378static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
 379				   const unsigned char *addr,
 380				   unsigned int reg_n)
 381{
 382	void __iomem *ioaddr = hw->pcsr;
 383	u32 value;
 384
 385	value = (addr[5] << 8) | addr[4];
 386	writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
 387
 388	value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
 389	writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
 390}
 391
 392static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
 393				   unsigned char *addr, unsigned int reg_n)
 394{
 395	void __iomem *ioaddr = hw->pcsr;
 396	u32 hi_addr, lo_addr;
 397
 398	/* Read the MAC address from the hardware */
 399	hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
 400	lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
 401
 402	/* Extract the MAC address from the high and low words */
 403	addr[0] = lo_addr & 0xff;
 404	addr[1] = (lo_addr >> 8) & 0xff;
 405	addr[2] = (lo_addr >> 16) & 0xff;
 406	addr[3] = (lo_addr >> 24) & 0xff;
 407	addr[4] = hi_addr & 0xff;
 408	addr[5] = (hi_addr >> 8) & 0xff;
 409}
 410
 411static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
 412				  bool en_tx_lpi_clockgating)
 413{
 414	void __iomem *ioaddr = hw->pcsr;
 415	u32 value;
 416
 417	value = readl(ioaddr + XGMAC_LPI_CTRL);
 418
 419	value |= XGMAC_LPITXEN | XGMAC_LPITXA;
 420	if (en_tx_lpi_clockgating)
 421		value |= XGMAC_TXCGE;
 422
 423	writel(value, ioaddr + XGMAC_LPI_CTRL);
 424}
 425
 426static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
 427{
 428	void __iomem *ioaddr = hw->pcsr;
 429	u32 value;
 430
 431	value = readl(ioaddr + XGMAC_LPI_CTRL);
 432	value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
 433	writel(value, ioaddr + XGMAC_LPI_CTRL);
 434}
 435
 436static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
 437{
 438	void __iomem *ioaddr = hw->pcsr;
 439	u32 value;
 440
 441	value = readl(ioaddr + XGMAC_LPI_CTRL);
 442	if (link)
 443		value |= XGMAC_PLS;
 444	else
 445		value &= ~XGMAC_PLS;
 446	writel(value, ioaddr + XGMAC_LPI_CTRL);
 447}
 448
 449static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
 450{
 451	void __iomem *ioaddr = hw->pcsr;
 452	u32 value;
 453
 454	value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
 455	writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
 456}
 457
 458static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
 459				int mcbitslog2)
 460{
 461	int numhashregs, regs;
 462
 463	switch (mcbitslog2) {
 464	case 6:
 465		numhashregs = 2;
 466		break;
 467	case 7:
 468		numhashregs = 4;
 469		break;
 470	case 8:
 471		numhashregs = 8;
 472		break;
 473	default:
 474		return;
 475	}
 476
 477	for (regs = 0; regs < numhashregs; regs++)
 478		writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
 479}
 480
/* Program the MAC packet filter from the net_device state:
 * promiscuous, all-multicast, hashed multicast filtering, and the
 * perfect unicast address entries.
 */
static void dwxgmac2_set_filter(struct mac_device_info *hw,
				struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
	int mcbitslog2 = hw->mcast_bits_log2;
	u32 mc_filter[8];
	int i;

	/* Start from a clean filter state, keeping hash-or-perfect on */
	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
	value |= XGMAC_FILTER_HPF;

	memset(mc_filter, 0, sizeof(mc_filter));

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous: pass everything, including control frames */
		value |= XGMAC_FILTER_PR;
		value |= XGMAC_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* All-multi, or too many addresses for the hash table:
		 * pass all multicast and open the hash tables fully.
		 */
		value |= XGMAC_FILTER_PM;

		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		value |= XGMAC_FILTER_HMC;

		/* Hash each multicast address (bit-reversed CRC32) into
		 * the mc_filter bitmap.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
					(32 - mcbitslog2));
			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
		}
	}

	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Not enough perfect-filter entries: fall back to
		 * promiscuous mode.
		 */
		value |= XGMAC_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;	/* entry 0 holds the device MAC address */

		netdev_for_each_uc_addr(ha, dev) {
			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		/* Clear any stale entries left from a previous config */
		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
		}
	}

	writel(value, ioaddr + XGMAC_PACKET_FILTER);
}
 538
 539static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
 540{
 541	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
 542
 543	if (enable)
 544		value |= XGMAC_CONFIG_LM;
 545	else
 546		value &= ~XGMAC_CONFIG_LM;
 547
 548	writel(value, ioaddr + XGMAC_RX_CONFIG);
 549}
 550
/* Write one RSS key word (@is_key set) or indirection-table entry
 * (@is_key clear) at @idx through the indirect RSS_ADDR/RSS_DATA
 * interface, then wait for the operation-busy (OB) bit to clear.
 *
 * Returns 0 on success or a negative errno if the write times out.
 */
static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
				  u32 val)
{
	u32 ctrl = 0;

	writel(val, ioaddr + XGMAC_RSS_DATA);
	ctrl |= idx << XGMAC_RSSIA_SHIFT;
	ctrl |= is_key ? XGMAC_ADDRT : 0x0;
	ctrl |= XGMAC_OB;	/* kick off the indirect write */
	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);

	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
				  !(ctrl & XGMAC_OB), 100, 10000);
}
 565
/* Configure Receive Side Scaling.  A NULL or disabled @cfg turns RSS
 * off; otherwise the hash key and indirection table are written and
 * every RX queue is mapped for dynamic DMA-channel selection before
 * enabling IP/TCP/UDP hashing.
 *
 * Returns 0 on success or a negative errno from the indirect writes.
 */
static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, *key;
	int i, ret;

	value = readl(ioaddr + XGMAC_RSS_CTRL);
	if (!cfg || !cfg->enable) {
		value &= ~XGMAC_RSSE;
		writel(value, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	/* Program the hash key, one 32-bit word at a time */
	key = (u32 *)cfg->key;
	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
		if (ret)
			return ret;
	}

	/* Program the indirection table */
	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	/* Let the RSS result pick the DMA channel for each queue */
	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);
	return 0;
}
 600
/* Update VLAN filtering.  Three mutually exclusive modes:
 *  - @hash set: hash-based VLAN filtering (VTHM + ETV),
 *  - @perfect_match set: single-VID perfect matching (ETV only),
 *  - neither: VLAN filtering fully disabled.
 * @is_double selects double-VLAN (S-VLAN over C-VLAN) handling in the
 * first two modes.
 */
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		/* Hash matching plus tag comparison enabled */
		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		value &= ~XGMAC_VLAN_VID;
		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		/* Perfect match: tag comparison without hash matching */
		value &= ~XGMAC_VLAN_VTHM;
		value |= XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		/* Replace the VID field with the requested VID */
		value &= ~XGMAC_VLAN_VID;
		writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
	} else {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		/* Disable VLAN filtering entirely */
		value &= ~XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
		value &= ~XGMAC_VLAN_DOVLTC;
		value &= ~XGMAC_VLAN_VID;

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}
 670
/* One entry of a safety-error description table: @valid marks bits
 * that correspond to a real error source; @desc is the short mnemonic
 * and @detailed_desc the human-readable explanation.
 */
struct dwxgmac3_error_desc {
	bool valid;
	const char *desc;
	const char *detailed_desc;
};

/* Byte offset of a counter array inside struct stmmac_safety_stats */
#define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)
 678
/* Log every error bit set in @value against the description table
 * @desc and bump the matching counters at @field_offset inside
 * @stats.  @corr selects the "correctable"/"uncorrectable" wording.
 */
static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
			       const char *module_name,
			       const struct dwxgmac3_error_desc *desc,
			       unsigned long field_offset,
			       struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	/* Counter array for this module, located by byte offset */
	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
				"correctable" : "uncorrectable", module_name,
				desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}
 701
/* MAC safety-error descriptions, indexed by bit position in the
 * MAC_DPP_FSM interrupt status register.
 */
static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ true, "CPI", "Control Register Parity Check Error" },
};
 736
/* Read, clear (write-1-to-clear) and log the MAC DPP/FSM safety
 * interrupt status, updating the MAC error counters in @stats.
 */
static void dwxgmac3_handle_mac_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
	writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "MAC",
			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
}
 749
/* MTL ECC safety-error descriptions, indexed by bit position in the
 * MTL_ECC interrupt status register.
 */
static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
 784
/* Read, clear (write-1-to-clear) and log the MTL ECC safety
 * interrupt status, updating the MTL error counters in @stats.
 */
static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "MTL",
			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
}
 797
/* DMA ECC safety-error descriptions, indexed by bit position in the
 * DMA_ECC interrupt status register.
 */
static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
 832
#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"

/* DMA descriptor data-path-parity error descriptions: bits 0-15 are
 * per-channel TX descriptor parity errors, bits 16-31 per-channel RX
 * descriptor parity errors.
 */
static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
	{ true, "TDPES0", DPP_TX_ERR },
	{ true, "TDPES1", DPP_TX_ERR },
	{ true, "TDPES2", DPP_TX_ERR },
	{ true, "TDPES3", DPP_TX_ERR },
	{ true, "TDPES4", DPP_TX_ERR },
	{ true, "TDPES5", DPP_TX_ERR },
	{ true, "TDPES6", DPP_TX_ERR },
	{ true, "TDPES7", DPP_TX_ERR },
	{ true, "TDPES8", DPP_TX_ERR },
	{ true, "TDPES9", DPP_TX_ERR },
	{ true, "TDPES10", DPP_TX_ERR },
	{ true, "TDPES11", DPP_TX_ERR },
	{ true, "TDPES12", DPP_TX_ERR },
	{ true, "TDPES13", DPP_TX_ERR },
	{ true, "TDPES14", DPP_TX_ERR },
	{ true, "TDPES15", DPP_TX_ERR },
	{ true, "RDPES0", DPP_RX_ERR },
	{ true, "RDPES1", DPP_RX_ERR },
	{ true, "RDPES2", DPP_RX_ERR },
	{ true, "RDPES3", DPP_RX_ERR },
	{ true, "RDPES4", DPP_RX_ERR },
	{ true, "RDPES5", DPP_RX_ERR },
	{ true, "RDPES6", DPP_RX_ERR },
	{ true, "RDPES7", DPP_RX_ERR },
	{ true, "RDPES8", DPP_RX_ERR },
	{ true, "RDPES9", DPP_RX_ERR },
	{ true, "RDPES10", DPP_RX_ERR },
	{ true, "RDPES11", DPP_RX_ERR },
	{ true, "RDPES12", DPP_RX_ERR },
	{ true, "RDPES13", DPP_RX_ERR },
	{ true, "RDPES14", DPP_RX_ERR },
	{ true, "RDPES15", DPP_RX_ERR },
};
 870
/* Read, clear and log both the DMA ECC and the DMA descriptor
 * data-path-parity interrupt status registers, updating the DMA and
 * DMA_DPP error counters in @stats.  DPP errors are always reported
 * as uncorrectable.
 */
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "DMA",
			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);

	value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
	writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);

	dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
			   dwxgmac3_dma_dpp_errors,
			   STAT_OFF(dma_dpp_errors), stats);
}
 890
/* Enable the hardware safety features for automotive safety package
 * level @asp: ECC, safety interrupts and - depending on @asp - FSM
 * parity/timeout checking and data-path parity protection.
 * (@safety_cfg is unused here; kept for the stmmac ops signature.)
 *
 * Returns 0 on success or -EINVAL when no safety package is present.
 */
static int
dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
			    struct stmmac_safety_feature_cfg *safety_cfg)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* 0x2: Without ECC or Parity Ports on External Application Interface
	 * 0x4: Only ECC Protection for External Memory feature is selected
	 */
	if (asp == 0x2 || asp == 0x4)
		return 0;

	/* 4. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	/* 5. Enable Data Path Parity Protection */
	value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
	/* already enabled by default, explicit enable it again */
	value &= ~XGMAC_DPP_DISABLE;
	writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);

	return 0;
}
 937
/* Demultiplex the safety interrupt: dispatch MAC, MTL and DMA error
 * handlers according to the MTL/DMA safety status registers.
 *
 * Returns a positive value if any uncorrectable error was handled,
 * 0 if only correctable (or no) errors were seen, or -EINVAL when no
 * safety package is present.
 */
static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	/* MAC errors are always treated as uncorrectable */
	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
	 * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
	 * Parity Errors here
	 */
	err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}
 981
/* All error-description tables in module order (MAC, MTL, DMA,
 * DMA_DPP); dwxgmac3_safety_feat_dump() indexes this as
 * module*32 + bit.  NOTE(review): assumes the counter arrays in
 * struct stmmac_safety_stats follow the same order - confirm against
 * the struct definition.
 */
static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
	{ dwxgmac3_dma_dpp_errors },
};
 990
/* Translate flat safety-stats @index (32 entries per error module)
 * into its counter value (@count) and short description (@desc);
 * either output pointer may be NULL.
 *
 * Returns 0 on success, -EINVAL for an out-of-range or unused index.
 */
static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
				     int index, unsigned long *count,
				     const char **desc)
{
	int module = index / 32, offset = index % 32;
	unsigned long *ptr = (unsigned long *)stats;

	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
		return -EINVAL;
	if (!dwxgmac3_all_errors[module].desc[offset].valid)
		return -EINVAL;
	if (count)
		*count = *(ptr + index);
	if (desc)
		*desc = dwxgmac3_all_errors[module].desc[offset].desc;
	return 0;
}
1008
1009static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
1010{
1011	u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
1012
1013	val &= ~XGMAC_FRPE;
1014	writel(val, ioaddr + XGMAC_MTL_OPMODE);
1015
1016	return 0;
1017}
1018
1019static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
1020{
1021	u32 val;
1022
1023	val = readl(ioaddr + XGMAC_MTL_OPMODE);
1024	val |= XGMAC_FRPE;
1025	writel(val, ioaddr + XGMAC_MTL_OPMODE);
1026}
1027
/* Write one parser instruction entry into the RX Parser table at slot @pos,
 * one 32-bit word at a time through the indirect access registers.
 *
 * The register write sequence (data, address, WRRDN, STARTBUSY) and the
 * busy polls around it are required by the indirect-access protocol;
 * do not reorder.
 *
 * Returns 0 on success or the readl_poll_timeout() error on timeout.
 */
static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	/* Each table entry spans several consecutive 32-bit words */
	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Write OP */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start Write */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}
1069
1070static struct stmmac_tc_entry *
1071dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
1072			    unsigned int count, u32 curr_prio)
1073{
1074	struct stmmac_tc_entry *entry;
1075	u32 min_prio = ~0x0;
1076	int i, min_prio_idx;
1077	bool found = false;
1078
1079	for (i = count - 1; i >= 0; i--) {
1080		entry = &entries[i];
1081
1082		/* Do not update unused entries */
1083		if (!entry->in_use)
1084			continue;
1085		/* Do not update already updated entries (i.e. fragments) */
1086		if (entry->in_hw)
1087			continue;
1088		/* Let last entry be updated last */
1089		if (entry->is_last)
1090			continue;
1091		/* Do not return fragments */
1092		if (entry->is_frag)
1093			continue;
1094		/* Check if we already checked this prio */
1095		if (entry->prio < curr_prio)
1096			continue;
1097		/* Check if this is the minimum prio */
1098		if (entry->prio < min_prio) {
1099			min_prio = entry->prio;
1100			min_prio_idx = i;
1101			found = true;
1102		}
1103	}
1104
1105	if (found)
1106		return &entries[min_prio_idx];
1107	return NULL;
1108}
1109
/* (Re)program the whole Flexible RX Parser instruction table.
 *
 * RX and the parser are disabled while the table is rewritten and RX is
 * restored on every exit path via the re_enable label. Entries are
 * written in priority order (see dwxgmac3_rxp_get_next_entry()), each
 * fragment right after its parent, and the "all pass" entries last.
 *
 * Returns 0 on success or the first error from the programming helpers;
 * on the success path ret holds 0 from the last helper call.
 */
static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries by reverse order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			/* Jump past the fragment entry on match */
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		/* Program the fragment immediately after its parent */
		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	/* Nothing programmed: leave the parser disabled, just restore RX */
	if (!nve)
		goto re_enable;

	/* Update all pass entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}
1197
1198static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
1199{
1200	void __iomem *ioaddr = hw->pcsr;
1201	u32 value;
1202
1203	if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
1204				      value, value & XGMAC_TXTSC, 100, 10000))
1205		return -EBUSY;
1206
1207	*ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
1208	*ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
1209	return 0;
1210}
1211
/* Configure one flexible PPS output.
 *
 * Returns 0 on success, -EBUSY while a previous target-time programming
 * is still pending in hardware, and -EINVAL for an unavailable output,
 * missing timing parameters, or a period shorter than two sub-second
 * increments.
 */
static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
				    struct stmmac_pps_cfg *cfg, bool enable,
				    u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	/* Hardware has not yet latched the previously written target time */
	if (tnsec & XGMAC_TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	/* Clear this output's command/mode/enable field before rewriting it */
	val &= ~XGMAC_PPSx_MASK(index);

	if (!enable) {
		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
		writel(val, ioaddr + XGMAC_PPS_CONTROL);
		return 0;
	}

	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);

	/* XGMAC Core has 4 PPS outputs at most.
	 *
	 * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for
	 * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default,
	 * and can not be switched to Fixed mode, since PPSEN{1,2,3} are
	 * read-only reserved to 0.
	 * But we always set PPSEN{1,2,3} do not make things worse ;-)
	 *
	 * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must
	 * be set, or the PPS outputs stay in Fixed PPS mode by default.
	 */
	val |= XGMAC_PPSENx(index);

	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));

	/* Without digital rollover (TSCTRLSSR) the sub-second register does
	 * not count plain nanoseconds, so rescale — presumably ~0.465ns per
	 * unit given the /465 factor; confirm against the databook.
	 */
	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));

	/* Convert the requested period into sub-second-increment ticks */
	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	/* Interval needs at least 2 ticks (register holds period - 1) */
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));

	/* Width is half the interval (50% duty cycle) */
	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	writel(val, ioaddr + XGMAC_PPS_CONTROL);
	return 0;
}
1277
1278static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
1279{
1280	u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
1281
1282	value &= ~XGMAC_CONFIG_SARC;
1283	value |= val << XGMAC_CONFIG_SARC_SHIFT;
1284
1285	writel(value, ioaddr + XGMAC_TX_CONFIG);
1286}
1287
1288static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
1289{
1290	void __iomem *ioaddr = hw->pcsr;
1291	u32 value;
1292
1293	value = readl(ioaddr + XGMAC_VLAN_INCL);
1294	value |= XGMAC_VLAN_VLTI;
1295	value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
1296	value &= ~XGMAC_VLAN_VLC;
1297	value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
1298	writel(value, ioaddr + XGMAC_VLAN_INCL);
1299}
1300
1301static int dwxgmac2_filter_wait(struct mac_device_info *hw)
1302{
1303	void __iomem *ioaddr = hw->pcsr;
1304	u32 value;
1305
1306	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
1307			       !(value & XGMAC_XB), 100, 10000))
1308		return -EBUSY;
1309	return 0;
1310}
1311
1312static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
1313				u8 reg, u32 *data)
1314{
1315	void __iomem *ioaddr = hw->pcsr;
1316	u32 value;
1317	int ret;
1318
1319	ret = dwxgmac2_filter_wait(hw);
1320	if (ret)
1321		return ret;
1322
1323	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
1324	value |= XGMAC_TT | XGMAC_XB;
1325	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
1326
1327	ret = dwxgmac2_filter_wait(hw);
1328	if (ret)
1329		return ret;
1330
1331	*data = readl(ioaddr + XGMAC_L3L4_DATA);
1332	return 0;
1333}
1334
1335static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
1336				 u8 reg, u32 data)
1337{
1338	void __iomem *ioaddr = hw->pcsr;
1339	u32 value;
1340	int ret;
1341
1342	ret = dwxgmac2_filter_wait(hw);
1343	if (ret)
1344		return ret;
1345
1346	writel(data, ioaddr + XGMAC_L3L4_DATA);
1347
1348	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
1349	value |= XGMAC_XB;
1350	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
1351
1352	return dwxgmac2_filter_wait(hw);
1353}
1354
1355static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
1356				     bool en, bool ipv6, bool sa, bool inv,
1357				     u32 match)
1358{
1359	void __iomem *ioaddr = hw->pcsr;
1360	u32 value;
1361	int ret;
1362
1363	value = readl(ioaddr + XGMAC_PACKET_FILTER);
1364	value |= XGMAC_FILTER_IPFE;
1365	writel(value, ioaddr + XGMAC_PACKET_FILTER);
1366
1367	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1368	if (ret)
1369		return ret;
1370
1371	/* For IPv6 not both SA/DA filters can be active */
1372	if (ipv6) {
1373		value |= XGMAC_L3PEN0;
1374		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
1375		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
1376		if (sa) {
1377			value |= XGMAC_L3SAM0;
1378			if (inv)
1379				value |= XGMAC_L3SAIM0;
1380		} else {
1381			value |= XGMAC_L3DAM0;
1382			if (inv)
1383				value |= XGMAC_L3DAIM0;
1384		}
1385	} else {
1386		value &= ~XGMAC_L3PEN0;
1387		if (sa) {
1388			value |= XGMAC_L3SAM0;
1389			if (inv)
1390				value |= XGMAC_L3SAIM0;
1391		} else {
1392			value |= XGMAC_L3DAM0;
1393			if (inv)
1394				value |= XGMAC_L3DAIM0;
1395		}
1396	}
1397
1398	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1399	if (ret)
1400		return ret;
1401
1402	if (sa) {
1403		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
1404		if (ret)
1405			return ret;
1406	} else {
1407		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
1408		if (ret)
1409			return ret;
1410	}
1411
1412	if (!en)
1413		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1414
1415	return 0;
1416}
1417
1418static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
1419				     bool en, bool udp, bool sa, bool inv,
1420				     u32 match)
1421{
1422	void __iomem *ioaddr = hw->pcsr;
1423	u32 value;
1424	int ret;
1425
1426	value = readl(ioaddr + XGMAC_PACKET_FILTER);
1427	value |= XGMAC_FILTER_IPFE;
1428	writel(value, ioaddr + XGMAC_PACKET_FILTER);
1429
1430	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1431	if (ret)
1432		return ret;
1433
1434	if (udp) {
1435		value |= XGMAC_L4PEN0;
1436	} else {
1437		value &= ~XGMAC_L4PEN0;
1438	}
1439
1440	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
1441	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
1442	if (sa) {
1443		value |= XGMAC_L4SPM0;
1444		if (inv)
1445			value |= XGMAC_L4SPIM0;
1446	} else {
1447		value |= XGMAC_L4DPM0;
1448		if (inv)
1449			value |= XGMAC_L4DPIM0;
1450	}
1451
1452	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1453	if (ret)
1454		return ret;
1455
1456	if (sa) {
1457		value = match & XGMAC_L4SP0;
1458
1459		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1460		if (ret)
1461			return ret;
1462	} else {
1463		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;
1464
1465		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1466		if (ret)
1467			return ret;
1468	}
1469
1470	if (!en)
1471		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1472
1473	return 0;
1474}
1475
1476static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
1477				     u32 addr)
1478{
1479	void __iomem *ioaddr = hw->pcsr;
1480	u32 value;
1481
1482	writel(addr, ioaddr + XGMAC_ARP_ADDR);
1483
1484	value = readl(ioaddr + XGMAC_RX_CONFIG);
1485	if (en)
1486		value |= XGMAC_CONFIG_ARPEN;
1487	else
1488		value &= ~XGMAC_CONFIG_ARPEN;
1489	writel(value, ioaddr + XGMAC_RX_CONFIG);
1490}
1491
1492static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
1493				   u32 num_txq,
1494				   u32 num_rxq, bool enable)
1495{
1496	u32 value;
1497
1498	if (!enable) {
1499		value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
1500
1501		value &= ~XGMAC_EFPE;
1502
1503		writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
1504		return;
1505	}
1506
1507	value = readl(ioaddr + XGMAC_RXQ_CTRL1);
1508	value &= ~XGMAC_RQ;
1509	value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
1510	writel(value, ioaddr + XGMAC_RXQ_CTRL1);
1511
1512	value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
1513	value |= XGMAC_EFPE;
1514	writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
1515}
1516
/* MAC callback table for the XGMAC 2.10 core. Safety-feature handlers come
 * from the dwxgmac3 implementation; PCS and debug hooks are not supported.
 */
const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.phylink_get_caps = xgmac_phylink_get_caps,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = dwxgmac2_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1563
1564static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
1565				      u32 queue)
1566{
1567	void __iomem *ioaddr = hw->pcsr;
1568	u32 value;
1569
1570	value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue);
1571	if (mode == MTL_QUEUE_AVB)
1572		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
1573	else if (mode == MTL_QUEUE_DCB)
1574		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
1575	writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
1576}
1577
/* MAC callback table for the XLGMAC core. Identical to dwxgmac210_ops
 * except for rx_queue_enable, which uses the XLGMAC-specific register.
 */
const struct stmmac_ops dwxlgmac2_ops = {
	.core_init = dwxgmac2_core_init,
	.phylink_get_caps = xgmac_phylink_get_caps,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxlgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = dwxgmac2_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1624
/* Fill in mac_device_info constants (filter sizes, speed-select bit
 * patterns, MDIO register layout) for an XGMAC2 core. Always returns 0.
 */
int dwxgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXGMAC2\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	/* Register base used by all the hw callbacks */
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	/* Speed-select (SS) field encodings for the TX config register */
	mac->link.duplex = 0;
	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
	mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
	mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
	mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
	mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
	mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;

	/* MDIO layout: PHY address in bits 20:16, register in bits 15:0,
	 * CSR clock select in bits 21:19.
	 */
	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}
1661
/* Fill in mac_device_info constants for an XLGMAC core: same MDIO layout
 * as XGMAC2 but XLGMAC speed-select encodings up to 100G. Always
 * returns 0.
 */
int dwxlgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXLGMAC\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	/* Register base used by all the hw callbacks */
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	/* Speed-select (SS) field encodings for the TX config register */
	mac->link.duplex = 0;
	mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
	mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
	mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
	mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
	mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
	mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
	mac->link.speed_mask = XLGMAC_CONFIG_SS;

	/* MDIO layout: PHY address in bits 20:16, register in bits 15:0,
	 * CSR clock select in bits 21:19.
	 */
	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}