// SPDX-License-Identifier: GPL-2.0
/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/io.h>
#include <linux/of_device.h>

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

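/* Some 32-bit platforms do not provide native 64-bit MMIO accessors,
 * so fall back to composing readq/writeq from two 32-bit accesses to
 * the low and high words of the register.
 */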
#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif

static const struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

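/* Poll a device register until the given bits clear, waiting 'delay'
 * microseconds between reads, and return -ENODEV if the bits are
 * still set after 'limit' polls.  The _mac and _ipp variants below
 * differ only in which register window they access; the wrapper
 * macros reject compile-time-invalid limit/delay arguments via
 * BUILD_BUG_ON().
 */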
static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

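/* Interrupts are grouped into logical device numbers (LDNs) which
 * map to logical device groups (LDGs).  Rearming an LDG writes its
 * timer value (plus, optionally, the ARM bit) to the group's IMGMT
 * register; enabling or disabling an individual LDN toggles its bits
 * in the LD_IM0/LD_IM1 interrupt mask registers.
 */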
static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

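/* The parent's port_phy word packs a 2-bit port type per port,
 * masked with PORT_TYPE_MASK on decode.  For example, encoding type
 * 0x3 for port 2 yields 0x3 << 4 (bits 5:4 of the word), and
 * phy_decode(val, 2) recovers the 0x3.
 */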
static u32 phy_encode(u32 type, int port)
{
	return type << (port * 2);
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}

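/* Wait for the MIF frame turnaround bit, which the hardware sets
 * when an MDIO transaction completes.  On success the low 16 bits of
 * MIF_FRAME_OUTPUT hold the read data (returned as a non-negative
 * int); on timeout return -ENODEV.
 */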
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

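/* The ESR2 PLL configuration registers are 32 bits wide but are
 * accessed over MDIO as separate low/high 16-bit halves, so each
 * setter issues two writes.
 */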
static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}

/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}

static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}
	return 0;
}

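/* As with the ESR2 helpers above, the ESR rxtx/glue/reset registers
 * are accessed as 16-bit low/high halves over MDIO; these helpers
 * assemble the halves into (or split them from) one 32-bit value.
 */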
static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0)
			*val |= ((err & 0xffff) << 16);
		err = 0;
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}

static int esr_reset(struct niu *np)
{
	u32 uninitialized_var(reset);
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
			   np->port, reset);
		return -ENODEV;
	}

	return 0;
}

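/* Bring up the 10G SERDES: program the port's control and test
 * registers, condition all four lanes' rxtx/glue0 settings, reset
 * the ESR block, and finally verify the expected ready/detect signal
 * bits for this port.
 */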
static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}

static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}

static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

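/* Link status for 1G SERDES ports comes straight from the PCS MII
 * status register; when the link is up it is always reported as
 * 1Gb/sec full duplex.
 */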
static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}

static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}

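/* Derive link state from the standard MII registers: read BMCR/BMSR
 * plus the advertisement and link-partner registers, then resolve
 * speed and duplex either from the autonegotiation result or from
 * the forced BMCR settings.
 */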
static int link_status_mii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int err;
	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
	int supported, advertising, active_speed, active_duplex;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (unlikely(err < 0))
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (unlikely(err < 0))
		return err;
	bmsr = err;

	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
	if (unlikely(err < 0))
		return err;
	advert = err;

	err = mii_read(np, np->phy_addr, MII_LPA);
	if (unlikely(err < 0))
		return err;
	lpa = err;

	if (likely(bmsr & BMSR_ESTATEN)) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (unlikely(err < 0))
			return err;
		estatus = err;

		err = mii_read(np, np->phy_addr, MII_CTRL1000);
		if (unlikely(err < 0))
			return err;
		ctrl1000 = err;

		err = mii_read(np, np->phy_addr, MII_STAT1000);
		if (unlikely(err < 0))
			return err;
		stat1000 = err;
	} else
		estatus = ctrl1000 = stat1000 = 0;

	supported = 0;
	if (bmsr & BMSR_ANEGCAPABLE)
		supported |= SUPPORTED_Autoneg;
	if (bmsr & BMSR_10HALF)
		supported |= SUPPORTED_10baseT_Half;
	if (bmsr & BMSR_10FULL)
		supported |= SUPPORTED_10baseT_Full;
	if (bmsr & BMSR_100HALF)
		supported |= SUPPORTED_100baseT_Half;
	if (bmsr & BMSR_100FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (estatus & ESTATUS_1000_THALF)
		supported |= SUPPORTED_1000baseT_Half;
	if (estatus & ESTATUS_1000_TFULL)
		supported |= SUPPORTED_1000baseT_Full;
	lp->supported = supported;

	advertising = mii_adv_to_ethtool_adv_t(advert);
	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);

	if (bmcr & BMCR_ANENABLE) {
		int neg, neg1000;

		lp->active_autoneg = 1;
		advertising |= ADVERTISED_Autoneg;

		neg = advert & lpa;
		neg1000 = (ctrl1000 << 2) & stat1000;

		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
			active_speed = SPEED_1000;
		else if (neg & LPA_100)
			active_speed = SPEED_100;
		else if (neg & (LPA_10HALF | LPA_10FULL))
			active_speed = SPEED_10;
		else
			active_speed = SPEED_INVALID;

		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
			active_duplex = DUPLEX_FULL;
		else if (active_speed != SPEED_INVALID)
			active_duplex = DUPLEX_HALF;
		else
			active_duplex = DUPLEX_INVALID;
	} else {
		lp->active_autoneg = 0;

		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
			active_speed = SPEED_1000;
		else if (bmcr & BMCR_SPEED100)
			active_speed = SPEED_100;
		else
			active_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			active_duplex = DUPLEX_FULL;
		else
			active_duplex = DUPLEX_HALF;
	}

	lp->active_advertising = advertising;
	lp->active_speed = active_speed;
	lp->active_duplex = active_duplex;
	*link_up_p = !!(bmsr & BMSR_LSTATUS);

	return 0;
}

static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;
		adv = err;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;
		lpa = err;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);

	err = link_status_mii(np, link_up_p);
	lp->supported |= SUPPORTED_TP;
	lp->active_advertising |= ADVERTISED_TP;

	spin_unlock_irqrestore(&np->lock, flags);
	return err;
}

static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0 || err == 0xffff)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
			   np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}

/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}

static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}

static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int	err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}

static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info("Port %u cable not connected or bad cable\n",
				np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info("Port %u optical module is bad or missing\n",
				np->port);
		}
	}

	return 0;
}

static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return err;
}

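/* Reset the PHY via BMCR and poll (up to 1000 iterations of 500us
 * each) for the self-clearing BMCR_RESET bit to drop.
 */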
static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
			   np->port, err);
		return -ENODEV;
	}

	return 0;
}

static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}

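/* Common MII bring-up: reset the PHY, then program BMCR (and the
 * advertisement registers when autonegotiating) according to the
 * requested loopback, speed and duplex settings, validating forced
 * modes against the capabilities reported in BMSR/ESTATUS.
 */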
static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	if (lp->autoneg) {
		u16 ctrl1000;

		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
		if ((bmsr & BMSR_10HALF) &&
			(lp->advertising & ADVERTISED_10baseT_Half))
			adv |= ADVERTISE_10HALF;
		if ((bmsr & BMSR_10FULL) &&
			(lp->advertising & ADVERTISED_10baseT_Full))
			adv |= ADVERTISE_10FULL;
		if ((bmsr & BMSR_100HALF) &&
			(lp->advertising & ADVERTISED_100baseT_Half))
			adv |= ADVERTISE_100HALF;
		if ((bmsr & BMSR_100FULL) &&
			(lp->advertising & ADVERTISED_100baseT_Full))
			adv |= ADVERTISE_100FULL;
		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
		if (err)
			return err;

		if (likely(bmsr & BMSR_ESTATEN)) {
			ctrl1000 = 0;
			if ((estat & ESTATUS_1000_THALF) &&
				(lp->advertising & ADVERTISED_1000baseT_Half))
				ctrl1000 |= ADVERTISE_1000HALF;
			if ((estat & ESTATUS_1000_TFULL) &&
				(lp->advertising & ADVERTISED_1000baseT_Full))
				ctrl1000 |= ADVERTISE_1000FULL;
			err = mii_write(np, np->phy_addr,
					MII_CTRL1000, ctrl1000);
			if (err)
				return err;
		}

		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		/* !lp->autoneg */
		int fulldpx;

		if (lp->duplex == DUPLEX_FULL) {
			bmcr |= BMCR_FULLDPLX;
			fulldpx = 1;
		} else if (lp->duplex == DUPLEX_HALF)
			fulldpx = 0;
		else
			return -EINVAL;

		if (lp->speed == SPEED_1000) {
			/* if X-full requested while not supported, or
			   X-half requested while not supported... */
			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED1000;
		} else if (lp->speed == SPEED_100) {
			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
				(!fulldpx && !(bmsr & BMSR_100HALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED100;
		} else if (lp->speed == SPEED_10) {
			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
				(!fulldpx && !(bmsr & BMSR_10HALF)))
				return -EINVAL;
		} else
			return -EINVAL;
	}

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

#if 0
	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);
#endif

	return 0;
}

static int xcvr_init_1g(struct niu *np)
{
	u64 val;

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
}

static int niu_xcvr_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->xcvr_init)
		err = ops->xcvr_init(np);

	return err;
}

static int niu_serdes_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

	return err;
}

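/* Fold a new link state into the net_device: log the transition,
 * reprogram the XIF and LED state under np->lock, and flip the
 * netif carrier accordingly.
 */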
static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);

static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
			   "10Mbit/sec",
			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		netif_warn(np, link, dev, "Link is down\n");
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}

static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}

 1992static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
 1993{
 1994	int err, link_up;
 1995	link_up = 0;
 1996
 1997	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 1998			BCM8704_PMD_RCV_SIGDET);
 1999	if (err < 0 || err == 0xffff)
 2000		goto out;
 2001	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
 2002		err = 0;
 2003		goto out;
 2004	}
 2005
 2006	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 2007			BCM8704_PCS_10G_R_STATUS);
 2008	if (err < 0)
 2009		goto out;
 2010
 2011	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
 2012		err = 0;
 2013		goto out;
 2014	}
 2015
 2016	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 2017			BCM8704_PHYXS_XGXS_LANE_STAT);
 2018	if (err < 0)
 2019		goto out;
 2020	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
 2021		    PHYXS_XGXS_LANE_STAT_MAGIC |
 2022		    PHYXS_XGXS_LANE_STAT_PATTEST |
 2023		    PHYXS_XGXS_LANE_STAT_LANE3 |
 2024		    PHYXS_XGXS_LANE_STAT_LANE2 |
 2025		    PHYXS_XGXS_LANE_STAT_LANE1 |
 2026		    PHYXS_XGXS_LANE_STAT_LANE0)) {
 2027		err = 0;
 2028		np->link_config.active_speed = SPEED_INVALID;
 2029		np->link_config.active_duplex = DUPLEX_INVALID;
 2030		goto out;
 2031	}
 2032
 2033	link_up = 1;
 2034	np->link_config.active_speed = SPEED_10000;
 2035	np->link_config.active_duplex = DUPLEX_FULL;
 2036	err = 0;
 2037
 2038out:
 2039	*link_up_p = link_up;
 2040	return err;
 2041}
 2042
 2043static int link_status_10g_bcom(struct niu *np, int *link_up_p)
 2044{
 2045	int err, link_up;
 2046
 2047	link_up = 0;
 2048
 2049	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 2050			BCM8704_PMD_RCV_SIGDET);
 2051	if (err < 0)
 2052		goto out;
 2053	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
 2054		err = 0;
 2055		goto out;
 2056	}
 2057
 2058	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 2059			BCM8704_PCS_10G_R_STATUS);
 2060	if (err < 0)
 2061		goto out;
 2062	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
 2063		err = 0;
 2064		goto out;
 2065	}
 2066
 2067	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 2068			BCM8704_PHYXS_XGXS_LANE_STAT);
 2069	if (err < 0)
 2070		goto out;
 2071
 2072	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
 2073		    PHYXS_XGXS_LANE_STAT_MAGIC |
 2074		    PHYXS_XGXS_LANE_STAT_LANE3 |
 2075		    PHYXS_XGXS_LANE_STAT_LANE2 |
 2076		    PHYXS_XGXS_LANE_STAT_LANE1 |
 2077		    PHYXS_XGXS_LANE_STAT_LANE0)) {
 2078		err = 0;
 2079		goto out;
 2080	}
 2081
 2082	link_up = 1;
 2083	np->link_config.active_speed = SPEED_10000;
 2084	np->link_config.active_duplex = DUPLEX_FULL;
 2085	err = 0;
 2086
 2087out:
 2088	*link_up_p = link_up;
 2089	return err;
 2090}
 2091
 2092static int link_status_10g(struct niu *np, int *link_up_p)
 2093{
 2094	unsigned long flags;
 2095	int err = -EINVAL;
 2096
 2097	spin_lock_irqsave(&np->lock, flags);
 2098
 2099	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
 2100		int phy_id;
 2101
 2102		phy_id = phy_decode(np->parent->port_phy, np->port);
 2103		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
 2104
 2105		/* handle different phy types */
 2106		switch (phy_id & NIU_PHY_ID_MASK) {
 2107		case NIU_PHY_ID_MRVL88X2011:
 2108			err = link_status_10g_mrvl(np, link_up_p);
 2109			break;
 2110
 2111		default: /* bcom 8704 */
 2112			err = link_status_10g_bcom(np, link_up_p);
 2113			break;
 2114		}
 2115	}
 2116
 2117	spin_unlock_irqrestore(&np->lock, flags);
 2118
 2119	return err;
 2120}
 2121
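     /* Infer the presence of a 10G PHY from the SERDES ready and
      * signal-detect bits in ESR_INT_SIGNALS; only ports 0 and 1 are
      * wired for hotplug detection here.
      */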
 2122static int niu_10g_phy_present(struct niu *np)
 2123{
 2124	u64 sig, mask, val;
 2125
 2126	sig = nr64(ESR_INT_SIGNALS);
 2127	switch (np->port) {
 2128	case 0:
 2129		mask = ESR_INT_SIGNALS_P0_BITS;
 2130		val = (ESR_INT_SRDY0_P0 |
 2131		       ESR_INT_DET0_P0 |
 2132		       ESR_INT_XSRDY_P0 |
 2133		       ESR_INT_XDP_P0_CH3 |
 2134		       ESR_INT_XDP_P0_CH2 |
 2135		       ESR_INT_XDP_P0_CH1 |
 2136		       ESR_INT_XDP_P0_CH0);
 2137		break;
 2138
 2139	case 1:
 2140		mask = ESR_INT_SIGNALS_P1_BITS;
 2141		val = (ESR_INT_SRDY0_P1 |
 2142		       ESR_INT_DET0_P1 |
 2143		       ESR_INT_XSRDY_P1 |
 2144		       ESR_INT_XDP_P1_CH3 |
 2145		       ESR_INT_XDP_P1_CH2 |
 2146		       ESR_INT_XDP_P1_CH1 |
 2147		       ESR_INT_XDP_P1_CH0);
 2148		break;
 2149
 2150	default:
 2151		return 0;
 2152	}
 2153
 2154	if ((sig & mask) != val)
 2155		return 0;
 2156	return 1;
 2157}
 2158
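     /* Link status for hotplug 10G PHYs: track plug/unplug
      * transitions, re-run xcvr_init when a module appears, and treat
      * a silent MDIO bus (reads of 0xffff) as a back-to-back XAUI
      * link that is always up.  Always returns 0 so the timer keeps
      * polling.
      */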
 2159static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
 2160{
 2161	unsigned long flags;
 2162	int err = 0;
 2163	int phy_present;
 2164	int phy_present_prev;
 2165
 2166	spin_lock_irqsave(&np->lock, flags);
 2167
 2168	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
 2169		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
 2170			1 : 0;
 2171		phy_present = niu_10g_phy_present(np);
 2172		if (phy_present != phy_present_prev) {
 2173			/* state change */
 2174			if (phy_present) {
 2175				/* A NEM was just plugged in */
 2176				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2177				if (np->phy_ops->xcvr_init)
 2178					err = np->phy_ops->xcvr_init(np);
 2179				if (err) {
 2180					err = mdio_read(np, np->phy_addr,
 2181						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
 2182					if (err == 0xffff) {
 2183						/* No mdio, back-to-back XAUI */
 2184						goto out;
 2185					}
 2186					/* debounce */
 2187					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2188				}
 2189			} else {
 2190				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2191				*link_up_p = 0;
 2192				netif_warn(np, link, np->dev,
 2193					   "Hotplug PHY Removed\n");
 2194			}
 2195		}
 2196out:
 2197		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
 2198			err = link_status_10g_bcm8706(np, link_up_p);
 2199			if (err == 0xffff) {
 2200				/* No mdio, back-to-back XAUI: it is C10NEM */
 2201				*link_up_p = 1;
 2202				np->link_config.active_speed = SPEED_10000;
 2203				np->link_config.active_duplex = DUPLEX_FULL;
 2204			}
 2205		}
 2206	}
 2207
 2208	spin_unlock_irqrestore(&np->lock, flags);
 2209
 2210	return 0;
 2211}
 2212
 2213static int niu_link_status(struct niu *np, int *link_up_p)
 2214{
 2215	const struct niu_phy_ops *ops = np->phy_ops;
 2216	int err;
 2217
 2218	err = 0;
 2219	if (ops->link_status)
 2220		err = ops->link_status(np, link_up_p);
 2221
 2222	return err;
 2223}
 2224
 2225static void niu_timer(struct timer_list *t)
 2226{
 2227	struct niu *np = from_timer(np, t, timer);
 2228	unsigned long off;
 2229	int err, link_up;
 2230
 2231	err = niu_link_status(np, &link_up);
 2232	if (!err)
 2233		niu_link_status_common(np, link_up);
 2234
 2235	if (netif_carrier_ok(np->dev))
 2236		off = 5 * HZ;
 2237	else
 2238		off = 1 * HZ;
 2239	np->timer.expires = jiffies + off;
 2240
 2241	add_timer(&np->timer);
 2242}
 2243
 2244static const struct niu_phy_ops phy_ops_10g_serdes = {
 2245	.serdes_init		= serdes_init_10g_serdes,
 2246	.link_status		= link_status_10g_serdes,
 2247};
 2248
 2249static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
 2250	.serdes_init		= serdes_init_niu_10g_serdes,
 2251	.link_status		= link_status_10g_serdes,
 2252};
 2253
 2254static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
 2255	.serdes_init		= serdes_init_niu_1g_serdes,
 2256	.link_status		= link_status_1g_serdes,
 2257};
 2258
 2259static const struct niu_phy_ops phy_ops_1g_rgmii = {
 2260	.xcvr_init		= xcvr_init_1g_rgmii,
 2261	.link_status		= link_status_1g_rgmii,
 2262};
 2263
 2264static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
 2265	.serdes_init		= serdes_init_niu_10g_fiber,
 2266	.xcvr_init		= xcvr_init_10g,
 2267	.link_status		= link_status_10g,
 2268};
 2269
 2270static const struct niu_phy_ops phy_ops_10g_fiber = {
 2271	.serdes_init		= serdes_init_10g,
 2272	.xcvr_init		= xcvr_init_10g,
 2273	.link_status		= link_status_10g,
 2274};
 2275
 2276static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
 2277	.serdes_init		= serdes_init_10g,
 2278	.xcvr_init		= xcvr_init_10g_bcm8706,
 2279	.link_status		= link_status_10g_hotplug,
 2280};
 2281
 2282static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
 2283	.serdes_init		= serdes_init_niu_10g_fiber,
 2284	.xcvr_init		= xcvr_init_10g_bcm8706,
 2285	.link_status		= link_status_10g_hotplug,
 2286};
 2287
 2288static const struct niu_phy_ops phy_ops_10g_copper = {
 2289	.serdes_init		= serdes_init_10g,
 2290	.link_status		= link_status_10g, /* XXX */
 2291};
 2292
 2293static const struct niu_phy_ops phy_ops_1g_fiber = {
 2294	.serdes_init		= serdes_init_1g,
 2295	.xcvr_init		= xcvr_init_1g,
 2296	.link_status		= link_status_1g,
 2297};
 2298
 2299static const struct niu_phy_ops phy_ops_1g_copper = {
 2300	.xcvr_init		= xcvr_init_1g,
 2301	.link_status		= link_status_1g,
 2302};
 2303
 2304struct niu_phy_template {
 2305	const struct niu_phy_ops	*ops;
 2306	u32				phy_addr_base;
 2307};
 2308
 2309static const struct niu_phy_template phy_template_niu_10g_fiber = {
 2310	.ops		= &phy_ops_10g_fiber_niu,
 2311	.phy_addr_base	= 16,
 2312};
 2313
 2314static const struct niu_phy_template phy_template_niu_10g_serdes = {
 2315	.ops		= &phy_ops_10g_serdes_niu,
 2316	.phy_addr_base	= 0,
 2317};
 2318
 2319static const struct niu_phy_template phy_template_niu_1g_serdes = {
 2320	.ops		= &phy_ops_1g_serdes_niu,
 2321	.phy_addr_base	= 0,
 2322};
 2323
 2324static const struct niu_phy_template phy_template_10g_fiber = {
 2325	.ops		= &phy_ops_10g_fiber,
 2326	.phy_addr_base	= 8,
 2327};
 2328
 2329static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
 2330	.ops		= &phy_ops_10g_fiber_hotplug,
 2331	.phy_addr_base	= 8,
 2332};
 2333
 2334static const struct niu_phy_template phy_template_niu_10g_hotplug = {
 2335	.ops		= &phy_ops_niu_10g_hotplug,
 2336	.phy_addr_base	= 8,
 2337};
 2338
 2339static const struct niu_phy_template phy_template_10g_copper = {
 2340	.ops		= &phy_ops_10g_copper,
 2341	.phy_addr_base	= 10,
 2342};
 2343
 2344static const struct niu_phy_template phy_template_1g_fiber = {
 2345	.ops		= &phy_ops_1g_fiber,
 2346	.phy_addr_base	= 0,
 2347};
 2348
 2349static const struct niu_phy_template phy_template_1g_copper = {
 2350	.ops		= &phy_ops_1g_copper,
 2351	.phy_addr_base	= 0,
 2352};
 2353
 2354static const struct niu_phy_template phy_template_1g_rgmii = {
 2355	.ops		= &phy_ops_1g_rgmii,
 2356	.phy_addr_base	= 0,
 2357};
 2358
 2359static const struct niu_phy_template phy_template_10g_serdes = {
 2360	.ops		= &phy_ops_10g_serdes,
 2361	.phy_addr_base	= 0,
 2362};
 2363
 2364static int niu_atca_port_num[4] = {
 2365	0, 0, 11, 10
 2366};
 2367
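     /* Bring up the 10G SERDES on port 0 or 1: program the PLL,
      * control and test registers, condition all four lanes, then
      * check the ESR signal bits.  If the 10G signals never assert,
      * fall back to 1G SERDES before reporting failure.
      */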
 2368static int serdes_init_10g_serdes(struct niu *np)
 2369{
 2370	struct niu_link_config *lp = &np->link_config;
 2371	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
 2372	u64 ctrl_val, test_cfg_val, sig, mask, val;
 2373
 2374	switch (np->port) {
 2375	case 0:
 2376		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
 2377		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
 2378		pll_cfg = ENET_SERDES_0_PLL_CFG;
 2379		break;
 2380	case 1:
 2381		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
 2382		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
 2383		pll_cfg = ENET_SERDES_1_PLL_CFG;
 2384		break;
 2385
 2386	default:
 2387		return -EINVAL;
 2388	}
 2389	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
 2390		    ENET_SERDES_CTRL_SDET_1 |
 2391		    ENET_SERDES_CTRL_SDET_2 |
 2392		    ENET_SERDES_CTRL_SDET_3 |
 2393		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
 2394		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
 2395		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
 2396		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
 2397		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
 2398		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
 2399		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
 2400		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
 2401	test_cfg_val = 0;
 2402
 2403	if (lp->loopback_mode == LOOPBACK_PHY) {
 2404		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
 2405				  ENET_SERDES_TEST_MD_0_SHIFT) |
 2406				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2407				  ENET_SERDES_TEST_MD_1_SHIFT) |
 2408				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2409				  ENET_SERDES_TEST_MD_2_SHIFT) |
 2410				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2411				  ENET_SERDES_TEST_MD_3_SHIFT));
 2412	}
 2413
 2414	esr_reset(np);
 2415	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
 2416	nw64(ctrl_reg, ctrl_val);
 2417	nw64(test_cfg_reg, test_cfg_val);
 2418
 2419	/* Initialize all 4 lanes of the SERDES.  */
 2420	for (i = 0; i < 4; i++) {
 2421		u32 rxtx_ctrl, glue0;
 2422		int err;
 2423
 2424		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
 2425		if (err)
 2426			return err;
 2427		err = esr_read_glue0(np, i, &glue0);
 2428		if (err)
 2429			return err;
 2430
 2431		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
 2432		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
 2433			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
 2434
 2435		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
 2436			   ESR_GLUE_CTRL0_THCNT |
 2437			   ESR_GLUE_CTRL0_BLTIME);
 2438		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
 2439			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
 2440			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
 2441			  (BLTIME_300_CYCLES <<
 2442			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
 2443
 2444		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
 2445		if (err)
 2446			return err;
 2447		err = esr_write_glue0(np, i, glue0);
 2448		if (err)
 2449			return err;
 2450	}
 2451
 2453	sig = nr64(ESR_INT_SIGNALS);
 2454	switch (np->port) {
 2455	case 0:
 2456		mask = ESR_INT_SIGNALS_P0_BITS;
 2457		val = (ESR_INT_SRDY0_P0 |
 2458		       ESR_INT_DET0_P0 |
 2459		       ESR_INT_XSRDY_P0 |
 2460		       ESR_INT_XDP_P0_CH3 |
 2461		       ESR_INT_XDP_P0_CH2 |
 2462		       ESR_INT_XDP_P0_CH1 |
 2463		       ESR_INT_XDP_P0_CH0);
 2464		break;
 2465
 2466	case 1:
 2467		mask = ESR_INT_SIGNALS_P1_BITS;
 2468		val = (ESR_INT_SRDY0_P1 |
 2469		       ESR_INT_DET0_P1 |
 2470		       ESR_INT_XSRDY_P1 |
 2471		       ESR_INT_XDP_P1_CH3 |
 2472		       ESR_INT_XDP_P1_CH2 |
 2473		       ESR_INT_XDP_P1_CH1 |
 2474		       ESR_INT_XDP_P1_CH0);
 2475		break;
 2476
 2477	default:
 2478		return -EINVAL;
 2479	}
 2480
 2481	if ((sig & mask) != val) {
 2482		int err;
 2483		err = serdes_init_1g_serdes(np);
 2484		if (!err) {
 2485			np->flags &= ~NIU_FLAGS_10G;
 2486			np->mac_xcvr = MAC_XCVR_PCS;
 2487		} else {
 2488			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
 2489				   np->port);
 2490			return -ENODEV;
 2491		}
 2492	}
 2493
 2494	return 0;
 2495}
 2496
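     /* Choose the PHY ops template and MDIO address for this port
      * from the platform type and the 10G/FIBER/SERDES flag
      * combination discovered during probing.
      */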
 2497static int niu_determine_phy_disposition(struct niu *np)
 2498{
 2499	struct niu_parent *parent = np->parent;
 2500	u8 plat_type = parent->plat_type;
 2501	const struct niu_phy_template *tp;
 2502	u32 phy_addr_off = 0;
 2503
 2504	if (plat_type == PLAT_TYPE_NIU) {
 2505		switch (np->flags &
 2506			(NIU_FLAGS_10G |
 2507			 NIU_FLAGS_FIBER |
 2508			 NIU_FLAGS_XCVR_SERDES)) {
 2509		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 2510			/* 10G Serdes */
 2511			tp = &phy_template_niu_10g_serdes;
 2512			break;
 2513		case NIU_FLAGS_XCVR_SERDES:
 2514			/* 1G Serdes */
 2515			tp = &phy_template_niu_1g_serdes;
 2516			break;
 2517		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 2518			/* 10G Fiber */
 2519		default:
 2520			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
 2521				tp = &phy_template_niu_10g_hotplug;
 2522				if (np->port == 0)
 2523					phy_addr_off = 8;
 2524				if (np->port == 1)
 2525					phy_addr_off = 12;
 2526			} else {
 2527				tp = &phy_template_niu_10g_fiber;
 2528				phy_addr_off += np->port;
 2529			}
 2530			break;
 2531		}
 2532	} else {
 2533		switch (np->flags &
 2534			(NIU_FLAGS_10G |
 2535			 NIU_FLAGS_FIBER |
 2536			 NIU_FLAGS_XCVR_SERDES)) {
 2537		case 0:
 2538			/* 1G copper */
 2539			tp = &phy_template_1g_copper;
 2540			if (plat_type == PLAT_TYPE_VF_P0)
 2541				phy_addr_off = 10;
 2542			else if (plat_type == PLAT_TYPE_VF_P1)
 2543				phy_addr_off = 26;
 2544
 2545			phy_addr_off += (np->port ^ 0x3);
 2546			break;
 2547
 2548		case NIU_FLAGS_10G:
 2549			/* 10G copper */
 2550			tp = &phy_template_10g_copper;
 2551			break;
 2552
 2553		case NIU_FLAGS_FIBER:
 2554			/* 1G fiber */
 2555			tp = &phy_template_1g_fiber;
 2556			break;
 2557
 2558		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 2559			/* 10G fiber */
 2560			tp = &phy_template_10g_fiber;
 2561			if (plat_type == PLAT_TYPE_VF_P0 ||
 2562			    plat_type == PLAT_TYPE_VF_P1)
 2563				phy_addr_off = 8;
 2564			phy_addr_off += np->port;
 2565			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
 2566				tp = &phy_template_10g_fiber_hotplug;
 2567				if (np->port == 0)
 2568					phy_addr_off = 8;
 2569				if (np->port == 1)
 2570					phy_addr_off = 12;
 2571			}
 2572			break;
 2573
 2574		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 2575		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
 2576		case NIU_FLAGS_XCVR_SERDES:
 2577			switch (np->port) {
 2578			case 0:
 2579			case 1:
 2580				tp = &phy_template_10g_serdes;
 2581				break;
 2582			case 2:
 2583			case 3:
 2584				tp = &phy_template_1g_rgmii;
 2585				break;
 2586			default:
 2587				return -EINVAL;
 2588			}
 2589			phy_addr_off = niu_atca_port_num[np->port];
 2590			break;
 2591
 2592		default:
 2593			return -EINVAL;
 2594		}
 2595	}
 2596
 2597	np->phy_ops = tp->ops;
 2598	np->phy_addr = tp->phy_addr_base + phy_addr_off;
 2599
 2600	return 0;
 2601}
 2602
 2603static int niu_init_link(struct niu *np)
 2604{
 2605	struct niu_parent *parent = np->parent;
 2606	int err, ignore;
 2607
 2608	if (parent->plat_type == PLAT_TYPE_NIU) {
 2609		err = niu_xcvr_init(np);
 2610		if (err)
 2611			return err;
 2612		msleep(200);
 2613	}
 2614	err = niu_serdes_init(np);
 2615	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
 2616		return err;
 2617	msleep(200);
 2618	err = niu_xcvr_init(np);
 2619	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
 2620		niu_link_status(np, &ignore);
 2621	return 0;
 2622}
 2623
 2624static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
 2625{
 2626	u16 reg0 = addr[4] << 8 | addr[5];
 2627	u16 reg1 = addr[2] << 8 | addr[3];
 2628	u16 reg2 = addr[0] << 8 | addr[1];
 2629
 2630	if (np->flags & NIU_FLAGS_XMAC) {
 2631		nw64_mac(XMAC_ADDR0, reg0);
 2632		nw64_mac(XMAC_ADDR1, reg1);
 2633		nw64_mac(XMAC_ADDR2, reg2);
 2634	} else {
 2635		nw64_mac(BMAC_ADDR0, reg0);
 2636		nw64_mac(BMAC_ADDR1, reg1);
 2637		nw64_mac(BMAC_ADDR2, reg2);
 2638	}
 2639}
 2640
 2641static int niu_num_alt_addr(struct niu *np)
 2642{
 2643	if (np->flags & NIU_FLAGS_XMAC)
 2644		return XMAC_NUM_ALT_ADDR;
 2645	else
 2646		return BMAC_NUM_ALT_ADDR;
 2647}
 2648
 2649static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
 2650{
 2651	u16 reg0 = addr[4] << 8 | addr[5];
 2652	u16 reg1 = addr[2] << 8 | addr[3];
 2653	u16 reg2 = addr[0] << 8 | addr[1];
 2654
 2655	if (index >= niu_num_alt_addr(np))
 2656		return -EINVAL;
 2657
 2658	if (np->flags & NIU_FLAGS_XMAC) {
 2659		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
 2660		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
 2661		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
 2662	} else {
 2663		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
 2664		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
 2665		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
 2666	}
 2667
 2668	return 0;
 2669}
 2670
 2671static int niu_enable_alt_mac(struct niu *np, int index, int on)
 2672{
 2673	unsigned long reg;
 2674	u64 val, mask;
 2675
 2676	if (index >= niu_num_alt_addr(np))
 2677		return -EINVAL;
 2678
 2679	if (np->flags & NIU_FLAGS_XMAC) {
 2680		reg = XMAC_ADDR_CMPEN;
 2681		mask = 1 << index;
 2682	} else {
 2683		reg = BMAC_ADDR_CMPEN;
 2684		mask = 1 << (index + 1);
 2685	}
 2686
 2687	val = nr64_mac(reg);
 2688	if (on)
 2689		val |= mask;
 2690	else
 2691		val &= ~mask;
 2692	nw64_mac(reg, val);
 2693
 2694	return 0;
 2695}
 2696
 2697static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
 2698				   int num, int mac_pref)
 2699{
 2700	u64 val = nr64_mac(reg);
 2701	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
 2702	val |= num;
 2703	if (mac_pref)
 2704		val |= HOST_INFO_MPR;
 2705	nw64_mac(reg, val);
 2706}
 2707
 2708static int __set_rdc_table_num(struct niu *np,
 2709			       int xmac_index, int bmac_index,
 2710			       int rdc_table_num, int mac_pref)
 2711{
 2712	unsigned long reg;
 2713
 2714	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
 2715		return -EINVAL;
 2716	if (np->flags & NIU_FLAGS_XMAC)
 2717		reg = XMAC_HOST_INFO(xmac_index);
 2718	else
 2719		reg = BMAC_HOST_INFO(bmac_index);
 2720	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
 2721	return 0;
 2722}
 2723
 2724static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
 2725					 int mac_pref)
 2726{
 2727	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
 2728}
 2729
 2730static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
 2731					   int mac_pref)
 2732{
 2733	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
 2734}
 2735
 2736static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
 2737				     int table_num, int mac_pref)
 2738{
 2739	if (idx >= niu_num_alt_addr(np))
 2740		return -EINVAL;
 2741	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
 2742}
 2743
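     /* Recompute the two parity bits of a VLAN table entry: PARITY0
      * is set when bits 0-7 (the port 0/1 fields) contain an odd
      * number of ones, PARITY1 likewise for bits 8-15 (port 2/3).
      * E.g. a reg_val of 0x0300 has two ones in the port 2/3 byte,
      * so PARITY1 ends up clear.
      */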
 2744static u64 vlan_entry_set_parity(u64 reg_val)
 2745{
 2746	u64 port01_mask;
 2747	u64 port23_mask;
 2748
 2749	port01_mask = 0x00ff;
 2750	port23_mask = 0xff00;
 2751
 2752	if (hweight64(reg_val & port01_mask) & 1)
 2753		reg_val |= ENET_VLAN_TBL_PARITY0;
 2754	else
 2755		reg_val &= ~ENET_VLAN_TBL_PARITY0;
 2756
 2757	if (hweight64(reg_val & port23_mask) & 1)
 2758		reg_val |= ENET_VLAN_TBL_PARITY1;
 2759	else
 2760		reg_val &= ~ENET_VLAN_TBL_PARITY1;
 2761
 2762	return reg_val;
 2763}
 2764
 2765static void vlan_tbl_write(struct niu *np, unsigned long index,
 2766			   int port, int vpr, int rdc_table)
 2767{
 2768	u64 reg_val = nr64(ENET_VLAN_TBL(index));
 2769
 2770	reg_val &= ~((ENET_VLAN_TBL_VPR |
 2771		      ENET_VLAN_TBL_VLANRDCTBLN) <<
 2772		     ENET_VLAN_TBL_SHIFT(port));
 2773	if (vpr)
 2774		reg_val |= (ENET_VLAN_TBL_VPR <<
 2775			    ENET_VLAN_TBL_SHIFT(port));
 2776	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
 2777
 2778	reg_val = vlan_entry_set_parity(reg_val);
 2779
 2780	nw64(ENET_VLAN_TBL(index), reg_val);
 2781}
 2782
 2783static void vlan_tbl_clear(struct niu *np)
 2784{
 2785	int i;
 2786
 2787	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
 2788		nw64(ENET_VLAN_TBL(i), 0);
 2789}
 2790
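     /* Poll TCAM_CTL for up to ~1ms waiting for the given bit
      * (in practice TCAM_CTL_STAT, the completion flag) to assert;
      * returns -ENODEV on timeout.
      */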
 2791static int tcam_wait_bit(struct niu *np, u64 bit)
 2792{
 2793	int limit = 1000;
 2794
 2795	while (--limit > 0) {
 2796		if (nr64(TCAM_CTL) & bit)
 2797			break;
 2798		udelay(1);
 2799	}
 2800	if (limit <= 0)
 2801		return -ENODEV;
 2802
 2803	return 0;
 2804}
 2805
 2806static int tcam_flush(struct niu *np, int index)
 2807{
 2808	nw64(TCAM_KEY_0, 0x00);
 2809	nw64(TCAM_KEY_MASK_0, 0xff);
 2810	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
 2811
 2812	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2813}
 2814
 2815#if 0
 2816static int tcam_read(struct niu *np, int index,
 2817		     u64 *key, u64 *mask)
 2818{
 2819	int err;
 2820
 2821	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
 2822	err = tcam_wait_bit(np, TCAM_CTL_STAT);
 2823	if (!err) {
 2824		key[0] = nr64(TCAM_KEY_0);
 2825		key[1] = nr64(TCAM_KEY_1);
 2826		key[2] = nr64(TCAM_KEY_2);
 2827		key[3] = nr64(TCAM_KEY_3);
 2828		mask[0] = nr64(TCAM_KEY_MASK_0);
 2829		mask[1] = nr64(TCAM_KEY_MASK_1);
 2830		mask[2] = nr64(TCAM_KEY_MASK_2);
 2831		mask[3] = nr64(TCAM_KEY_MASK_3);
 2832	}
 2833	return err;
 2834}
 2835#endif
 2836
 2837static int tcam_write(struct niu *np, int index,
 2838		      u64 *key, u64 *mask)
 2839{
 2840	nw64(TCAM_KEY_0, key[0]);
 2841	nw64(TCAM_KEY_1, key[1]);
 2842	nw64(TCAM_KEY_2, key[2]);
 2843	nw64(TCAM_KEY_3, key[3]);
 2844	nw64(TCAM_KEY_MASK_0, mask[0]);
 2845	nw64(TCAM_KEY_MASK_1, mask[1]);
 2846	nw64(TCAM_KEY_MASK_2, mask[2]);
 2847	nw64(TCAM_KEY_MASK_3, mask[3]);
 2848	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
 2849
 2850	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2851}
 2852
 2853#if 0
 2854static int tcam_assoc_read(struct niu *np, int index, u64 *data)
 2855{
 2856	int err;
 2857
 2858	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
 2859	err = tcam_wait_bit(np, TCAM_CTL_STAT);
 2860	if (!err)
 2861		*data = nr64(TCAM_KEY_1);
 2862
 2863	return err;
 2864}
 2865#endif
 2866
 2867static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
 2868{
 2869	nw64(TCAM_KEY_1, assoc_data);
 2870	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
 2871
 2872	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2873}
 2874
 2875static void tcam_enable(struct niu *np, int on)
 2876{
 2877	u64 val = nr64(FFLP_CFG_1);
 2878
 2879	if (on)
 2880		val &= ~FFLP_CFG_1_TCAM_DIS;
 2881	else
 2882		val |= FFLP_CFG_1_TCAM_DIS;
 2883	nw64(FFLP_CFG_1, val);
 2884}
 2885
 2886static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
 2887{
 2888	u64 val = nr64(FFLP_CFG_1);
 2889
 2890	val &= ~(FFLP_CFG_1_FFLPINITDONE |
 2891		 FFLP_CFG_1_CAMLAT |
 2892		 FFLP_CFG_1_CAMRATIO);
 2893	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
 2894	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
 2895	nw64(FFLP_CFG_1, val);
 2896
 2897	val = nr64(FFLP_CFG_1);
 2898	val |= FFLP_CFG_1_FFLPINITDONE;
 2899	nw64(FFLP_CFG_1, val);
 2900}
 2901
 2902static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
 2903				      int on)
 2904{
 2905	unsigned long reg;
 2906	u64 val;
 2907
 2908	if (class < CLASS_CODE_ETHERTYPE1 ||
 2909	    class > CLASS_CODE_ETHERTYPE2)
 2910		return -EINVAL;
 2911
 2912	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
 2913	val = nr64(reg);
 2914	if (on)
 2915		val |= L2_CLS_VLD;
 2916	else
 2917		val &= ~L2_CLS_VLD;
 2918	nw64(reg, val);
 2919
 2920	return 0;
 2921}
 2922
 2923#if 0
 2924static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
 2925				   u64 ether_type)
 2926{
 2927	unsigned long reg;
 2928	u64 val;
 2929
 2930	if (class < CLASS_CODE_ETHERTYPE1 ||
 2931	    class > CLASS_CODE_ETHERTYPE2 ||
 2932	    (ether_type & ~(u64)0xffff) != 0)
 2933		return -EINVAL;
 2934
 2935	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
 2936	val = nr64(reg);
 2937	val &= ~L2_CLS_ETYPE;
 2938	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
 2939	nw64(reg, val);
 2940
 2941	return 0;
 2942}
 2943#endif
 2944
 2945static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
 2946				     int on)
 2947{
 2948	unsigned long reg;
 2949	u64 val;
 2950
 2951	if (class < CLASS_CODE_USER_PROG1 ||
 2952	    class > CLASS_CODE_USER_PROG4)
 2953		return -EINVAL;
 2954
 2955	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
 2956	val = nr64(reg);
 2957	if (on)
 2958		val |= L3_CLS_VALID;
 2959	else
 2960		val &= ~L3_CLS_VALID;
 2961	nw64(reg, val);
 2962
 2963	return 0;
 2964}
 2965
 2966static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
 2967				  int ipv6, u64 protocol_id,
 2968				  u64 tos_mask, u64 tos_val)
 2969{
 2970	unsigned long reg;
 2971	u64 val;
 2972
 2973	if (class < CLASS_CODE_USER_PROG1 ||
 2974	    class > CLASS_CODE_USER_PROG4 ||
 2975	    (protocol_id & ~(u64)0xff) != 0 ||
 2976	    (tos_mask & ~(u64)0xff) != 0 ||
 2977	    (tos_val & ~(u64)0xff) != 0)
 2978		return -EINVAL;
 2979
 2980	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
 2981	val = nr64(reg);
 2982	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
 2983		 L3_CLS_TOSMASK | L3_CLS_TOS);
 2984	if (ipv6)
 2985		val |= L3_CLS_IPVER;
 2986	val |= (protocol_id << L3_CLS_PID_SHIFT);
 2987	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
 2988	val |= (tos_val << L3_CLS_TOS_SHIFT);
 2989	nw64(reg, val);
 2990
 2991	return 0;
 2992}
 2993
 2994static int tcam_early_init(struct niu *np)
 2995{
 2996	unsigned long i;
 2997	int err;
 2998
 2999	tcam_enable(np, 0);
 3000	tcam_set_lat_and_ratio(np,
 3001			       DEFAULT_TCAM_LATENCY,
 3002			       DEFAULT_TCAM_ACCESS_RATIO);
 3003	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
 3004		err = tcam_user_eth_class_enable(np, i, 0);
 3005		if (err)
 3006			return err;
 3007	}
 3008	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
 3009		err = tcam_user_ip_class_enable(np, i, 0);
 3010		if (err)
 3011			return err;
 3012	}
 3013
 3014	return 0;
 3015}
 3016
 3017static int tcam_flush_all(struct niu *np)
 3018{
 3019	unsigned long i;
 3020
 3021	for (i = 0; i < np->parent->tcam_num_entries; i++) {
 3022		int err = tcam_flush(np, i);
 3023		if (err)
 3024			return err;
 3025	}
 3026	return 0;
 3027}
 3028
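     /* Encode an FCRAM hash table address; note the auto-increment
      * bit is set only for single-entry accesses.
      */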
 3029static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
 3030{
 3031	return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
 3032}
 3033
 3034#if 0
 3035static int hash_read(struct niu *np, unsigned long partition,
 3036		     unsigned long index, unsigned long num_entries,
 3037		     u64 *data)
 3038{
 3039	u64 val = hash_addr_regval(index, num_entries);
 3040	unsigned long i;
 3041
 3042	if (partition >= FCRAM_NUM_PARTITIONS ||
 3043	    index + num_entries > FCRAM_SIZE)
 3044		return -EINVAL;
 3045
 3046	nw64(HASH_TBL_ADDR(partition), val);
 3047	for (i = 0; i < num_entries; i++)
 3048		data[i] = nr64(HASH_TBL_DATA(partition));
 3049
 3050	return 0;
 3051}
 3052#endif
 3053
 3054static int hash_write(struct niu *np, unsigned long partition,
 3055		      unsigned long index, unsigned long num_entries,
 3056		      u64 *data)
 3057{
 3058	u64 val = hash_addr_regval(index, num_entries);
 3059	unsigned long i;
 3060
 3061	if (partition >= FCRAM_NUM_PARTITIONS ||
 3062	    index + (num_entries * 8) > FCRAM_SIZE)
 3063		return -EINVAL;
 3064
 3065	nw64(HASH_TBL_ADDR(partition), val);
 3066	for (i = 0; i < num_entries; i++)
 3067		nw64(HASH_TBL_DATA(partition), data[i]);
 3068
 3069	return 0;
 3070}
 3071
 3072static void fflp_reset(struct niu *np)
 3073{
 3074	u64 val;
 3075
 3076	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
 3077	udelay(10);
 3078	nw64(FFLP_CFG_1, 0);
 3079
 3080	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
 3081	nw64(FFLP_CFG_1, val);
 3082}
 3083
 3084static void fflp_set_timings(struct niu *np)
 3085{
 3086	u64 val = nr64(FFLP_CFG_1);
 3087
 3088	val &= ~FFLP_CFG_1_FFLPINITDONE;
 3089	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
 3090	nw64(FFLP_CFG_1, val);
 3091
 3092	val = nr64(FFLP_CFG_1);
 3093	val |= FFLP_CFG_1_FFLPINITDONE;
 3094	nw64(FFLP_CFG_1, val);
 3095
 3096	val = nr64(FCRAM_REF_TMR);
 3097	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
 3098	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
 3099	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
 3100	nw64(FCRAM_REF_TMR, val);
 3101}
 3102
 3103static int fflp_set_partition(struct niu *np, u64 partition,
 3104			      u64 mask, u64 base, int enable)
 3105{
 3106	unsigned long reg;
 3107	u64 val;
 3108
 3109	if (partition >= FCRAM_NUM_PARTITIONS ||
 3110	    (mask & ~(u64)0x1f) != 0 ||
 3111	    (base & ~(u64)0x1f) != 0)
 3112		return -EINVAL;
 3113
 3114	reg = FLW_PRT_SEL(partition);
 3115
 3116	val = nr64(reg);
 3117	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
 3118	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
 3119	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
 3120	if (enable)
 3121		val |= FLW_PRT_SEL_EXT;
 3122	nw64(reg, val);
 3123
 3124	return 0;
 3125}
 3126
 3127static int fflp_disable_all_partitions(struct niu *np)
 3128{
 3129	unsigned long i;
 3130
 3131	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
 3132		int err = fflp_set_partition(np, i, 0, 0, 0);
 3133		if (err)
 3134			return err;
 3135	}
 3136	return 0;
 3137}
 3138
 3139static void fflp_llcsnap_enable(struct niu *np, int on)
 3140{
 3141	u64 val = nr64(FFLP_CFG_1);
 3142
 3143	if (on)
 3144		val |= FFLP_CFG_1_LLCSNAP;
 3145	else
 3146		val &= ~FFLP_CFG_1_LLCSNAP;
 3147	nw64(FFLP_CFG_1, val);
 3148}
 3149
 3150static void fflp_errors_enable(struct niu *np, int on)
 3151{
 3152	u64 val = nr64(FFLP_CFG_1);
 3153
 3154	if (on)
 3155		val &= ~FFLP_CFG_1_ERRORDIS;
 3156	else
 3157		val |= FFLP_CFG_1_ERRORDIS;
 3158	nw64(FFLP_CFG_1, val);
 3159}
 3160
 3161static int fflp_hash_clear(struct niu *np)
 3162{
 3163	struct fcram_hash_ipv4 ent;
 3164	unsigned long i;
 3165
 3166	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
 3167	memset(&ent, 0, sizeof(ent));
 3168	ent.header = HASH_HEADER_EXT;
 3169
 3170	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
 3171		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
 3172		if (err)
 3173			return err;
 3174	}
 3175	return 0;
 3176}
 3177
 3178static int fflp_early_init(struct niu *np)
 3179{
 3180	struct niu_parent *parent;
 3181	unsigned long flags;
 3182	int err;
 3183
 3184	niu_lock_parent(np, flags);
 3185
 3186	parent = np->parent;
 3187	err = 0;
 3188	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
 3189		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 3190			fflp_reset(np);
 3191			fflp_set_timings(np);
 3192			err = fflp_disable_all_partitions(np);
 3193			if (err) {
 3194				netif_printk(np, probe, KERN_DEBUG, np->dev,
 3195					     "fflp_disable_all_partitions failed, err=%d\n",
 3196					     err);
 3197				goto out;
 3198			}
 3199		}
 3200
 3201		err = tcam_early_init(np);
 3202		if (err) {
 3203			netif_printk(np, probe, KERN_DEBUG, np->dev,
 3204				     "tcam_early_init failed, err=%d\n", err);
 3205			goto out;
 3206		}
 3207		fflp_llcsnap_enable(np, 1);
 3208		fflp_errors_enable(np, 0);
 3209		nw64(H1POLY, 0);
 3210		nw64(H2POLY, 0);
 3211
 3212		err = tcam_flush_all(np);
 3213		if (err) {
 3214			netif_printk(np, probe, KERN_DEBUG, np->dev,
 3215				     "tcam_flush_all failed, err=%d\n", err);
 3216			goto out;
 3217		}
 3218		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 3219			err = fflp_hash_clear(np);
 3220			if (err) {
 3221				netif_printk(np, probe, KERN_DEBUG, np->dev,
 3222					     "fflp_hash_clear failed, err=%d\n",
 3223					     err);
 3224				goto out;
 3225			}
 3226		}
 3227
 3228		vlan_tbl_clear(np);
 3229
 3230		parent->flags |= PARENT_FLGS_CLS_HWINIT;
 3231	}
 3232out:
 3233	niu_unlock_parent(np, flags);
 3234	return err;
 3235}
 3236
 3237static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
 3238{
 3239	if (class_code < CLASS_CODE_USER_PROG1 ||
 3240	    class_code > CLASS_CODE_SCTP_IPV6)
 3241		return -EINVAL;
 3242
 3243	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
 3244	return 0;
 3245}
 3246
 3247static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
 3248{
 3249	if (class_code < CLASS_CODE_USER_PROG1 ||
 3250	    class_code > CLASS_CODE_SCTP_IPV6)
 3251		return -EINVAL;
 3252
 3253	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
 3254	return 0;
 3255}
 3256
 3257/* Entries for the ports are interleaved in the TCAM: logical index
      * idx for this port maps to entry tcam_top + (idx + 1) * num_ports.
      */
 3258static u16 tcam_get_index(struct niu *np, u16 idx)
 3259{
 3260	/* One entry reserved for IP fragment rule */
 3261	if (idx >= (np->clas.tcam_sz - 1))
 3262		idx = 0;
 3263	return np->clas.tcam_top + ((idx + 1) * np->parent->num_ports);
 3264}
 3265
 3266static u16 tcam_get_size(struct niu *np)
 3267{
 3268	/* One entry reserved for IP fragment rule */
 3269	return np->clas.tcam_sz - 1;
 3270}
 3271
 3272static u16 tcam_get_valid_entry_cnt(struct niu *np)
 3273{
 3274	/* One entry reserved for IP fragment rule */
 3275	return np->clas.tcam_valid_entries - 1;
 3276}
 3277
 3278static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
 3279			      u32 offset, u32 size, u32 truesize)
 3280{
 3281	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
 3282
 3283	skb->len += size;
 3284	skb->data_len += size;
 3285	skb->truesize += truesize;
 3286}
 3287
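     /* Hash a DMA address into an rxhash bucket: discard the in-page
      * offset, fold the upper page-frame bits together, and mask down
      * to the ring size.
      */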
 3288static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
 3289{
 3290	a >>= PAGE_SHIFT;
 3291	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
 3292
 3293	return a & (MAX_RBR_RING_SIZE - 1);
 3294}
 3295
 3296static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
 3297				    struct page ***link)
 3298{
 3299	unsigned int h = niu_hash_rxaddr(rp, addr);
 3300	struct page *p, **pp;
 3301
 3302	addr &= PAGE_MASK;
 3303	pp = &rp->rxhash[h];
 3304	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
 3305		if (p->index == addr) {
 3306			*link = pp;
 3307			goto found;
 3308		}
 3309	}
 3310	BUG();
 3311
 3312found:
 3313	return p;
 3314}
 3315
 3316static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
 3317{
 3318	unsigned int h = niu_hash_rxaddr(rp, base);
 3319
 3320	page->index = base;
 3321	page->mapping = (struct address_space *) rp->rxhash[h];
 3322	rp->rxhash[h] = page;
 3323}
 3324
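     /* Allocate and DMA-map one page, hash it by address for later
      * lookup, and carve it into rbr_blocks_per_page RBR entries.
      * The extra page references taken here let each block be
      * released independently on the completion path.
      */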
 3325static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
 3326			    gfp_t mask, int start_index)
 3327{
 3328	struct page *page;
 3329	u64 addr;
 3330	int i;
 3331
 3332	page = alloc_page(mask);
 3333	if (!page)
 3334		return -ENOMEM;
 3335
 3336	addr = np->ops->map_page(np->device, page, 0,
 3337				 PAGE_SIZE, DMA_FROM_DEVICE);
 3338	if (!addr) {
 3339		__free_page(page);
 3340		return -ENOMEM;
 3341	}
 3342
 3343	niu_hash_page(rp, page, addr);
 3344	if (rp->rbr_blocks_per_page > 1)
 3345		page_ref_add(page, rp->rbr_blocks_per_page - 1);
 3346
 3347	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
 3348		__le32 *rbr = &rp->rbr[start_index + i];
 3349
 3350		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
 3351		addr += rp->rbr_block_size;
 3352	}
 3353
 3354	return 0;
 3355}
 3356
 3357static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
 3358{
 3359	int index = rp->rbr_index;
 3360
 3361	rp->rbr_pending++;
 3362	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
 3363		int err = niu_rbr_add_page(np, rp, mask, index);
 3364
 3365		if (unlikely(err)) {
 3366			rp->rbr_pending--;
 3367			return;
 3368		}
 3369
 3370		rp->rbr_index += rp->rbr_blocks_per_page;
 3371		BUG_ON(rp->rbr_index > rp->rbr_table_size);
 3372		if (rp->rbr_index == rp->rbr_table_size)
 3373			rp->rbr_index = 0;
 3374
 3375		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
 3376			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
 3377			rp->rbr_pending = 0;
 3378		}
 3379	}
 3380}
 3381
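     /* Discard one packet: walk its RCR entries without building an
      * skb, unmapping and freeing each backing page once its final
      * block is consumed.  Returns the number of RCR entries retired.
      */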
 3382static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
 3383{
 3384	unsigned int index = rp->rcr_index;
 3385	int num_rcr = 0;
 3386
 3387	rp->rx_dropped++;
 3388	while (1) {
 3389		struct page *page, **link;
 3390		u64 addr, val;
 3391		u32 rcr_size;
 3392
 3393		num_rcr++;
 3394
 3395		val = le64_to_cpup(&rp->rcr[index]);
 3396		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 3397			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
 3398		page = niu_find_rxpage(rp, addr, &link);
 3399
 3400		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 3401					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 3402		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
 3403			*link = (struct page *) page->mapping;
 3404			np->ops->unmap_page(np->device, page->index,
 3405					    PAGE_SIZE, DMA_FROM_DEVICE);
 3406			page->index = 0;
 3407			page->mapping = NULL;
 3408			__free_page(page);
 3409			rp->rbr_refill_pending++;
 3410		}
 3411
 3412		index = NEXT_RCR(rp, index);
 3413		if (!(val & RCR_ENTRY_MULTI))
 3414			break;
 3416	}
 3417	rp->rcr_index = index;
 3418
 3419	return num_rcr;
 3420}
 3421
 3422static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
 3423			      struct rx_ring_info *rp)
 3424{
 3425	unsigned int index = rp->rcr_index;
 3426	struct rx_pkt_hdr1 *rh;
 3427	struct sk_buff *skb;
 3428	int len, num_rcr;
 3429
 3430	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
 3431	if (unlikely(!skb))
 3432		return niu_rx_pkt_ignore(np, rp);
 3433
 3434	num_rcr = 0;
 3435	while (1) {
 3436		struct page *page, **link;
 3437		u32 rcr_size, append_size;
 3438		u64 addr, val, off;
 3439
 3440		num_rcr++;
 3441
 3442		val = le64_to_cpup(&rp->rcr[index]);
 3443
 3444		len = (val & RCR_ENTRY_L2_LEN) >>
 3445			RCR_ENTRY_L2_LEN_SHIFT;
 3446		append_size = len + ETH_HLEN + ETH_FCS_LEN;
 3447
 3448		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 3449			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
 3450		page = niu_find_rxpage(rp, addr, &link);
 3451
 3452		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 3453					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 3454
 3455		off = addr & ~PAGE_MASK;
 3456		if (num_rcr == 1) {
 3457			int ptype;
 3458
 3459			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
 3460			if ((ptype == RCR_PKT_TYPE_TCP ||
 3461			     ptype == RCR_PKT_TYPE_UDP) &&
 3462			    !(val & (RCR_ENTRY_NOPORT |
 3463				     RCR_ENTRY_ERROR)))
 3464				skb->ip_summed = CHECKSUM_UNNECESSARY;
 3465			else
 3466				skb_checksum_none_assert(skb);
 3467		} else if (!(val & RCR_ENTRY_MULTI))
 3468			append_size = append_size - skb->len;
 3469
 3470		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
 3471		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
 3472			*link = (struct page *) page->mapping;
 3473			np->ops->unmap_page(np->device, page->index,
 3474					    PAGE_SIZE, DMA_FROM_DEVICE);
 3475			page->index = 0;
 3476			page->mapping = NULL;
 3477			rp->rbr_refill_pending++;
 3478		} else
 3479			get_page(page);
 3480
 3481		index = NEXT_RCR(rp, index);
 3482		if (!(val & RCR_ENTRY_MULTI))
 3483			break;
 3485	}
 3486	rp->rcr_index = index;
 3487
 3488	len += sizeof(*rh);
 3489	len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
 3490	__pskb_pull_tail(skb, len);
 3491
 3492	rh = (struct rx_pkt_hdr1 *) skb->data;
 3493	if (np->dev->features & NETIF_F_RXHASH)
 3494		skb_set_hash(skb,
 3495			     ((u32)rh->hashval2_0 << 24 |
 3496			      (u32)rh->hashval2_1 << 16 |
 3497			      (u32)rh->hashval1_1 << 8 |
 3498			      (u32)rh->hashval1_2 << 0),
 3499			     PKT_HASH_TYPE_L3);
 3500	skb_pull(skb, sizeof(*rh));
 3501
 3502	rp->rx_packets++;
 3503	rp->rx_bytes += skb->len;
 3504
 3505	skb->protocol = eth_type_trans(skb, np->dev);
 3506	skb_record_rx_queue(skb, rp->rx_channel);
 3507	napi_gro_receive(napi, skb);
 3508
 3509	return num_rcr;
 3510}
 3511
 3512static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
 3513{
 3514	int blocks_per_page = rp->rbr_blocks_per_page;
 3515	int err, index = rp->rbr_index;
 3516
 3517	err = 0;
 3518	while (index < (rp->rbr_table_size - blocks_per_page)) {
 3519		err = niu_rbr_add_page(np, rp, mask, index);
 3520		if (unlikely(err))
 3521			break;
 3522
 3523		index += blocks_per_page;
 3524	}
 3525
 3526	rp->rbr_index = index;
 3527	return err;
 3528}
 3529
 3530static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
 3531{
 3532	int i;
 3533
 3534	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
 3535		struct page *page;
 3536
 3537		page = rp->rxhash[i];
 3538		while (page) {
 3539			struct page *next = (struct page *) page->mapping;
 3540			u64 base = page->index;
 3541
 3542			np->ops->unmap_page(np->device, base, PAGE_SIZE,
 3543					    DMA_FROM_DEVICE);
 3544			page->index = 0;
 3545			page->mapping = NULL;
 3546
 3547			__free_page(page);
 3548
 3549			page = next;
 3550		}
 3551	}
 3552
 3553	for (i = 0; i < rp->rbr_table_size; i++)
 3554		rp->rbr[i] = cpu_to_le32(0);
 3555	rp->rbr_index = 0;
 3556}
 3557
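     /* Reclaim one completed TX packet starting at idx: account
      * bytes and packets from the TX header, unmap the linear area
      * and every fragment, then return the next consumer index.
      */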
 3558static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 3559{
 3560	struct tx_buff_info *tb = &rp->tx_buffs[idx];
 3561	struct sk_buff *skb = tb->skb;
 3562	struct tx_pkt_hdr *tp;
 3563	u64 tx_flags;
 3564	int i, len;
 3565
 3566	tp = (struct tx_pkt_hdr *) skb->data;
 3567	tx_flags = le64_to_cpup(&tp->flags);
 3568
 3569	rp->tx_packets++;
 3570	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
 3571			 ((tx_flags & TXHDR_PAD) / 2));
 3572
 3573	len = skb_headlen(skb);
 3574	np->ops->unmap_single(np->device, tb->mapping,
 3575			      len, DMA_TO_DEVICE);
 3576
 3577	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
 3578		rp->mark_pending--;
 3579
 3580	tb->skb = NULL;
 3581	do {
 3582		idx = NEXT_TX(rp, idx);
 3583		len -= MAX_TX_DESC_LEN;
 3584	} while (len > 0);
 3585
 3586	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 3587		tb = &rp->tx_buffs[idx];
 3588		BUG_ON(tb->skb != NULL);
 3589		np->ops->unmap_page(np->device, tb->mapping,
 3590				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
 3591				    DMA_TO_DEVICE);
 3592		idx = NEXT_TX(rp, idx);
 3593	}
 3594
 3595	dev_kfree_skb(skb);
 3596
 3597	return idx;
 3598}
 3599
 3600#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
 3601
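     /* Service TX completions for one ring.  The hardware packet
      * counter in TX_CS is a narrow field that wraps, so the delta
      * from last_pkt_cnt is taken modulo the field width.  The queue
      * is woken once at least a quarter of the ring is free again.
      */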
 3602static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 3603{
 3604	struct netdev_queue *txq;
 3605	u16 pkt_cnt, tmp;
 3606	int cons, index;
 3607	u64 cs;
 3608
 3609	index = (rp - np->tx_rings);
 3610	txq = netdev_get_tx_queue(np->dev, index);
 3611
 3612	cs = rp->tx_cs;
 3613	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
 3614		goto out;
 3615
 3616	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
 3617	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
 3618		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
 3619
 3620	rp->last_pkt_cnt = tmp;
 3621
 3622	cons = rp->cons;
 3623
 3624	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
 3625		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 3626
 3627	while (pkt_cnt--)
 3628		cons = release_tx_packet(np, rp, cons);
 3629
 3630	rp->cons = cons;
 3631	smp_mb();
 3632
 3633out:
 3634	if (unlikely(netif_tx_queue_stopped(txq) &&
 3635		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
 3636		__netif_tx_lock(txq, smp_processor_id());
 3637		if (netif_tx_queue_stopped(txq) &&
 3638		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
 3639			netif_tx_wake_queue(txq);
 3640		__netif_tx_unlock(txq);
 3641	}
 3642}
 3643
 3644static inline void niu_sync_rx_discard_stats(struct niu *np,
 3645					     struct rx_ring_info *rp,
 3646					     const int limit)
 3647{
 3648	/* This elaborate scheme is needed for reading the RX discard
 3649	 * counters, as they are only 16-bit and can overflow quickly,
 3650	 * and because the overflow indication bit is not usable as
 3651	 * the counter value does not wrap, but remains at max value
 3652	 * 0xFFFF.
 3653	 *
 3654	 * In theory and in practice counters can be lost in between
 3655	 * reading nr64() and clearing the counter nw64().  For this
 3656	 * reason, the number of counter clearings nw64() is
 3657	 * limited/reduced through the limit parameter.
 3658	 */
 3659	int rx_channel = rp->rx_channel;
 3660	u32 misc, wred;
 3661
 3662	/* RXMISC (Receive Miscellaneous Discard Count) covers the
 3663	 * following discard events: IPP (Input Port Process),
 3664	 * FFLP/TCAM, full RCR (Receive Completion Ring), and empty
 3665	 * RBR (Receive Block Ring) prefetch buffer.
 3666	 */
 3667	misc = nr64(RXMISC(rx_channel));
 3668	if (unlikely((misc & RXMISC_COUNT) > limit)) {
 3669		nw64(RXMISC(rx_channel), 0);
 3670		rp->rx_errors += misc & RXMISC_COUNT;
 3671
 3672		if (unlikely(misc & RXMISC_OFLOW))
 3673			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
 3674				rx_channel);
 3675
 3676		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
 3677			     "rx-%d: MISC drop=%u over=%u\n",
 3678			     rx_channel, misc, misc - limit);
 3679	}
 3680
 3681	/* WRED (Weighted Random Early Discard) by hardware */
 3682	wred = nr64(RED_DIS_CNT(rx_channel));
 3683	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
 3684		nw64(RED_DIS_CNT(rx_channel), 0);
 3685		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
 3686
 3687		if (unlikely(wred & RED_DIS_CNT_OFLOW))
 3688			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
 3689
 3690		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
 3691			     "rx-%d: WRED drop=%u over=%u\n",
 3692			     rx_channel, wred, wred - limit);
 3693	}
 3694}
 3695
 3696static int niu_rx_work(struct napi_struct *napi, struct niu *np,
 3697		       struct rx_ring_info *rp, int budget)
 3698{
 3699	int qlen, rcr_done = 0, work_done = 0;
 3700	struct rxdma_mailbox *mbox = rp->mbox;
 3701	u64 stat;
 3702
 3703#if 1
 3704	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 3705	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
 3706#else
 3707	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
 3708	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
 3709#endif
 3710	mbox->rx_dma_ctl_stat = 0;
 3711	mbox->rcrstat_a = 0;
 3712
 3713	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
 3714		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
 3715		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
 3716
 3717	rcr_done = work_done = 0;
 3718	qlen = min(qlen, budget);
 3719	while (work_done < qlen) {
 3720		rcr_done += niu_process_rx_pkt(napi, np, rp);
 3721		work_done++;
 3722	}
 3723
 3724	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
 3725		unsigned int i;
 3726
 3727		for (i = 0; i < rp->rbr_refill_pending; i++)
 3728			niu_rbr_refill(np, rp, GFP_ATOMIC);
 3729		rp->rbr_refill_pending = 0;
 3730	}
 3731
 3732	stat = (RX_DMA_CTL_STAT_MEX |
 3733		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
 3734		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
 3735
 3736	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
 3737
 3738	/* Only sync discard stats when qlen indicates potential for drops */
 3739	if (qlen > 10)
 3740		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
 3741
 3742	return work_done;
 3743}
 3744
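     /* NAPI core: the cached v0 vector packs TX channel interrupts
      * in the upper 32 bits and RX channels in the lower 32.  TX
      * completion work is not budgeted; only RX work counts against
      * the NAPI budget.
      */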
 3745static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
 3746{
 3747	u64 v0 = lp->v0;
 3748	u32 tx_vec = (v0 >> 32);
 3749	u32 rx_vec = (v0 & 0xffffffff);
 3750	int i, work_done = 0;
 3751
 3752	netif_printk(np, intr, KERN_DEBUG, np->dev,
 3753		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
 3754
 3755	for (i = 0; i < np->num_tx_rings; i++) {
 3756		struct tx_ring_info *rp = &np->tx_rings[i];
 3757		if (tx_vec & (1 << rp->tx_channel))
 3758			niu_tx_work(np, rp);
 3759		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
 3760	}
 3761
 3762	for (i = 0; i < np->num_rx_rings; i++) {
 3763		struct rx_ring_info *rp = &np->rx_rings[i];
 3764
 3765		if (rx_vec & (1 << rp->rx_channel)) {
 3766			int this_work_done;
 3767
 3768			this_work_done = niu_rx_work(&lp->napi, np, rp,
 3769						     budget);
 3770
 3771			budget -= this_work_done;
 3772			work_done += this_work_done;
 3773		}
 3774		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
 3775	}
 3776
 3777	return work_done;
 3778}
 3779
 3780static int niu_poll(struct napi_struct *napi, int budget)
 3781{
 3782	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
 3783	struct niu *np = lp->np;
 3784	int work_done;
 3785
 3786	work_done = niu_poll_core(np, lp, budget);
 3787
 3788	if (work_done < budget) {
 3789		napi_complete_done(napi, work_done);
 3790		niu_ldg_rearm(np, lp, 1);
 3791	}
 3792	return work_done;
 3793}
 3794
 3795static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
 3796				  u64 stat)
 3797{
 3798	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
 3799
 3800	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
 3801		pr_cont("RBR_TMOUT ");
 3802	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
 3803		pr_cont("RSP_CNT ");
 3804	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
 3805		pr_cont("BYTE_EN_BUS ");
 3806	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
 3807		pr_cont("RSP_DAT ");
 3808	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
 3809		pr_cont("RCR_ACK ");
 3810	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
 3811		pr_cont("RCR_SHA_PAR ");
 3812	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
 3813		pr_cont("RBR_PRE_PAR ");
 3814	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
 3815		pr_cont("CONFIG ");
 3816	if (stat & RX_DMA_CTL_STAT_RCRINCON)
 3817		pr_cont("RCRINCON ");
 3818	if (stat & RX_DMA_CTL_STAT_RCRFULL)
 3819		pr_cont("RCRFULL ");
 3820	if (stat & RX_DMA_CTL_STAT_RBRFULL)
 3821		pr_cont("RBRFULL ");
 3822	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
 3823		pr_cont("RBRLOGPAGE ");
 3824	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
 3825		pr_cont("CFIGLOGPAGE ");
 3826	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
 3827		pr_cont("DC_FIFO ");
 3828
 3829	pr_cont(")\n");
 3830}
 3831
 3832static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
 3833{
 3834	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 3835	int err = 0;
 3836
 3838	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
 3839		    RX_DMA_CTL_STAT_PORT_FATAL))
 3840		err = -EINVAL;
 3841
 3842	if (err) {
 3843		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
 3844			   rp->rx_channel,
 3845			   (unsigned long long) stat);
 3846
 3847		niu_log_rxchan_errors(np, rp, stat);
 3848	}
 3849
 3850	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 3851	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
 3852
 3853	return err;
 3854}
 3855
 3856static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
 3857				  u64 cs)
 3858{
 3859	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
 3860
 3861	if (cs & TX_CS_MBOX_ERR)
 3862		pr_cont("MBOX ");
 3863	if (cs & TX_CS_PKT_SIZE_ERR)
 3864		pr_cont("PKT_SIZE ");
 3865	if (cs & TX_CS_TX_RING_OFLOW)
 3866		pr_cont("TX_RING_OFLOW ");
 3867	if (cs & TX_CS_PREF_BUF_PAR_ERR)
 3868		pr_cont("PREF_BUF_PAR ");
 3869	if (cs & TX_CS_NACK_PREF)
 3870		pr_cont("NACK_PREF ");
 3871	if (cs & TX_CS_NACK_PKT_RD)
 3872		pr_cont("NACK_PKT_RD ");
 3873	if (cs & TX_CS_CONF_PART_ERR)
 3874		pr_cont("CONF_PART ");
 3875	if (cs & TX_CS_PKT_PRT_ERR)
 3876		pr_cont("PKT_PTR ");
 3877
 3878	pr_cont(")\n");
 3879}
 3880
 3881static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
 3882{
 3883	u64 cs, logh, logl;
 3884
 3885	cs = nr64(TX_CS(rp->tx_channel));
 3886	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
 3887	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
 3888
 3889	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
 3890		   rp->tx_channel,
 3891		   (unsigned long long)cs,
 3892		   (unsigned long long)logh,
 3893		   (unsigned long long)logl);
 3894
 3895	niu_log_txchan_errors(np, rp, cs);
 3896
 3897	return -ENODEV;
 3898}
 3899
 3900static int niu_mif_interrupt(struct niu *np)
 3901{
 3902	u64 mif_status = nr64(MIF_STATUS);
 3903	int phy_mdint = 0;
 3904
 3905	if (np->flags & NIU_FLAGS_XMAC) {
 3906		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
 3907
 3908		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
 3909			phy_mdint = 1;
 3910	}
 3911
 3912	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
 3913		   (unsigned long long)mif_status, phy_mdint);
 3914
 3915	return -ENODEV;
 3916}
 3917
 3918static void niu_xmac_interrupt(struct niu *np)
 3919{
 3920	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
 3921	u64 val;
 3922
 3923	val = nr64_mac(XTXMAC_STATUS);
 3924	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
 3925		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
 3926	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
 3927		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
 3928	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
 3929		mp->tx_fifo_errors++;
 3930	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
 3931		mp->tx_overflow_errors++;
 3932	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
 3933		mp->tx_max_pkt_size_errors++;
 3934	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
 3935		mp->tx_underflow_errors++;
 3936
 3937	val = nr64_mac(XRXMAC_STATUS);
 3938	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
 3939		mp->rx_local_faults++;
 3940	if (val & XRXMAC_STATUS_RFLT_DET)
 3941		mp->rx_remote_faults++;
 3942	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
 3943		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
 3944	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
 3945		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
 3946	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
 3947		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
 3948	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
 3949		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
 3950	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
 3951		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
 3954	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
 3955		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
 3956	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
 3957		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
 3958	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
 3959		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
 3960	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
 3961		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
 3962	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
 3963		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
 3964	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
 3965		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
 3966	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
 3967		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
 3968	if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
 3969		mp->rx_octets += RXMAC_BT_CNT_COUNT;
 3970	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
 3971		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
 3972	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
 3973		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
 3974	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
 3975		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
 3976	if (val & XRXMAC_STATUS_RXUFLOW)
 3977		mp->rx_underflows++;
 3978	if (val & XRXMAC_STATUS_RXOFLOW)
 3979		mp->rx_overflows++;
 3980
 3981	val = nr64_mac(XMAC_FC_STAT);
 3982	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
 3983		mp->pause_off_state++;
 3984	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
 3985		mp->pause_on_state++;
 3986	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
 3987		mp->pause_received++;
 3988}
 3989
 3990static void niu_bmac_interrupt(struct niu *np)
 3991{
 3992	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
 3993	u64 val;
 3994
 3995	val = nr64_mac(BTXMAC_STATUS);
 3996	if (val & BTXMAC_STATUS_UNDERRUN)
 3997		mp->tx_underflow_errors++;
 3998	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
 3999		mp->tx_max_pkt_size_errors++;
 4000	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
 4001		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
 4002	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
 4003		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
 4004
 4005	val = nr64_mac(BRXMAC_STATUS);
 4006	if (val & BRXMAC_STATUS_OVERFLOW)
 4007		mp->rx_overflows++;
 4008	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
 4009		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
 4010	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
 4011		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
 4012	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
 4013		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
 4014	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
 4015		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
 4016
 4017	val = nr64_mac(BMAC_CTRL_STATUS);
 4018	if (val & BMAC_CTRL_STATUS_NOPAUSE)
 4019		mp->pause_off_state++;
 4020	if (val & BMAC_CTRL_STATUS_PAUSE)
 4021		mp->pause_on_state++;
 4022	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
 4023		mp->pause_received++;
 4024}
 4025
 4026static int niu_mac_interrupt(struct niu *np)
 4027{
 4028	if (np->flags & NIU_FLAGS_XMAC)
 4029		niu_xmac_interrupt(np);
 4030	else
 4031		niu_bmac_interrupt(np);
 4032
 4033	return 0;
 4034}
 4035
 4036static void niu_log_device_error(struct niu *np, u64 stat)
 4037{
 4038	netdev_err(np->dev, "Core device errors ( ");
 4039
 4040	if (stat & SYS_ERR_MASK_META2)
 4041		pr_cont("META2 ");
 4042	if (stat & SYS_ERR_MASK_META1)
 4043		pr_cont("META1 ");
 4044	if (stat & SYS_ERR_MASK_PEU)
 4045		pr_cont("PEU ");
 4046	if (stat & SYS_ERR_MASK_TXC)
 4047		pr_cont("TXC ");
 4048	if (stat & SYS_ERR_MASK_RDMC)
 4049		pr_cont("RDMC ");
 4050	if (stat & SYS_ERR_MASK_TDMC)
 4051		pr_cont("TDMC ");
 4052	if (stat & SYS_ERR_MASK_ZCP)
 4053		pr_cont("ZCP ");
 4054	if (stat & SYS_ERR_MASK_FFLP)
 4055		pr_cont("FFLP ");
 4056	if (stat & SYS_ERR_MASK_IPP)
 4057		pr_cont("IPP ");
 4058	if (stat & SYS_ERR_MASK_MAC)
 4059		pr_cont("MAC ");
 4060	if (stat & SYS_ERR_MASK_SMX)
 4061		pr_cont("SMX ");
 4062
 4063	pr_cont(")\n");
 4064}
 4065
 4066static int niu_device_error(struct niu *np)
 4067{
 4068	u64 stat = nr64(SYS_ERR_STAT);
 4069
 4070	netdev_err(np->dev, "Core device error, stat[%llx]\n",
 4071		   (unsigned long long)stat);
 4072
 4073	niu_log_device_error(np, stat);
 4074
 4075	return -ENODEV;
 4076}
 4077
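/* Slow path service of the LDSV0/1/2 status vectors.  As decoded
 * below, the low 32 bits of v1 flag RX DMA channel errors, bits
 * 32-62 of v1 flag TX DMA channel errors, bit 63 of v0 or v1 flags
 * the MIF, and v2 carries the MAC (mask 0x01ef) and core device
 * error (mask 0x0210) summary bits.
 */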
 4078static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
 4079			      u64 v0, u64 v1, u64 v2)
 4080{
 4082	int i, err = 0;
 4083
 4084	lp->v0 = v0;
 4085	lp->v1 = v1;
 4086	lp->v2 = v2;
 4087
 4088	if (v1 & 0x00000000ffffffffULL) {
 4089		u32 rx_vec = (v1 & 0xffffffff);
 4090
 4091		for (i = 0; i < np->num_rx_rings; i++) {
 4092			struct rx_ring_info *rp = &np->rx_rings[i];
 4093
 4094			if (rx_vec & (1 << rp->rx_channel)) {
 4095				int r = niu_rx_error(np, rp);
 4096				if (r) {
 4097					err = r;
 4098				} else {
 4099					if (!v0)
 4100						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 4101						     RX_DMA_CTL_STAT_MEX);
 4102				}
 4103			}
 4104		}
 4105	}
 4106	if (v1 & 0x7fffffff00000000ULL) {
 4107		u32 tx_vec = (v1 >> 32) & 0x7fffffff;
 4108
 4109		for (i = 0; i < np->num_tx_rings; i++) {
 4110			struct tx_ring_info *rp = &np->tx_rings[i];
 4111
 4112			if (tx_vec & (1 << rp->tx_channel)) {
 4113				int r = niu_tx_error(np, rp);
 4114				if (r)
 4115					err = r;
 4116			}
 4117		}
 4118	}
 4119	if ((v0 | v1) & 0x8000000000000000ULL) {
 4120		int r = niu_mif_interrupt(np);
 4121		if (r)
 4122			err = r;
 4123	}
 4124	if (v2) {
 4125		if (v2 & 0x01ef) {
 4126			int r = niu_mac_interrupt(np);
 4127			if (r)
 4128				err = r;
 4129		}
 4130		if (v2 & 0x0210) {
 4131			int r = niu_device_error(np);
 4132			if (r)
 4133				err = r;
 4134		}
 4135	}
 4136
 4137	if (err)
 4138		niu_enable_interrupts(np, 0);
 4139
 4140	return err;
 4141}
 4142
 4143static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
 4144			    int ldn)
 4145{
 4146	struct rxdma_mailbox *mbox = rp->mbox;
 4147	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
 4148
 4149	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
 4150		      RX_DMA_CTL_STAT_RCRTO);
 4151	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
 4152
 4153	netif_printk(np, intr, KERN_DEBUG, np->dev,
 4154		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
 4155}
 4156
 4157static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
 4158			    int ldn)
 4159{
 4160	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
 4161
 4162	netif_printk(np, intr, KERN_DEBUG, np->dev,
 4163		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
 4164}
 4165
 4166static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
 4167{
 4168	struct niu_parent *parent = np->parent;
 4169	u32 rx_vec, tx_vec;
 4170	int i;
 4171
 4172	tx_vec = (v0 >> 32);
 4173	rx_vec = (v0 & 0xffffffff);
 4174
 4175	for (i = 0; i < np->num_rx_rings; i++) {
 4176		struct rx_ring_info *rp = &np->rx_rings[i];
 4177		int ldn = LDN_RXDMA(rp->rx_channel);
 4178
 4179		if (parent->ldg_map[ldn] != ldg)
 4180			continue;
 4181
 4182		nw64(LD_IM0(ldn), LD_IM0_MASK);
 4183		if (rx_vec & (1 << rp->rx_channel))
 4184			niu_rxchan_intr(np, rp, ldn);
 4185	}
 4186
 4187	for (i = 0; i < np->num_tx_rings; i++) {
 4188		struct tx_ring_info *rp = &np->tx_rings[i];
 4189		int ldn = LDN_TXDMA(rp->tx_channel);
 4190
 4191		if (parent->ldg_map[ldn] != ldg)
 4192			continue;
 4193
 4194		nw64(LD_IM0(ldn), LD_IM0_MASK);
 4195		if (tx_vec & (1 << rp->tx_channel))
 4196			niu_txchan_intr(np, rp, ldn);
 4197	}
 4198}
 4199
 4200static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
 4201			      u64 v0, u64 v1, u64 v2)
 4202{
 4203	if (likely(napi_schedule_prep(&lp->napi))) {
 4204		lp->v0 = v0;
 4205		lp->v1 = v1;
 4206		lp->v2 = v2;
 4207		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
 4208		__napi_schedule(&lp->napi);
 4209	}
 4210}
 4211
 4212static irqreturn_t niu_interrupt(int irq, void *dev_id)
 4213{
 4214	struct niu_ldg *lp = dev_id;
 4215	struct niu *np = lp->np;
 4216	int ldg = lp->ldg_num;
 4217	unsigned long flags;
 4218	u64 v0, v1, v2;
 4219
 4220	if (netif_msg_intr(np))
 4221		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
 4222		       __func__, lp, ldg);
 4223
 4224	spin_lock_irqsave(&np->lock, flags);
 4225
 4226	v0 = nr64(LDSV0(ldg));
 4227	v1 = nr64(LDSV1(ldg));
 4228	v2 = nr64(LDSV2(ldg));
 4229
 4230	if (netif_msg_intr(np))
 4231		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
 4232		       (unsigned long long) v0,
 4233		       (unsigned long long) v1,
 4234		       (unsigned long long) v2);
 4235
 4236	if (unlikely(!v0 && !v1 && !v2)) {
 4237		spin_unlock_irqrestore(&np->lock, flags);
 4238		return IRQ_NONE;
 4239	}
 4240
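	/* A MIF event in v0, or anything at all in v1/v2, is handled
	 * in the slow path; any remaining v0 bits are RX/TX channel
	 * service that is handed off to NAPI below.
	 */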
 4241	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
 4242		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
 4243		if (err)
 4244			goto out;
 4245	}
 4246	if (likely(v0 & ~((u64)1 << LDN_MIF)))
 4247		niu_schedule_napi(np, lp, v0, v1, v2);
 4248	else
 4249		niu_ldg_rearm(np, lp, 1);
 4250out:
 4251	spin_unlock_irqrestore(&np->lock, flags);
 4252
 4253	return IRQ_HANDLED;
 4254}
 4255
 4256static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
 4257{
 4258	if (rp->mbox) {
 4259		np->ops->free_coherent(np->device,
 4260				       sizeof(struct rxdma_mailbox),
 4261				       rp->mbox, rp->mbox_dma);
 4262		rp->mbox = NULL;
 4263	}
 4264	if (rp->rcr) {
 4265		np->ops->free_coherent(np->device,
 4266				       MAX_RCR_RING_SIZE * sizeof(__le64),
 4267				       rp->rcr, rp->rcr_dma);
 4268		rp->rcr = NULL;
 4269		rp->rcr_table_size = 0;
 4270		rp->rcr_index = 0;
 4271	}
 4272	if (rp->rbr) {
 4273		niu_rbr_free(np, rp);
 4274
 4275		np->ops->free_coherent(np->device,
 4276				       MAX_RBR_RING_SIZE * sizeof(__le32),
 4277				       rp->rbr, rp->rbr_dma);
 4278		rp->rbr = NULL;
 4279		rp->rbr_table_size = 0;
 4280		rp->rbr_index = 0;
 4281	}
 4282	kfree(rp->rxhash);
 4283	rp->rxhash = NULL;
 4284}
 4285
 4286static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
 4287{
 4288	if (rp->mbox) {
 4289		np->ops->free_coherent(np->device,
 4290				       sizeof(struct txdma_mailbox),
 4291				       rp->mbox, rp->mbox_dma);
 4292		rp->mbox = NULL;
 4293	}
 4294	if (rp->descr) {
 4295		int i;
 4296
 4297		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
 4298			if (rp->tx_buffs[i].skb)
 4299				(void) release_tx_packet(np, rp, i);
 4300		}
 4301
 4302		np->ops->free_coherent(np->device,
 4303				       MAX_TX_RING_SIZE * sizeof(__le64),
 4304				       rp->descr, rp->descr_dma);
 4305		rp->descr = NULL;
 4306		rp->pending = 0;
 4307		rp->prod = 0;
 4308		rp->cons = 0;
 4309		rp->wrap_bit = 0;
 4310	}
 4311}
 4312
 4313static void niu_free_channels(struct niu *np)
 4314{
 4315	int i;
 4316
 4317	if (np->rx_rings) {
 4318		for (i = 0; i < np->num_rx_rings; i++) {
 4319			struct rx_ring_info *rp = &np->rx_rings[i];
 4320
 4321			niu_free_rx_ring_info(np, rp);
 4322		}
 4323		kfree(np->rx_rings);
 4324		np->rx_rings = NULL;
 4325		np->num_rx_rings = 0;
 4326	}
 4327
 4328	if (np->tx_rings) {
 4329		for (i = 0; i < np->num_tx_rings; i++) {
 4330			struct tx_ring_info *rp = &np->tx_rings[i];
 4331
 4332			niu_free_tx_ring_info(np, rp);
 4333		}
 4334		kfree(np->tx_rings);
 4335		np->tx_rings = NULL;
 4336		np->num_tx_rings = 0;
 4337	}
 4338}
 4339
 4340static int niu_alloc_rx_ring_info(struct niu *np,
 4341				  struct rx_ring_info *rp)
 4342{
 4343	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
 4344
 4345	rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
 4346			     GFP_KERNEL);
 4347	if (!rp->rxhash)
 4348		return -ENOMEM;
 4349
 4350	rp->mbox = np->ops->alloc_coherent(np->device,
 4351					   sizeof(struct rxdma_mailbox),
 4352					   &rp->mbox_dma, GFP_KERNEL);
 4353	if (!rp->mbox)
 4354		return -ENOMEM;
 4355	if ((unsigned long)rp->mbox & (64UL - 1)) {
 4356		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
 4357			   rp->mbox);
 4358		return -EINVAL;
 4359	}
 4360
 4361	rp->rcr = np->ops->alloc_coherent(np->device,
 4362					  MAX_RCR_RING_SIZE * sizeof(__le64),
 4363					  &rp->rcr_dma, GFP_KERNEL);
 4364	if (!rp->rcr)
 4365		return -ENOMEM;
 4366	if ((unsigned long)rp->rcr & (64UL - 1)) {
 4367		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
 4368			   rp->rcr);
 4369		return -EINVAL;
 4370	}
 4371	rp->rcr_table_size = MAX_RCR_RING_SIZE;
 4372	rp->rcr_index = 0;
 4373
 4374	rp->rbr = np->ops->alloc_coherent(np->device,
 4375					  MAX_RBR_RING_SIZE * sizeof(__le32),
 4376					  &rp->rbr_dma, GFP_KERNEL);
 4377	if (!rp->rbr)
 4378		return -ENOMEM;
 4379	if ((unsigned long)rp->rbr & (64UL - 1)) {
 4380		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
 4381			   rp->rbr);
 4382		return -EINVAL;
 4383	}
 4384	rp->rbr_table_size = MAX_RBR_RING_SIZE;
 4385	rp->rbr_index = 0;
 4386	rp->rbr_pending = 0;
 4387
 4388	return 0;
 4389}
 4390
 4391static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
 4392{
 4393	int mtu = np->dev->mtu;
 4394
 4395	/* These values are recommended by the HW designers for fair
 4396	 * utilization of DRR amongst the rings.
 4397	 */
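	/* For a standard 1500-byte MTU this works out to a 1532-byte
	 * burst; jumbo MTUs are clamped at the 4096-byte ceiling.
	 */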
 4398	rp->max_burst = mtu + 32;
 4399	if (rp->max_burst > 4096)
 4400		rp->max_burst = 4096;
 4401}
 4402
 4403static int niu_alloc_tx_ring_info(struct niu *np,
 4404				  struct tx_ring_info *rp)
 4405{
 4406	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
 4407
 4408	rp->mbox = np->ops->alloc_coherent(np->device,
 4409					   sizeof(struct txdma_mailbox),
 4410					   &rp->mbox_dma, GFP_KERNEL);
 4411	if (!rp->mbox)
 4412		return -ENOMEM;
 4413	if ((unsigned long)rp->mbox & (64UL - 1)) {
 4414		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
 4415			   rp->mbox);
 4416		return -EINVAL;
 4417	}
 4418
 4419	rp->descr = np->ops->alloc_coherent(np->device,
 4420					    MAX_TX_RING_SIZE * sizeof(__le64),
 4421					    &rp->descr_dma, GFP_KERNEL);
 4422	if (!rp->descr)
 4423		return -ENOMEM;
 4424	if ((unsigned long)rp->descr & (64UL - 1)) {
 4425		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
 4426			   rp->descr);
 4427		return -EINVAL;
 4428	}
 4429
 4430	rp->pending = MAX_TX_RING_SIZE;
 4431	rp->prod = 0;
 4432	rp->cons = 0;
 4433	rp->wrap_bit = 0;
 4434
 4435	/* XXX make these configurable... XXX */
 4436	rp->mark_freq = rp->pending / 4;
 4437
 4438	niu_set_max_burst(np, rp);
 4439
 4440	return 0;
 4441}
 4442
 4443static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
 4444{
 4445	u16 bss;
 4446
 4447	bss = min(PAGE_SHIFT, 15);
 4448
 4449	rp->rbr_block_size = 1 << bss;
 4450	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
 4451
 4452	rp->rbr_sizes[0] = 256;
 4453	rp->rbr_sizes[1] = 1024;
 4454	if (np->dev->mtu > ETH_DATA_LEN) {
 4455		switch (PAGE_SIZE) {
 4456		case 4 * 1024:
 4457			rp->rbr_sizes[2] = 4096;
 4458			break;
 4459
 4460		default:
 4461			rp->rbr_sizes[2] = 8192;
 4462			break;
 4463		}
 4464	} else {
 4465		rp->rbr_sizes[2] = 2048;
 4466	}
 4467	rp->rbr_sizes[3] = rp->rbr_block_size;
 4468}
 4469
 4470static int niu_alloc_channels(struct niu *np)
 4471{
 4472	struct niu_parent *parent = np->parent;
 4473	int first_rx_channel, first_tx_channel;
 4474	int num_rx_rings, num_tx_rings;
 4475	struct rx_ring_info *rx_rings;
 4476	struct tx_ring_info *tx_rings;
 4477	int i, port, err;
 4478
 4479	port = np->port;
 4480	first_rx_channel = first_tx_channel = 0;
 4481	for (i = 0; i < port; i++) {
 4482		first_rx_channel += parent->rxchan_per_port[i];
 4483		first_tx_channel += parent->txchan_per_port[i];
 4484	}
 4485
 4486	num_rx_rings = parent->rxchan_per_port[port];
 4487	num_tx_rings = parent->txchan_per_port[port];
 4488
 4489	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
 4490			   GFP_KERNEL);
 4491	err = -ENOMEM;
 4492	if (!rx_rings)
 4493		goto out_err;
 4494
 4495	np->num_rx_rings = num_rx_rings;
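	/* Make sure num_rx_rings is visible before np->rx_rings, so
	 * that lockless readers such as the stats path never see a
	 * ring pointer paired with a stale count.
	 */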
 4496	smp_wmb();
 4497	np->rx_rings = rx_rings;
 4498
 4499	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
 4500
 4501	for (i = 0; i < np->num_rx_rings; i++) {
 4502		struct rx_ring_info *rp = &np->rx_rings[i];
 4503
 4504		rp->np = np;
 4505		rp->rx_channel = first_rx_channel + i;
 4506
 4507		err = niu_alloc_rx_ring_info(np, rp);
 4508		if (err)
 4509			goto out_err;
 4510
 4511		niu_size_rbr(np, rp);
 4512
 4513		/* XXX better defaults, configurable, etc... XXX */
 4514		rp->nonsyn_window = 64;
 4515		rp->nonsyn_threshold = rp->rcr_table_size - 64;
 4516		rp->syn_window = 64;
 4517		rp->syn_threshold = rp->rcr_table_size - 64;
 4518		rp->rcr_pkt_threshold = 16;
 4519		rp->rcr_timeout = 8;
 4520		rp->rbr_kick_thresh = RBR_REFILL_MIN;
 4521		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
 4522			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
 4523
 4524		err = niu_rbr_fill(np, rp, GFP_KERNEL);
 4525		if (err)
			goto out_err;
 4527	}
 4528
 4529	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
 4530			   GFP_KERNEL);
 4531	err = -ENOMEM;
 4532	if (!tx_rings)
 4533		goto out_err;
 4534
 4535	np->num_tx_rings = num_tx_rings;
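	/* As with the RX rings, publish the count before the pointer. */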
 4536	smp_wmb();
 4537	np->tx_rings = tx_rings;
 4538
 4539	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
 4540
 4541	for (i = 0; i < np->num_tx_rings; i++) {
 4542		struct tx_ring_info *rp = &np->tx_rings[i];
 4543
 4544		rp->np = np;
 4545		rp->tx_channel = first_tx_channel + i;
 4546
 4547		err = niu_alloc_tx_ring_info(np, rp);
 4548		if (err)
 4549			goto out_err;
 4550	}
 4551
 4552	return 0;
 4553
 4554out_err:
 4555	niu_free_channels(np);
 4556	return err;
 4557}
 4558
 4559static int niu_tx_cs_sng_poll(struct niu *np, int channel)
 4560{
 4561	int limit = 1000;
 4562
 4563	while (--limit > 0) {
 4564		u64 val = nr64(TX_CS(channel));
 4565		if (val & TX_CS_SNG_STATE)
 4566			return 0;
 4567	}
 4568	return -ENODEV;
 4569}
 4570
 4571static int niu_tx_channel_stop(struct niu *np, int channel)
 4572{
 4573	u64 val = nr64(TX_CS(channel));
 4574
 4575	val |= TX_CS_STOP_N_GO;
 4576	nw64(TX_CS(channel), val);
 4577
 4578	return niu_tx_cs_sng_poll(np, channel);
 4579}
 4580
 4581static int niu_tx_cs_reset_poll(struct niu *np, int channel)
 4582{
 4583	int limit = 1000;
 4584
 4585	while (--limit > 0) {
 4586		u64 val = nr64(TX_CS(channel));
 4587		if (!(val & TX_CS_RST))
 4588			return 0;
 4589	}
 4590	return -ENODEV;
 4591}
 4592
 4593static int niu_tx_channel_reset(struct niu *np, int channel)
 4594{
 4595	u64 val = nr64(TX_CS(channel));
 4596	int err;
 4597
 4598	val |= TX_CS_RST;
 4599	nw64(TX_CS(channel), val);
 4600
 4601	err = niu_tx_cs_reset_poll(np, channel);
 4602	if (!err)
 4603		nw64(TX_RING_KICK(channel), 0);
 4604
 4605	return err;
 4606}
 4607
 4608static int niu_tx_channel_lpage_init(struct niu *np, int channel)
 4609{
 4610	u64 val;
 4611
 4612	nw64(TX_LOG_MASK1(channel), 0);
 4613	nw64(TX_LOG_VAL1(channel), 0);
 4614	nw64(TX_LOG_MASK2(channel), 0);
 4615	nw64(TX_LOG_VAL2(channel), 0);
 4616	nw64(TX_LOG_PAGE_RELO1(channel), 0);
 4617	nw64(TX_LOG_PAGE_RELO2(channel), 0);
 4618	nw64(TX_LOG_PAGE_HDL(channel), 0);
 4619
 4620	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
 4621	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
 4622	nw64(TX_LOG_PAGE_VLD(channel), val);
 4623
 4624	/* XXX TXDMA 32bit mode? XXX */
 4625
 4626	return 0;
 4627}
 4628
 4629static void niu_txc_enable_port(struct niu *np, int on)
 4630{
 4631	unsigned long flags;
 4632	u64 val, mask;
 4633
 4634	niu_lock_parent(np, flags);
 4635	val = nr64(TXC_CONTROL);
 4636	mask = (u64)1 << np->port;
 4637	if (on) {
 4638		val |= TXC_CONTROL_ENABLE | mask;
 4639	} else {
 4640		val &= ~mask;
 4641		if ((val & ~TXC_CONTROL_ENABLE) == 0)
 4642			val &= ~TXC_CONTROL_ENABLE;
 4643	}
 4644	nw64(TXC_CONTROL, val);
 4645	niu_unlock_parent(np, flags);
 4646}
 4647
 4648static void niu_txc_set_imask(struct niu *np, u64 imask)
 4649{
 4650	unsigned long flags;
 4651	u64 val;
 4652
 4653	niu_lock_parent(np, flags);
 4654	val = nr64(TXC_INT_MASK);
 4655	val &= ~TXC_INT_MASK_VAL(np->port);
 4656	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
	nw64(TXC_INT_MASK, val);
	niu_unlock_parent(np, flags);
 4658}
 4659
 4660static void niu_txc_port_dma_enable(struct niu *np, int on)
 4661{
 4662	u64 val = 0;
 4663
 4664	if (on) {
 4665		int i;
 4666
 4667		for (i = 0; i < np->num_tx_rings; i++)
 4668			val |= (1 << np->tx_rings[i].tx_channel);
 4669	}
 4670	nw64(TXC_PORT_DMA(np->port), val);
 4671}
 4672
 4673static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 4674{
 4675	int err, channel = rp->tx_channel;
 4676	u64 val, ring_len;
 4677
 4678	err = niu_tx_channel_stop(np, channel);
 4679	if (err)
 4680		return err;
 4681
 4682	err = niu_tx_channel_reset(np, channel);
 4683	if (err)
 4684		return err;
 4685
 4686	err = niu_tx_channel_lpage_init(np, channel);
 4687	if (err)
 4688		return err;
 4689
 4690	nw64(TXC_DMA_MAX(channel), rp->max_burst);
 4691	nw64(TX_ENT_MSK(channel), 0);
 4692
 4693	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
 4694			      TX_RNG_CFIG_STADDR)) {
 4695		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
 4696			   channel, (unsigned long long)rp->descr_dma);
 4697		return -EINVAL;
 4698	}
 4699
	/* The length field in TX_RNG_CFIG is measured in 64-byte
	 * blocks.  rp->pending is the number of TX descriptors in
	 * our ring, 8 bytes each, so dividing the descriptor count
	 * by 8 yields the block count the chip expects.
	 */
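	/* For example, a 256-entry ring occupies 256 * 8 = 2048 bytes,
	 * i.e. 2048 / 64 = 32 blocks, which is exactly 256 / 8.
	 */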
 4705	ring_len = (rp->pending / 8);
 4706
 4707	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
 4708	       rp->descr_dma);
 4709	nw64(TX_RNG_CFIG(channel), val);
 4710
 4711	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
 4712	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
 4713		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
 4714			    channel, (unsigned long long)rp->mbox_dma);
 4715		return -EINVAL;
 4716	}
 4717	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
 4718	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
 4719
 4720	nw64(TX_CS(channel), 0);
 4721
 4722	rp->last_pkt_cnt = 0;
 4723
 4724	return 0;
 4725}
 4726
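/* Program this port's RDC group tables.  Each table is a list of
 * NIU_RDC_TABLE_SLOTS RX DMA channel numbers that the classifier
 * indexes into; DEF_RDC selects the port's default RX channel.
 */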
 4727static void niu_init_rdc_groups(struct niu *np)
 4728{
 4729	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
 4730	int i, first_table_num = tp->first_table_num;
 4731
 4732	for (i = 0; i < tp->num_tables; i++) {
 4733		struct rdc_table *tbl = &tp->tables[i];
 4734		int this_table = first_table_num + i;
 4735		int slot;
 4736
 4737		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
 4738			nw64(RDC_TBL(this_table, slot),
 4739			     tbl->rxdma_channel[slot]);
 4740	}
 4741
 4742	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
 4743}
 4744
 4745static void niu_init_drr_weight(struct niu *np)
 4746{
 4747	int type = phy_decode(np->parent->port_phy, np->port);
 4748	u64 val;
 4749
 4750	switch (type) {
 4751	case PORT_TYPE_10G:
 4752		val = PT_DRR_WEIGHT_DEFAULT_10G;
 4753		break;
 4754
 4755	case PORT_TYPE_1G:
 4756	default:
 4757		val = PT_DRR_WEIGHT_DEFAULT_1G;
 4758		break;
 4759	}
 4760	nw64(PT_DRR_WT(np->port), val);
 4761}
 4762
 4763static int niu_init_hostinfo(struct niu *np)
 4764{
 4765	struct niu_parent *parent = np->parent;
 4766	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 4767	int i, err, num_alt = niu_num_alt_addr(np);
 4768	int first_rdc_table = tp->first_table_num;
 4769
 4770	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 4771	if (err)
 4772		return err;
 4773
 4774	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 4775	if (err)
 4776		return err;
 4777
 4778	for (i = 0; i < num_alt; i++) {
 4779		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
 4780		if (err)
 4781			return err;
 4782	}
 4783
 4784	return 0;
 4785}
 4786
 4787static int niu_rx_channel_reset(struct niu *np, int channel)
 4788{
 4789	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
 4790				      RXDMA_CFIG1_RST, 1000, 10,
 4791				      "RXDMA_CFIG1");
 4792}
 4793
 4794static int niu_rx_channel_lpage_init(struct niu *np, int channel)
 4795{
 4796	u64 val;
 4797
 4798	nw64(RX_LOG_MASK1(channel), 0);
 4799	nw64(RX_LOG_VAL1(channel), 0);
 4800	nw64(RX_LOG_MASK2(channel), 0);
 4801	nw64(RX_LOG_VAL2(channel), 0);
 4802	nw64(RX_LOG_PAGE_RELO1(channel), 0);
 4803	nw64(RX_LOG_PAGE_RELO2(channel), 0);
 4804	nw64(RX_LOG_PAGE_HDL(channel), 0);
 4805
 4806	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
 4807	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
 4808	nw64(RX_LOG_PAGE_VLD(channel), val);
 4809
 4810	return 0;
 4811}
 4812
 4813static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
 4814{
 4815	u64 val;
 4816
 4817	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
 4818	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
 4819	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
 4820	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
 4821	nw64(RDC_RED_PARA(rp->rx_channel), val);
 4822}
 4823
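/* Pack the RBR block size and the three buffer sizes into the
 * RBR_CFIG_B register layout.  With the defaults chosen by
 * niu_size_rbr() on a 4KB-page system at standard MTU, for
 * instance, this encodes a 4K block with 256/1024/2048 byte
 * buffers and sets all three VLD bits.
 */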
 4824static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
 4825{
 4826	u64 val = 0;
 4827
 4828	*ret = 0;
 4829	switch (rp->rbr_block_size) {
 4830	case 4 * 1024:
 4831		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4832		break;
 4833	case 8 * 1024:
 4834		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4835		break;
 4836	case 16 * 1024:
 4837		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4838		break;
 4839	case 32 * 1024:
 4840		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4841		break;
 4842	default:
 4843		return -EINVAL;
 4844	}
 4845	val |= RBR_CFIG_B_VLD2;
 4846	switch (rp->rbr_sizes[2]) {
 4847	case 2 * 1024:
 4848		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4849		break;
 4850	case 4 * 1024:
 4851		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4852		break;
 4853	case 8 * 1024:
 4854		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4855		break;
 4856	case 16 * 1024:
 4857		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4858		break;
 4859
 4860	default:
 4861		return -EINVAL;
 4862	}
 4863	val |= RBR_CFIG_B_VLD1;
 4864	switch (rp->rbr_sizes[1]) {
 4865	case 1 * 1024:
 4866		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4867		break;
 4868	case 2 * 1024:
 4869		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4870		break;
 4871	case 4 * 1024:
 4872		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4873		break;
 4874	case 8 * 1024:
 4875		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4876		break;
 4877
 4878	default:
 4879		return -EINVAL;
 4880	}
 4881	val |= RBR_CFIG_B_VLD0;
 4882	switch (rp->rbr_sizes[0]) {
 4883	case 256:
 4884		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
 4885		break;
 4886	case 512:
 4887		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
 4888		break;
 4889	case 1 * 1024:
 4890		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
 4891		break;
 4892	case 2 * 1024:
 4893		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
 4894		break;
 4895
 4896	default:
 4897		return -EINVAL;
 4898	}
 4899
 4900	*ret = val;
 4901	return 0;
 4902}
 4903
 4904static int niu_enable_rx_channel(struct niu *np, int channel, int on)
 4905{
 4906	u64 val = nr64(RXDMA_CFIG1(channel));
 4907	int limit;
 4908
 4909	if (on)
 4910		val |= RXDMA_CFIG1_EN;
 4911	else
 4912		val &= ~RXDMA_CFIG1_EN;
 4913	nw64(RXDMA_CFIG1(channel), val);
 4914
 4915	limit = 1000;
 4916	while (--limit > 0) {
 4917		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
 4918			break;
 4919		udelay(10);
 4920	}
 4921	if (limit <= 0)
 4922		return -ENODEV;
 4923	return 0;
 4924}
 4925
 4926static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 4927{
 4928	int err, channel = rp->rx_channel;
 4929	u64 val;
 4930
 4931	err = niu_rx_channel_reset(np, channel);
 4932	if (err)
 4933		return err;
 4934
 4935	err = niu_rx_channel_lpage_init(np, channel);
 4936	if (err)
 4937		return err;
 4938
 4939	niu_rx_channel_wred_init(np, rp);
 4940
 4941	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
 4942	nw64(RX_DMA_CTL_STAT(channel),
 4943	     (RX_DMA_CTL_STAT_MEX |
 4944	      RX_DMA_CTL_STAT_RCRTHRES |
 4945	      RX_DMA_CTL_STAT_RCRTO |
 4946	      RX_DMA_CTL_STAT_RBR_EMPTY));
 4947	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
 4948	nw64(RXDMA_CFIG2(channel),
 4949	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
 4950	      RXDMA_CFIG2_FULL_HDR));
 4951	nw64(RBR_CFIG_A(channel),
 4952	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
 4953	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
 4954	err = niu_compute_rbr_cfig_b(rp, &val);
 4955	if (err)
 4956		return err;
 4957	nw64(RBR_CFIG_B(channel), val);
 4958	nw64(RCRCFIG_A(channel),
 4959	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
 4960	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
 4961	nw64(RCRCFIG_B(channel),
 4962	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
 4963	     RCRCFIG_B_ENTOUT |
 4964	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
 4965
 4966	err = niu_enable_rx_channel(np, channel, 1);
 4967	if (err)
 4968		return err;
 4969
 4970	nw64(RBR_KICK(channel), rp->rbr_index);
 4971
 4972	val = nr64(RX_DMA_CTL_STAT(channel));
 4973	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
 4974	nw64(RX_DMA_CTL_STAT(channel), val);
 4975
 4976	return 0;
 4977}
 4978
 4979static int niu_init_rx_channels(struct niu *np)
 4980{
 4981	unsigned long flags;
 4982	u64 seed = jiffies_64;
 4983	int err, i;
 4984
 4985	niu_lock_parent(np, flags);
 4986	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
 4987	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
 4988	niu_unlock_parent(np, flags);
 4989
 4990	/* XXX RXDMA 32bit mode? XXX */
 4991
 4992	niu_init_rdc_groups(np);
 4993	niu_init_drr_weight(np);
 4994
 4995	err = niu_init_hostinfo(np);
 4996	if (err)
 4997		return err;
 4998
 4999	for (i = 0; i < np->num_rx_rings; i++) {
 5000		struct rx_ring_info *rp = &np->rx_rings[i];
 5001
 5002		err = niu_init_one_rx_channel(np, rp);
 5003		if (err)
 5004			return err;
 5005	}
 5006
 5007	return 0;
 5008}
 5009
 5010static int niu_set_ip_frag_rule(struct niu *np)
 5011{
 5012	struct niu_parent *parent = np->parent;
 5013	struct niu_classifier *cp = &np->clas;
 5014	struct niu_tcam_entry *tp;
 5015	int index, err;
 5016
 5017	index = cp->tcam_top;
 5018	tp = &parent->tcam[index];
 5019
	/* Note that the noport bit is the same in both IPv4 and
	 * IPv6 format TCAM entries.
	 */
 5023	memset(tp, 0, sizeof(*tp));
 5024	tp->key[1] = TCAM_V4KEY1_NOPORT;
 5025	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
 5026	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
 5027			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
 5028	err = tcam_write(np, index, tp->key, tp->key_mask);
 5029	if (err)
 5030		return err;
 5031	err = tcam_assoc_write(np, index, tp->assoc_data);
 5032	if (err)
 5033		return err;
 5034	tp->valid = 1;
 5035	cp->tcam_valid_entries++;
 5036
 5037	return 0;
 5038}
 5039
 5040static int niu_init_classifier_hw(struct niu *np)
 5041{
 5042	struct niu_parent *parent = np->parent;
 5043	struct niu_classifier *cp = &np->clas;
 5044	int i, err;
 5045
 5046	nw64(H1POLY, cp->h1_init);
 5047	nw64(H2POLY, cp->h2_init);
 5048
 5049	err = niu_init_hostinfo(np);
 5050	if (err)
 5051		return err;
 5052
 5053	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
 5054		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
 5055
 5056		vlan_tbl_write(np, i, np->port,
 5057			       vp->vlan_pref, vp->rdc_num);
 5058	}
 5059
 5060	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
 5061		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
 5062
 5063		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
 5064						ap->rdc_num, ap->mac_pref);
 5065		if (err)
 5066			return err;
 5067	}
 5068
 5069	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
 5070		int index = i - CLASS_CODE_USER_PROG1;
 5071
 5072		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
 5073		if (err)
 5074			return err;
 5075		err = niu_set_flow_key(np, i, parent->flow_key[index]);
 5076		if (err)
 5077			return err;
 5078	}
 5079
 5080	err = niu_set_ip_frag_rule(np);
 5081	if (err)
 5082		return err;
 5083
 5084	tcam_enable(np, 1);
 5085
 5086	return 0;
 5087}
 5088
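/* ZCP CFIFO RAM is accessed indirectly: stage the five data words,
 * program the byte enables, kick a read or write via ZCP_RAM_ACC,
 * then poll for ZCP_RAM_ACC_BUSY to clear.  XXX The index argument
 * is currently unused; every access targets ZFCID 0.
 */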
 5089static int niu_zcp_write(struct niu *np, int index, u64 *data)
 5090{
 5091	nw64(ZCP_RAM_DATA0, data[0]);
 5092	nw64(ZCP_RAM_DATA1, data[1]);
 5093	nw64(ZCP_RAM_DATA2, data[2]);
 5094	nw64(ZCP_RAM_DATA3, data[3]);
 5095	nw64(ZCP_RAM_DATA4, data[4]);
 5096	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
 5097	nw64(ZCP_RAM_ACC,
 5098	     (ZCP_RAM_ACC_WRITE |
 5099	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
 5100	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
 5101
 5102	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5103				   1000, 100);
 5104}
 5105
 5106static int niu_zcp_read(struct niu *np, int index, u64 *data)
 5107{
 5108	int err;
 5109
 5110	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5111				  1000, 100);
 5112	if (err) {
 5113		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
 5114			   (unsigned long long)nr64(ZCP_RAM_ACC));
 5115		return err;
 5116	}
 5117
 5118	nw64(ZCP_RAM_ACC,
 5119	     (ZCP_RAM_ACC_READ |
 5120	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
 5121	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
 5122
 5123	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5124				  1000, 100);
 5125	if (err) {
 5126		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
 5127			   (unsigned long long)nr64(ZCP_RAM_ACC));
 5128		return err;
 5129	}
 5130
 5131	data[0] = nr64(ZCP_RAM_DATA0);
 5132	data[1] = nr64(ZCP_RAM_DATA1);
 5133	data[2] = nr64(ZCP_RAM_DATA2);
 5134	data[3] = nr64(ZCP_RAM_DATA3);
 5135	data[4] = nr64(ZCP_RAM_DATA4);
 5136
 5137	return 0;
 5138}
 5139
 5140static void niu_zcp_cfifo_reset(struct niu *np)
 5141{
 5142	u64 val = nr64(RESET_CFIFO);
 5143
 5144	val |= RESET_CFIFO_RST(np->port);
 5145	nw64(RESET_CFIFO, val);
 5146	udelay(10);
 5147
 5148	val &= ~RESET_CFIFO_RST(np->port);
 5149	nw64(RESET_CFIFO, val);
 5150}
 5151
 5152static int niu_init_zcp(struct niu *np)
 5153{
 5154	u64 data[5], rbuf[5];
 5155	int i, max, err;
 5156
 5157	if (np->parent->plat_type != PLAT_TYPE_NIU) {
 5158		if (np->port == 0 || np->port == 1)
 5159			max = ATLAS_P0_P1_CFIFO_ENTRIES;
 5160		else
 5161			max = ATLAS_P2_P3_CFIFO_ENTRIES;
 5162	} else
 5163		max = NIU_CFIFO_ENTRIES;
 5164
 5165	data[0] = 0;
 5166	data[1] = 0;
 5167	data[2] = 0;
 5168	data[3] = 0;
 5169	data[4] = 0;
 5170
 5171	for (i = 0; i < max; i++) {
 5172		err = niu_zcp_write(np, i, data);
 5173		if (err)
 5174			return err;
 5175		err = niu_zcp_read(np, i, rbuf);
 5176		if (err)
 5177			return err;
 5178	}
 5179
 5180	niu_zcp_cfifo_reset(np);
 5181	nw64(CFIFO_ECC(np->port), 0);
 5182	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
 5183	(void) nr64(ZCP_INT_STAT);
 5184	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
 5185
 5186	return 0;
 5187}
 5188
 5189static void niu_ipp_write(struct niu *np, int index, u64 *data)
 5190{
 5191	u64 val = nr64_ipp(IPP_CFIG);
 5192
 5193	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
 5194	nw64_ipp(IPP_DFIFO_WR_PTR, index);
 5195	nw64_ipp(IPP_DFIFO_WR0, data[0]);
 5196	nw64_ipp(IPP_DFIFO_WR1, data[1]);
 5197	nw64_ipp(IPP_DFIFO_WR2, data[2]);
 5198	nw64_ipp(IPP_DFIFO_WR3, data[3]);
 5199	nw64_ipp(IPP_DFIFO_WR4, data[4]);
 5200	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
 5201}
 5202
 5203static void niu_ipp_read(struct niu *np, int index, u64 *data)
 5204{
 5205	nw64_ipp(IPP_DFIFO_RD_PTR, index);
 5206	data[0] = nr64_ipp(IPP_DFIFO_RD0);
 5207	data[1] = nr64_ipp(IPP_DFIFO_RD1);
 5208	data[2] = nr64_ipp(IPP_DFIFO_RD2);
 5209	data[3] = nr64_ipp(IPP_DFIFO_RD3);
 5210	data[4] = nr64_ipp(IPP_DFIFO_RD4);
 5211}
 5212
 5213static int niu_ipp_reset(struct niu *np)
 5214{
 5215	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
 5216					  1000, 100, "IPP_CFIG");
 5217}
 5218
 5219static int niu_init_ipp(struct niu *np)
 5220{
 5221	u64 data[5], rbuf[5], val;
 5222	int i, max, err;
 5223
 5224	if (np->parent->plat_type != PLAT_TYPE_NIU) {
 5225		if (np->port == 0 || np->port == 1)
 5226			max = ATLAS_P0_P1_DFIFO_ENTRIES;
 5227		else
 5228			max = ATLAS_P2_P3_DFIFO_ENTRIES;
 5229	} else
 5230		max = NIU_DFIFO_ENTRIES;
 5231
 5232	data[0] = 0;
 5233	data[1] = 0;
 5234	data[2] = 0;
 5235	data[3] = 0;
 5236	data[4] = 0;
 5237
 5238	for (i = 0; i < max; i++) {
 5239		niu_ipp_write(np, i, data);
 5240		niu_ipp_read(np, i, rbuf);
 5241	}
 5242
 5243	(void) nr64_ipp(IPP_INT_STAT);
 5244	(void) nr64_ipp(IPP_INT_STAT);
 5245
 5246	err = niu_ipp_reset(np);
 5247	if (err)
 5248		return err;
 5249
 5250	(void) nr64_ipp(IPP_PKT_DIS);
 5251	(void) nr64_ipp(IPP_BAD_CS_CNT);
 5252	(void) nr64_ipp(IPP_ECC);
 5253
 5254	(void) nr64_ipp(IPP_INT_STAT);
 5255
 5256	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
 5257
 5258	val = nr64_ipp(IPP_CFIG);
 5259	val &= ~IPP_CFIG_IP_MAX_PKT;
 5260	val |= (IPP_CFIG_IPP_ENABLE |
 5261		IPP_CFIG_DFIFO_ECC_EN |
 5262		IPP_CFIG_DROP_BAD_CRC |
 5263		IPP_CFIG_CKSUM_EN |
 5264		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
 5265	nw64_ipp(IPP_CFIG, val);
 5266
 5267	return 0;
 5268}
 5269
 5270static void niu_handle_led(struct niu *np, int status)
 5271{
 5272	u64 val;
 5273	val = nr64_mac(XMAC_CONFIG);
 5274
 5275	if ((np->flags & NIU_FLAGS_10G) != 0 &&
 5276	    (np->flags & NIU_FLAGS_FIBER) != 0) {
 5277		if (status) {
 5278			val |= XMAC_CONFIG_LED_POLARITY;
 5279			val &= ~XMAC_CONFIG_FORCE_LED_ON;
 5280		} else {
 5281			val |= XMAC_CONFIG_FORCE_LED_ON;
 5282			val &= ~XMAC_CONFIG_LED_POLARITY;
 5283		}
 5284	}
 5285
 5286	nw64_mac(XMAC_CONFIG, val);
 5287}
 5288
 5289static void niu_init_xif_xmac(struct niu *np)
 5290{
 5291	struct niu_link_config *lp = &np->link_config;
 5292	u64 val;
 5293
 5294	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
 5295		val = nr64(MIF_CONFIG);
 5296		val |= MIF_CONFIG_ATCA_GE;
 5297		nw64(MIF_CONFIG, val);
 5298	}
 5299
 5300	val = nr64_mac(XMAC_CONFIG);
 5301	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
 5302
 5303	val |= XMAC_CONFIG_TX_OUTPUT_EN;
 5304
 5305	if (lp->loopback_mode == LOOPBACK_MAC) {
 5306		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
 5307		val |= XMAC_CONFIG_LOOPBACK;
 5308	} else {
 5309		val &= ~XMAC_CONFIG_LOOPBACK;
 5310	}
 5311
 5312	if (np->flags & NIU_FLAGS_10G) {
 5313		val &= ~XMAC_CONFIG_LFS_DISABLE;
 5314	} else {
 5315		val |= XMAC_CONFIG_LFS_DISABLE;
 5316		if (!(np->flags & NIU_FLAGS_FIBER) &&
 5317		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
 5318			val |= XMAC_CONFIG_1G_PCS_BYPASS;
 5319		else
 5320			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
 5321	}
 5322
 5323	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
 5324
 5325	if (lp->active_speed == SPEED_100)
 5326		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
 5327	else
 5328		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
 5329
 5330	nw64_mac(XMAC_CONFIG, val);
 5331
 5332	val = nr64_mac(XMAC_CONFIG);
 5333	val &= ~XMAC_CONFIG_MODE_MASK;
 5334	if (np->flags & NIU_FLAGS_10G) {
 5335		val |= XMAC_CONFIG_MODE_XGMII;
 5336	} else {
 5337		if (lp->active_speed == SPEED_1000)
 5338			val |= XMAC_CONFIG_MODE_GMII;
 5339		else
 5340			val |= XMAC_CONFIG_MODE_MII;
 5341	}
 5342
 5343	nw64_mac(XMAC_CONFIG, val);
 5344}
 5345
 5346static void niu_init_xif_bmac(struct niu *np)
 5347{
 5348	struct niu_link_config *lp = &np->link_config;
 5349	u64 val;
 5350
 5351	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
 5352
 5353	if (lp->loopback_mode == LOOPBACK_MAC)
 5354		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
 5355	else
 5356		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
 5357
 5358	if (lp->active_speed == SPEED_1000)
 5359		val |= BMAC_XIF_CONFIG_GMII_MODE;
 5360	else
 5361		val &= ~BMAC_XIF_CONFIG_GMII_MODE;
 5362
 5363	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
 5364		 BMAC_XIF_CONFIG_LED_POLARITY);
 5365
 5366	if (!(np->flags & NIU_FLAGS_10G) &&
 5367	    !(np->flags & NIU_FLAGS_FIBER) &&
 5368	    lp->active_speed == SPEED_100)
 5369		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
 5370	else
 5371		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
 5372
 5373	nw64_mac(BMAC_XIF_CONFIG, val);
 5374}
 5375
 5376static void niu_init_xif(struct niu *np)
 5377{
 5378	if (np->flags & NIU_FLAGS_XMAC)
 5379		niu_init_xif_xmac(np);
 5380	else
 5381		niu_init_xif_bmac(np);
 5382}
 5383
 5384static void niu_pcs_mii_reset(struct niu *np)
 5385{
 5386	int limit = 1000;
 5387	u64 val = nr64_pcs(PCS_MII_CTL);
 5388	val |= PCS_MII_CTL_RST;
 5389	nw64_pcs(PCS_MII_CTL, val);
 5390	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
 5391		udelay(100);
 5392		val = nr64_pcs(PCS_MII_CTL);
 5393	}
 5394}
 5395
 5396static void niu_xpcs_reset(struct niu *np)
 5397{
 5398	int limit = 1000;
 5399	u64 val = nr64_xpcs(XPCS_CONTROL1);
 5400	val |= XPCS_CONTROL1_RESET;
 5401	nw64_xpcs(XPCS_CONTROL1, val);
 5402	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
 5403		udelay(100);
 5404		val = nr64_xpcs(XPCS_CONTROL1);
 5405	}
 5406}
 5407
 5408static int niu_init_pcs(struct niu *np)
 5409{
 5410	struct niu_link_config *lp = &np->link_config;
 5411	u64 val;
 5412
 5413	switch (np->flags & (NIU_FLAGS_10G |
 5414			     NIU_FLAGS_FIBER |
 5415			     NIU_FLAGS_XCVR_SERDES)) {
 5416	case NIU_FLAGS_FIBER:
 5417		/* 1G fiber */
 5418		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
 5419		nw64_pcs(PCS_DPATH_MODE, 0);
 5420		niu_pcs_mii_reset(np);
 5421		break;
 5422
 5423	case NIU_FLAGS_10G:
 5424	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 5425	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 5426		/* 10G SERDES */
 5427		if (!(np->flags & NIU_FLAGS_XMAC))
 5428			return -EINVAL;
 5429
 5430		/* 10G copper or fiber */
 5431		val = nr64_mac(XMAC_CONFIG);
 5432		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
 5433		nw64_mac(XMAC_CONFIG, val);
 5434
 5435		niu_xpcs_reset(np);
 5436
 5437		val = nr64_xpcs(XPCS_CONTROL1);
 5438		if (lp->loopback_mode == LOOPBACK_PHY)
 5439			val |= XPCS_CONTROL1_LOOPBACK;
 5440		else
 5441			val &= ~XPCS_CONTROL1_LOOPBACK;
 5442		nw64_xpcs(XPCS_CONTROL1, val);
 5443
 5444		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
 5445		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
 5446		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
 5447		break;
 5448
 5450	case NIU_FLAGS_XCVR_SERDES:
 5451		/* 1G SERDES */
 5452		niu_pcs_mii_reset(np);
 5453		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
 5454		nw64_pcs(PCS_DPATH_MODE, 0);
 5455		break;
 5456
 5457	case 0:
 5458		/* 1G copper */
 5459	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
 5460		/* 1G RGMII FIBER */
 5461		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
 5462		niu_pcs_mii_reset(np);
 5463		break;
 5464
 5465	default:
 5466		return -EINVAL;
 5467	}
 5468
 5469	return 0;
 5470}
 5471
 5472static int niu_reset_tx_xmac(struct niu *np)
 5473{
 5474	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
 5475					  (XTXMAC_SW_RST_REG_RS |
 5476					   XTXMAC_SW_RST_SOFT_RST),
 5477					  1000, 100, "XTXMAC_SW_RST");
 5478}
 5479
 5480static int niu_reset_tx_bmac(struct niu *np)
 5481{
 5482	int limit;
 5483
 5484	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
 5485	limit = 1000;
 5486	while (--limit >= 0) {
 5487		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
 5488			break;
 5489		udelay(100);
 5490	}
 5491	if (limit < 0) {
 5492		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
 5493			np->port,
 5494			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
 5495		return -ENODEV;
 5496	}
 5497
 5498	return 0;
 5499}
 5500
 5501static int niu_reset_tx_mac(struct niu *np)
 5502{
 5503	if (np->flags & NIU_FLAGS_XMAC)
 5504		return niu_reset_tx_xmac(np);
 5505	else
 5506		return niu_reset_tx_bmac(np);
 5507}
 5508
 5509static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
 5510{
 5511	u64 val;
 5512
 5513	val = nr64_mac(XMAC_MIN);
 5514	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
 5515		 XMAC_MIN_RX_MIN_PKT_SIZE);
 5516	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
 5517	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
 5518	nw64_mac(XMAC_MIN, val);
 5519
 5520	nw64_mac(XMAC_MAX, max);
 5521
 5522	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
 5523
 5524	val = nr64_mac(XMAC_IPG);
 5525	if (np->flags & NIU_FLAGS_10G) {
 5526		val &= ~XMAC_IPG_IPG_XGMII;
 5527		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
 5528	} else {
 5529		val &= ~XMAC_IPG_IPG_MII_GMII;
 5530		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
 5531	}
 5532	nw64_mac(XMAC_IPG, val);
 5533
 5534	val = nr64_mac(XMAC_CONFIG);
 5535	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
 5536		 XMAC_CONFIG_STRETCH_MODE |
 5537		 XMAC_CONFIG_VAR_MIN_IPG_EN |
 5538		 XMAC_CONFIG_TX_ENABLE);
 5539	nw64_mac(XMAC_CONFIG, val);
 5540
 5541	nw64_mac(TXMAC_FRM_CNT, 0);
 5542	nw64_mac(TXMAC_BYTE_CNT, 0);
 5543}
 5544
 5545static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
 5546{
 5547	u64 val;
 5548
 5549	nw64_mac(BMAC_MIN_FRAME, min);
 5550	nw64_mac(BMAC_MAX_FRAME, max);
 5551
 5552	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
 5553	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
 5554	nw64_mac(BMAC_PREAMBLE_SIZE, 7);
 5555
 5556	val = nr64_mac(BTXMAC_CONFIG);
 5557	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
 5558		 BTXMAC_CONFIG_ENABLE);
 5559	nw64_mac(BTXMAC_CONFIG, val);
 5560}
 5561
 5562static void niu_init_tx_mac(struct niu *np)
 5563{
 5564	u64 min, max;
 5565
 5566	min = 64;
 5567	if (np->dev->mtu > ETH_DATA_LEN)
 5568		max = 9216;
 5569	else
 5570		max = 1522;
 5571
 5572	/* The XMAC_MIN register only accepts values for TX min which
 5573	 * have the low 3 bits cleared.
 5574	 */
 5575	BUG_ON(min & 0x7);
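	/* min is 64 == 0x40 here, so the low three bits are clear. */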
 5576
 5577	if (np->flags & NIU_FLAGS_XMAC)
 5578		niu_init_tx_xmac(np, min, max);
 5579	else
 5580		niu_init_tx_bmac(np, min, max);
 5581}
 5582
 5583static int niu_reset_rx_xmac(struct niu *np)
 5584{
 5585	int limit;
 5586
 5587	nw64_mac(XRXMAC_SW_RST,
 5588		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
 5589	limit = 1000;
 5590	while (--limit >= 0) {
 5591		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
 5592						 XRXMAC_SW_RST_SOFT_RST)))
 5593			break;
 5594		udelay(100);
 5595	}
 5596	if (limit < 0) {
 5597		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
 5598			np->port,
 5599			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
 5600		return -ENODEV;
 5601	}
 5602
 5603	return 0;
 5604}
 5605
 5606static int niu_reset_rx_bmac(struct niu *np)
 5607{
 5608	int limit;
 5609
 5610	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
 5611	limit = 1000;
 5612	while (--limit >= 0) {
 5613		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
 5614			break;
 5615		udelay(100);
 5616	}
 5617	if (limit < 0) {
 5618		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
 5619			np->port,
 5620			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
 5621		return -ENODEV;
 5622	}
 5623
 5624	return 0;
 5625}
 5626
 5627static int niu_reset_rx_mac(struct niu *np)
 5628{
 5629	if (np->flags & NIU_FLAGS_XMAC)
 5630		return niu_reset_rx_xmac(np);
 5631	else
 5632		return niu_reset_rx_bmac(np);
 5633}
 5634
 5635static void niu_init_rx_xmac(struct niu *np)
 5636{
 5637	struct niu_parent *parent = np->parent;
 5638	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 5639	int first_rdc_table = tp->first_table_num;
 5640	unsigned long i;
 5641	u64 val;
 5642
 5643	nw64_mac(XMAC_ADD_FILT0, 0);
 5644	nw64_mac(XMAC_ADD_FILT1, 0);
 5645	nw64_mac(XMAC_ADD_FILT2, 0);
 5646	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
 5647	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
 5648	for (i = 0; i < MAC_NUM_HASH; i++)
 5649		nw64_mac(XMAC_HASH_TBL(i), 0);
 5650	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
 5651	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 5652	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 5653
 5654	val = nr64_mac(XMAC_CONFIG);
 5655	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
 5656		 XMAC_CONFIG_PROMISCUOUS |
 5657		 XMAC_CONFIG_PROMISC_GROUP |
 5658		 XMAC_CONFIG_ERR_CHK_DIS |
 5659		 XMAC_CONFIG_RX_CRC_CHK_DIS |
 5660		 XMAC_CONFIG_RESERVED_MULTICAST |
 5661		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
 5662		 XMAC_CONFIG_ADDR_FILTER_EN |
 5663		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
 5664		 XMAC_CONFIG_STRIP_CRC |
 5665		 XMAC_CONFIG_PASS_FLOW_CTRL |
 5666		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
 5667	val |= (XMAC_CONFIG_HASH_FILTER_EN);
 5668	nw64_mac(XMAC_CONFIG, val);
 5669
 5670	nw64_mac(RXMAC_BT_CNT, 0);
 5671	nw64_mac(RXMAC_BC_FRM_CNT, 0);
 5672	nw64_mac(RXMAC_MC_FRM_CNT, 0);
 5673	nw64_mac(RXMAC_FRAG_CNT, 0);
 5674	nw64_mac(RXMAC_HIST_CNT1, 0);
 5675	nw64_mac(RXMAC_HIST_CNT2, 0);
 5676	nw64_mac(RXMAC_HIST_CNT3, 0);
 5677	nw64_mac(RXMAC_HIST_CNT4, 0);
 5678	nw64_mac(RXMAC_HIST_CNT5, 0);
 5679	nw64_mac(RXMAC_HIST_CNT6, 0);
 5680	nw64_mac(RXMAC_HIST_CNT7, 0);
 5681	nw64_mac(RXMAC_MPSZER_CNT, 0);
 5682	nw64_mac(RXMAC_CRC_ER_CNT, 0);
 5683	nw64_mac(RXMAC_CD_VIO_CNT, 0);
 5684	nw64_mac(LINK_FAULT_CNT, 0);
 5685}
 5686
 5687static void niu_init_rx_bmac(struct niu *np)
 5688{
 5689	struct niu_parent *parent = np->parent;
 5690	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 5691	int first_rdc_table = tp->first_table_num;
 5692	unsigned long i;
 5693	u64 val;
 5694
 5695	nw64_mac(BMAC_ADD_FILT0, 0);
 5696	nw64_mac(BMAC_ADD_FILT1, 0);
 5697	nw64_mac(BMAC_ADD_FILT2, 0);
 5698	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
 5699	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
 5700	for (i = 0; i < MAC_NUM_HASH; i++)
 5701		nw64_mac(BMAC_HASH_TBL(i), 0);
 5702	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 5703	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 5704	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
 5705
 5706	val = nr64_mac(BRXMAC_CONFIG);
 5707	val &= ~(BRXMAC_CONFIG_ENABLE |
 5708		 BRXMAC_CONFIG_STRIP_PAD |
 5709		 BRXMAC_CONFIG_STRIP_FCS |
 5710		 BRXMAC_CONFIG_PROMISC |
 5711		 BRXMAC_CONFIG_PROMISC_GRP |
 5712		 BRXMAC_CONFIG_ADDR_FILT_EN |
 5713		 BRXMAC_CONFIG_DISCARD_DIS);
 5714	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
 5715	nw64_mac(BRXMAC_CONFIG, val);
 5716
 5717	val = nr64_mac(BMAC_ADDR_CMPEN);
 5718	val |= BMAC_ADDR_CMPEN_EN0;
 5719	nw64_mac(BMAC_ADDR_CMPEN, val);
 5720}
 5721
 5722static void niu_init_rx_mac(struct niu *np)
 5723{
 5724	niu_set_primary_mac(np, np->dev->dev_addr);
 5725
 5726	if (np->flags & NIU_FLAGS_XMAC)
 5727		niu_init_rx_xmac(np);
 5728	else
 5729		niu_init_rx_bmac(np);
 5730}
 5731
 5732static void niu_enable_tx_xmac(struct niu *np, int on)
 5733{
 5734	u64 val = nr64_mac(XMAC_CONFIG);
 5735
 5736	if (on)
 5737		val |= XMAC_CONFIG_TX_ENABLE;
 5738	else
 5739		val &= ~XMAC_CONFIG_TX_ENABLE;
 5740	nw64_mac(XMAC_CONFIG, val);
 5741}
 5742
 5743static void niu_enable_tx_bmac(struct niu *np, int on)
 5744{
 5745	u64 val = nr64_mac(BTXMAC_CONFIG);
 5746
 5747	if (on)
 5748		val |= BTXMAC_CONFIG_ENABLE;
 5749	else
 5750		val &= ~BTXMAC_CONFIG_ENABLE;
 5751	nw64_mac(BTXMAC_CONFIG, val);
 5752}
 5753
 5754static void niu_enable_tx_mac(struct niu *np, int on)
 5755{
 5756	if (np->flags & NIU_FLAGS_XMAC)
 5757		niu_enable_tx_xmac(np, on);
 5758	else
 5759		niu_enable_tx_bmac(np, on);
 5760}
 5761
 5762static void niu_enable_rx_xmac(struct niu *np, int on)
 5763{
 5764	u64 val = nr64_mac(XMAC_CONFIG);
 5765
 5766	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
 5767		 XMAC_CONFIG_PROMISCUOUS);
 5768
 5769	if (np->flags & NIU_FLAGS_MCAST)
 5770		val |= XMAC_CONFIG_HASH_FILTER_EN;
 5771	if (np->flags & NIU_FLAGS_PROMISC)
 5772		val |= XMAC_CONFIG_PROMISCUOUS;
 5773
 5774	if (on)
 5775		val |= XMAC_CONFIG_RX_MAC_ENABLE;
 5776	else
 5777		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
 5778	nw64_mac(XMAC_CONFIG, val);
 5779}
 5780
 5781static void niu_enable_rx_bmac(struct niu *np, int on)
 5782{
 5783	u64 val = nr64_mac(BRXMAC_CONFIG);
 5784
 5785	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
 5786		 BRXMAC_CONFIG_PROMISC);
 5787
 5788	if (np->flags & NIU_FLAGS_MCAST)
 5789		val |= BRXMAC_CONFIG_HASH_FILT_EN;
 5790	if (np->flags & NIU_FLAGS_PROMISC)
 5791		val |= BRXMAC_CONFIG_PROMISC;
 5792
 5793	if (on)
 5794		val |= BRXMAC_CONFIG_ENABLE;
 5795	else
 5796		val &= ~BRXMAC_CONFIG_ENABLE;
 5797	nw64_mac(BRXMAC_CONFIG, val);
 5798}
 5799
 5800static void niu_enable_rx_mac(struct niu *np, int on)
 5801{
 5802	if (np->flags & NIU_FLAGS_XMAC)
 5803		niu_enable_rx_xmac(np, on);
 5804	else
 5805		niu_enable_rx_bmac(np, on);
 5806}
 5807
 5808static int niu_init_mac(struct niu *np)
 5809{
 5810	int err;
 5811
 5812	niu_init_xif(np);
 5813	err = niu_init_pcs(np);
 5814	if (err)
 5815		return err;
 5816
 5817	err = niu_reset_tx_mac(np);
 5818	if (err)
 5819		return err;
 5820	niu_init_tx_mac(np);
 5821	err = niu_reset_rx_mac(np);
 5822	if (err)
 5823		return err;
 5824	niu_init_rx_mac(np);
 5825
	/* This looks hokey but the RX MAC reset we just did will
	 * undo some of the state we set up in niu_init_tx_mac() so we
	 * have to call it again.  In particular, the RX MAC reset will
	 * set the XMAC_MAX register back to its default value.
	 */
 5831	niu_init_tx_mac(np);
 5832	niu_enable_tx_mac(np, 1);
 5833
 5834	niu_enable_rx_mac(np, 1);
 5835
 5836	return 0;
 5837}
 5838
 5839static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 5840{
 5841	(void) niu_tx_channel_stop(np, rp->tx_channel);
 5842}
 5843
 5844static void niu_stop_tx_channels(struct niu *np)
 5845{
 5846	int i;
 5847
 5848	for (i = 0; i < np->num_tx_rings; i++) {
 5849		struct tx_ring_info *rp = &np->tx_rings[i];
 5850
 5851		niu_stop_one_tx_channel(np, rp);
 5852	}
 5853}
 5854
 5855static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 5856{
 5857	(void) niu_tx_channel_reset(np, rp->tx_channel);
 5858}
 5859
 5860static void niu_reset_tx_channels(struct niu *np)
 5861{
 5862	int i;
 5863
 5864	for (i = 0; i < np->num_tx_rings; i++) {
 5865		struct tx_ring_info *rp = &np->tx_rings[i];
 5866
 5867		niu_reset_one_tx_channel(np, rp);
 5868	}
 5869}
 5870
 5871static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 5872{
 5873	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
 5874}
 5875
 5876static void niu_stop_rx_channels(struct niu *np)
 5877{
 5878	int i;
 5879
 5880	for (i = 0; i < np->num_rx_rings; i++) {
 5881		struct rx_ring_info *rp = &np->rx_rings[i];
 5882
 5883		niu_stop_one_rx_channel(np, rp);
 5884	}
 5885}
 5886
 5887static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 5888{
 5889	int channel = rp->rx_channel;
 5890
 5891	(void) niu_rx_channel_reset(np, channel);
 5892	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
 5893	nw64(RX_DMA_CTL_STAT(channel), 0);
 5894	(void) niu_enable_rx_channel(np, channel, 0);
 5895}
 5896
 5897static void niu_reset_rx_channels(struct niu *np)
 5898{
 5899	int i;
 5900
 5901	for (i = 0; i < np->num_rx_rings; i++) {
 5902		struct rx_ring_info *rp = &np->rx_rings[i];
 5903
 5904		niu_reset_one_rx_channel(np, rp);
 5905	}
 5906}
 5907
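/* Quiesce the IPP: wait for the DFIFO read and write pointers to
 * meet, then clear the enable bits and soft-reset the block.
 * XXX The (rd != 0 && wr != 1) timeout test below looks suspect;
 * it only reports a failure when both pointers are away from
 * their reset values.
 */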
 5908static void niu_disable_ipp(struct niu *np)
 5909{
 5910	u64 rd, wr, val;
 5911	int limit;
 5912
 5913	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
 5914	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
 5915	limit = 100;
 5916	while (--limit >= 0 && (rd != wr)) {
 5917		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
 5918		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
 5919	}
 5920	if (limit < 0 &&
 5921	    (rd != 0 && wr != 1)) {
 5922		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
 5923			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
 5924			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
 5925	}
 5926
 5927	val = nr64_ipp(IPP_CFIG);
 5928	val &= ~(IPP_CFIG_IPP_ENABLE |
 5929		 IPP_CFIG_DFIFO_ECC_EN |
 5930		 IPP_CFIG_DROP_BAD_CRC |
 5931		 IPP_CFIG_CKSUM_EN);
 5932	nw64_ipp(IPP_CFIG, val);
 5933
 5934	(void) niu_ipp_reset(np);
 5935}
 5936
 5937static int niu_init_hw(struct niu *np)
 5938{
 5939	int i, err;
 5940
 5941	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
 5942	niu_txc_enable_port(np, 1);
 5943	niu_txc_port_dma_enable(np, 1);
 5944	niu_txc_set_imask(np, 0);
 5945
 5946	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
 5947	for (i = 0; i < np->num_tx_rings; i++) {
 5948		struct tx_ring_info *rp = &np->tx_rings[i];
 5949
 5950		err = niu_init_one_tx_channel(np, rp);
 5951		if (err)
 5952			return err;
 5953	}
 5954
 5955	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
 5956	err = niu_init_rx_channels(np);
 5957	if (err)
 5958		goto out_uninit_tx_channels;
 5959
 5960	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
 5961	err = niu_init_classifier_hw(np);
 5962	if (err)
 5963		goto out_uninit_rx_channels;
 5964
 5965	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
 5966	err = niu_init_zcp(np);
 5967	if (err)
 5968		goto out_uninit_rx_channels;
 5969
 5970	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
 5971	err = niu_init_ipp(np);
 5972	if (err)
 5973		goto out_uninit_rx_channels;
 5974
 5975	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
 5976	err = niu_init_mac(np);
 5977	if (err)
 5978		goto out_uninit_ipp;
 5979
 5980	return 0;
 5981
 5982out_uninit_ipp:
 5983	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
 5984	niu_disable_ipp(np);
 5985
 5986out_uninit_rx_channels:
 5987	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
 5988	niu_stop_rx_channels(np);
 5989	niu_reset_rx_channels(np);
 5990
 5991out_uninit_tx_channels:
 5992	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
 5993	niu_stop_tx_channels(np);
 5994	niu_reset_tx_channels(np);
 5995
 5996	return err;
 5997}
 5998
 5999static void niu_stop_hw(struct niu *np)
 6000{
 6001	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
 6002	niu_enable_interrupts(np, 0);
 6003
 6004	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
 6005	niu_enable_rx_mac(np, 0);
 6006
 6007	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
 6008	niu_disable_ipp(np);
 6009
 6010	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
 6011	niu_stop_tx_channels(np);
 6012
 6013	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
 6014	niu_stop_rx_channels(np);
 6015
 6016	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
 6017	niu_reset_tx_channels(np);
 6018
 6019	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
 6020	niu_reset_rx_channels(np);
 6021}
 6022
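/* Build the per-vector IRQ names.  For port 0 of a device named
 * "eth0", for example, this produces "eth0:MAC", "eth0:MIF",
 * "eth0:SYSERR" and then "eth0-rx-0", "eth0-rx-1", ... followed
 * by the "eth0-tx-N" entries.
 */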
 6023static void niu_set_irq_name(struct niu *np)
 6024{
 6025	int port = np->port;
 6026	int i, j = 1;
 6027
 6028	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
 6029
 6030	if (port == 0) {
 6031		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
 6032		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
 6033		j = 3;
 6034	}
 6035
 6036	for (i = 0; i < np->num_ldg - j; i++) {
 6037		if (i < np->num_rx_rings)
 6038			sprintf(np->irq_name[i+j], "%s-rx-%d",
 6039				np->dev->name, i);
 6040		else if (i < np->num_tx_rings + np->num_rx_rings)
 6041			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
 6042				i - np->num_rx_rings);
 6043	}
 6044}
 6045
 6046static int niu_request_irq(struct niu *np)
 6047{
 6048	int i, j, err;
 6049
 6050	niu_set_irq_name(np);
 6051
 6052	err = 0;
 6053	for (i = 0; i < np->num_ldg; i++) {
 6054		struct niu_ldg *lp = &np->ldg[i];
 6055
 6056		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
 6057				  np->irq_name[i], lp);
 6058		if (err)
 6059			goto out_free_irqs;
 6060
 6061	}
 6062
 6063	return 0;
 6064
 6065out_free_irqs:
 6066	for (j = 0; j < i; j++) {
 6067		struct niu_ldg *lp = &np->ldg[j];
 6068
 6069		free_irq(lp->irq, lp);
 6070	}
 6071	return err;
 6072}
 6073
 6074static void niu_free_irq(struct niu *np)
 6075{
 6076	int i;
 6077
 6078	for (i = 0; i < np->num_ldg; i++) {
 6079		struct niu_ldg *lp = &np->ldg[i];
 6080
 6081		free_irq(lp->irq, lp);
 6082	}
 6083}
 6084
 6085static void niu_enable_napi(struct niu *np)
 6086{
 6087	int i;
 6088
 6089	for (i = 0; i < np->num_ldg; i++)
 6090		napi_enable(&np->ldg[i].napi);
 6091}
 6092
 6093static void niu_disable_napi(struct niu *np)
 6094{
 6095	int i;
 6096
 6097	for (i = 0; i < np->num_ldg; i++)
 6098		napi_disable(&np->ldg[i].napi);
 6099}
 6100
 6101static int niu_open(struct net_device *dev)
 6102{
 6103	struct niu *np = netdev_priv(dev);
 6104	int err;
 6105
 6106	netif_carrier_off(dev);
 6107
 6108	err = niu_alloc_channels(np);
 6109	if (err)
 6110		goto out_err;
 6111
 6112	err = niu_enable_interrupts(np, 0);
 6113	if (err)
 6114		goto out_free_channels;
 6115
 6116	err = niu_request_irq(np);
 6117	if (err)
 6118		goto out_free_channels;
 6119
 6120	niu_enable_napi(np);
 6121
 6122	spin_lock_irq(&np->lock);
 6123
 6124	err = niu_init_hw(np);
 6125	if (!err) {
 6126		timer_setup(&np->timer, niu_timer, 0);
 6127		np->timer.expires = jiffies + HZ;
 6128
 6129		err = niu_enable_interrupts(np, 1);
 6130		if (err)
 6131			niu_stop_hw(np);
 6132	}
 6133
 6134	spin_unlock_irq(&np->lock);
 6135
 6136	if (err) {
 6137		niu_disable_napi(np);
 6138		goto out_free_irq;
 6139	}
 6140
 6141	netif_tx_start_all_queues(dev);
 6142
 6143	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 6144		netif_carrier_on(dev);
 6145
 6146	add_timer(&np->timer);
 6147
 6148	return 0;
 6149
 6150out_free_irq:
 6151	niu_free_irq(np);
 6152
 6153out_free_channels:
 6154	niu_free_channels(np);
 6155
 6156out_err:
 6157	return err;
 6158}
 6159
 6160static void niu_full_shutdown(struct niu *np, struct net_device *dev)
 6161{
 6162	cancel_work_sync(&np->reset_task);
 6163
 6164	niu_disable_napi(np);
 6165	netif_tx_stop_all_queues(dev);
 6166
 6167	del_timer_sync(&np->timer);
 6168
 6169	spin_lock_irq(&np->lock);
 6170
 6171	niu_stop_hw(np);
 6172
 6173	spin_unlock_irq(&np->lock);
 6174}
 6175
 6176static int niu_close(struct net_device *dev)
 6177{
 6178	struct niu *np = netdev_priv(dev);
 6179
 6180	niu_full_shutdown(np, dev);
 6181
 6182	niu_free_irq(np);
 6183
 6184	niu_free_channels(np);
 6185
 6186	niu_handle_led(np, 0);
 6187
 6188	return 0;
 6189}
 6190
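/* Accumulate the XMAC hardware counter registers into the software
 * copies kept in np->mac_stats.xmac.
 */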
 6191static void niu_sync_xmac_stats(struct niu *np)
 6192{
 6193	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
 6194
 6195	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
 6196	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
 6197
 6198	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
 6199	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
 6200	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
 6201	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
 6202	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
 6203	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
 6204	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
 6205	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
 6206	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
 6207	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
 6208	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
 6209	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
 6210	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
 6211	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
 6212	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
 6213	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
 6214}
 6215
 6216static void niu_sync_bmac_stats(struct niu *np)
 6217{
 6218	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
 6219
 6220	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
 6221	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
 6222
 6223	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
 6224	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
 6225	mp->rx_crc_errors += nr64_mac(BRXMAC_CRC_ERR_CNT);
 6226	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
 6227}
 6228
 6229static void niu_sync_mac_stats(struct niu *np)
 6230{
 6231	if (np->flags & NIU_FLAGS_XMAC)
 6232		niu_sync_xmac_stats(np);
 6233	else
 6234		niu_sync_bmac_stats(np);
 6235}
 6236
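/* Sum the per-ring RX counters into the rtnl_link_stats64.  The ring
 * array may be torn down concurrently (e.g. across an MTU change),
 * hence the READ_ONCE() and the bail-out when no rings are present.
 */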
 6237static void niu_get_rx_stats(struct niu *np,
 6238			     struct rtnl_link_stats64 *stats)
 6239{
 6240	u64 pkts, dropped, errors, bytes;
 6241	struct rx_ring_info *rx_rings;
 6242	int i;
 6243
 6244	pkts = dropped = errors = bytes = 0;
 6245
 6246	rx_rings = READ_ONCE(np->rx_rings);
 6247	if (!rx_rings)
 6248		goto no_rings;
 6249
 6250	for (i = 0; i < np->num_rx_rings; i++) {
 6251		struct rx_ring_info *rp = &rx_rings[i];
 6252
 6253		niu_sync_rx_discard_stats(np, rp, 0);
 6254
 6255		pkts += rp->rx_packets;
 6256		bytes += rp->rx_bytes;
 6257		dropped += rp->rx_dropped;
 6258		errors += rp->rx_errors;
 6259	}
 6260
 6261no_rings:
 6262	stats->rx_packets = pkts;
 6263	stats->rx_bytes = bytes;
 6264	stats->rx_dropped = dropped;
 6265	stats->rx_errors = errors;
 6266}
 6267
 6268static void niu_get_tx_stats(struct niu *np,
 6269			     struct rtnl_link_stats64 *stats)
 6270{
 6271	u64 pkts, errors, bytes;
 6272	struct tx_ring_info *tx_rings;
 6273	int i;
 6274
 6275	pkts = errors = bytes = 0;
 6276
 6277	tx_rings = READ_ONCE(np->tx_rings);
 6278	if (!tx_rings)
 6279		goto no_rings;
 6280
 6281	for (i = 0; i < np->num_tx_rings; i++) {
 6282		struct tx_ring_info *rp = &tx_rings[i];
 6283
 6284		pkts += rp->tx_packets;
 6285		bytes += rp->tx_bytes;
 6286		errors += rp->tx_errors;
 6287	}
 6288
 6289no_rings:
 6290	stats->tx_packets = pkts;
 6291	stats->tx_bytes = bytes;
 6292	stats->tx_errors = errors;
 6293}
 6294
 6295static void niu_get_stats(struct net_device *dev,
 6296			  struct rtnl_link_stats64 *stats)
 6297{
 6298	struct niu *np = netdev_priv(dev);
 6299
 6300	if (netif_running(dev)) {
 6301		niu_get_rx_stats(np, stats);
 6302		niu_get_tx_stats(np, stats);
 6303	}
 6304}
 6305
 6306static void niu_load_hash_xmac(struct niu *np, u16 *hash)
 6307{
 6308	int i;
 6309
 6310	for (i = 0; i < 16; i++)
 6311		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
 6312}
 6313
 6314static void niu_load_hash_bmac(struct niu *np, u16 *hash)
 6315{
 6316	int i;
 6317
 6318	for (i = 0; i < 16; i++)
 6319		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
 6320}
 6321
 6322static void niu_load_hash(struct niu *np, u16 *hash)
 6323{
 6324	if (np->flags & NIU_FLAGS_XMAC)
 6325		niu_load_hash_xmac(np, hash);
 6326	else
 6327		niu_load_hash_bmac(np, hash);
 6328}
 6329
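/* Program RX filtering: the promiscuous and multicast flags, the
 * alternate unicast address slots (falling back to promiscuous mode
 * when there are more addresses than slots), and the 256-bit
 * multicast hash.  The hash index is the top byte of the
 * little-endian CRC of the address, spread over sixteen 16-bit
 * table words.
 */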
 6330static void niu_set_rx_mode(struct net_device *dev)
 6331{
 6332	struct niu *np = netdev_priv(dev);
 6333	int i, alt_cnt, err;
 6334	struct netdev_hw_addr *ha;
 6335	unsigned long flags;
 6336	u16 hash[16] = { 0, };
 6337
 6338	spin_lock_irqsave(&np->lock, flags);
 6339	niu_enable_rx_mac(np, 0);
 6340
 6341	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
 6342	if (dev->flags & IFF_PROMISC)
 6343		np->flags |= NIU_FLAGS_PROMISC;
 6344	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
 6345		np->flags |= NIU_FLAGS_MCAST;
 6346
 6347	alt_cnt = netdev_uc_count(dev);
 6348	if (alt_cnt > niu_num_alt_addr(np)) {
 6349		alt_cnt = 0;
 6350		np->flags |= NIU_FLAGS_PROMISC;
 6351	}
 6352
 6353	if (alt_cnt) {
 6354		int index = 0;
 6355
 6356		netdev_for_each_uc_addr(ha, dev) {
 6357			err = niu_set_alt_mac(np, index, ha->addr);
 6358			if (err)
 6359				netdev_warn(dev, "Error %d adding alt mac %d\n",
 6360					    err, index);
 6361			err = niu_enable_alt_mac(np, index, 1);
 6362			if (err)
 6363				netdev_warn(dev, "Error %d enabling alt mac %d\n",
 6364					    err, index);
 6365
 6366			index++;
 6367		}
 6368	} else {
 6369		int alt_start;
 6370		if (np->flags & NIU_FLAGS_XMAC)
 6371			alt_start = 0;
 6372		else
 6373			alt_start = 1;
 6374		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
 6375			err = niu_enable_alt_mac(np, i, 0);
 6376			if (err)
 6377				netdev_warn(dev, "Error %d disabling alt mac %d\n",
 6378					    err, i);
 6379		}
 6380	}
 6381	if (dev->flags & IFF_ALLMULTI) {
 6382		for (i = 0; i < 16; i++)
 6383			hash[i] = 0xffff;
 6384	} else if (!netdev_mc_empty(dev)) {
 6385		netdev_for_each_mc_addr(ha, dev) {
 6386			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
 6387
 6388			crc >>= 24;
 6389			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
 6390		}
 6391	}
 6392
 6393	if (np->flags & NIU_FLAGS_MCAST)
 6394		niu_load_hash(np, hash);
 6395
 6396	niu_enable_rx_mac(np, 1);
 6397	spin_unlock_irqrestore(&np->lock, flags);
 6398}
 6399
 6400static int niu_set_mac_addr(struct net_device *dev, void *p)
 6401{
 6402	struct niu *np = netdev_priv(dev);
 6403	struct sockaddr *addr = p;
 6404	unsigned long flags;
 6405
 6406	if (!is_valid_ether_addr(addr->sa_data))
 6407		return -EADDRNOTAVAIL;
 6408
 6409	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 6410
 6411	if (!netif_running(dev))
 6412		return 0;
 6413
 6414	spin_lock_irqsave(&np->lock, flags);
 6415	niu_enable_rx_mac(np, 0);
 6416	niu_set_primary_mac(np, dev->dev_addr);
 6417	niu_enable_rx_mac(np, 1);
 6418	spin_unlock_irqrestore(&np->lock, flags);
 6419
 6420	return 0;
 6421}
 6422
 6423static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 6424{
 6425	return -EOPNOTSUPP;
 6426}
 6427
 6428static void niu_netif_stop(struct niu *np)
 6429{
 6430	netif_trans_update(np->dev);	/* prevent tx timeout */
 6431
 6432	niu_disable_napi(np);
 6433
 6434	netif_tx_disable(np->dev);
 6435}
 6436
 6437static void niu_netif_start(struct niu *np)
 6438{
 6439	/* NOTE: unconditionally waking all TX queues is only appropriate
 6440	 * so long as all callers are assured to have free tx slots
 6441	 * (such as after niu_init_hw).
 6442	 */
 6443	netif_tx_wake_all_queues(np->dev);
 6444
 6445	niu_enable_napi(np);
 6446
 6447	niu_enable_interrupts(np, 1);
 6448}
 6449
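/* Rebuild ring state after a reset without a full reallocation:
 * re-link the RX pages still held in rxhash back into the RBR,
 * top the table up with fresh pages, release any TX packets still
 * in flight, and rewind the producer/consumer indices.
 */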
 6450static void niu_reset_buffers(struct niu *np)
 6451{
 6452	int i, j, k, err;
 6453
 6454	if (np->rx_rings) {
 6455		for (i = 0; i < np->num_rx_rings; i++) {
 6456			struct rx_ring_info *rp = &np->rx_rings[i];
 6457
 6458			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
 6459				struct page *page;
 6460
 6461				page = rp->rxhash[j];
 6462				while (page) {
 6463					struct page *next =
 6464						(struct page *) page->mapping;
 6465					u64 base = page->index;
 6466					base = base >> RBR_DESCR_ADDR_SHIFT;
 6467					rp->rbr[k++] = cpu_to_le32(base);
 6468					page = next;
 6469				}
 6470			}
 6471			for (; k < MAX_RBR_RING_SIZE; k++) {
 6472				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
 6473				if (unlikely(err))
 6474					break;
 6475			}
 6476
 6477			rp->rbr_index = rp->rbr_table_size - 1;
 6478			rp->rcr_index = 0;
 6479			rp->rbr_pending = 0;
 6480			rp->rbr_refill_pending = 0;
 6481		}
 6482	}
 6483	if (np->tx_rings) {
 6484		for (i = 0; i < np->num_tx_rings; i++) {
 6485			struct tx_ring_info *rp = &np->tx_rings[i];
 6486
 6487			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
 6488				if (rp->tx_buffs[j].skb)
 6489					(void) release_tx_packet(np, rp, j);
 6490			}
 6491
 6492			rp->pending = MAX_TX_RING_SIZE;
 6493			rp->prod = 0;
 6494			rp->cons = 0;
 6495			rp->wrap_bit = 0;
 6496		}
 6497	}
 6498}
 6499
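/* Reset worker scheduled from niu_tx_timeout(): stop the hardware,
 * rebuild the ring buffers, then bring the chip back up, restarting
 * the timer and queues only if re-initialization succeeds.
 */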
 6500static void niu_reset_task(struct work_struct *work)
 6501{
 6502	struct niu *np = container_of(work, struct niu, reset_task);
 6503	unsigned long flags;
 6504	int err;
 6505
 6506	spin_lock_irqsave(&np->lock, flags);
 6507	if (!netif_running(np->dev)) {
 6508		spin_unlock_irqrestore(&np->lock, flags);
 6509		return;
 6510	}
 6511
 6512	spin_unlock_irqrestore(&np->lock, flags);
 6513
 6514	del_timer_sync(&np->timer);
 6515
 6516	niu_netif_stop(np);
 6517
 6518	spin_lock_irqsave(&np->lock, flags);
 6519
 6520	niu_stop_hw(np);
 6521
 6522	spin_unlock_irqrestore(&np->lock, flags);
 6523
 6524	niu_reset_buffers(np);
 6525
 6526	spin_lock_irqsave(&np->lock, flags);
 6527
 6528	err = niu_init_hw(np);
 6529	if (!err) {
 6530		np->timer.expires = jiffies + HZ;
 6531		add_timer(&np->timer);
 6532		niu_netif_start(np);
 6533	}
 6534
 6535	spin_unlock_irqrestore(&np->lock, flags);
 6536}
 6537
 6538static void niu_tx_timeout(struct net_device *dev)
 6539{
 6540	struct niu *np = netdev_priv(dev);
 6541
 6542	dev_err(np->device, "%s: Transmit timed out, resetting\n",
 6543		dev->name);
 6544
 6545	schedule_work(&np->reset_task);
 6546}
 6547
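/* Write one TX descriptor: the SOP/mark bits, the descriptor count
 * for the packet, the fragment length and the DMA address, packed
 * into a single little-endian 64-bit word.
 */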
 6548static void niu_set_txd(struct tx_ring_info *rp, int index,
 6549			u64 mapping, u64 len, u64 mark,
 6550			u64 n_frags)
 6551{
 6552	__le64 *desc = &rp->descr[index];
 6553
 6554	*desc = cpu_to_le64(mark |
 6555			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
 6556			    (len << TX_DESC_TR_LEN_SHIFT) |
 6557			    (mapping & TX_DESC_SAD));
 6558}
 6559
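/* Build the flags word of the software tx_pkt_hdr prepended to every
 * frame.  The L3 start, L4 start and L4 stuff offsets are expressed
 * in 2-byte units relative to that header, which is why the byte
 * offsets are halved.
 */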
 6560static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
 6561				u64 pad_bytes, u64 len)
 6562{
 6563	u16 eth_proto, eth_proto_inner;
 6564	u64 csum_bits, l3off, ihl, ret;
 6565	u8 ip_proto;
 6566	int ipv6;
 6567
 6568	eth_proto = be16_to_cpu(ehdr->h_proto);
 6569	eth_proto_inner = eth_proto;
 6570	if (eth_proto == ETH_P_8021Q) {
 6571		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
 6572		__be16 val = vp->h_vlan_encapsulated_proto;
 6573
 6574		eth_proto_inner = be16_to_cpu(val);
 6575	}
 6576
 6577	ipv6 = ihl = 0;
 6578	switch (skb->protocol) {
 6579	case cpu_to_be16(ETH_P_IP):
 6580		ip_proto = ip_hdr(skb)->protocol;
 6581		ihl = ip_hdr(skb)->ihl;
 6582		break;
 6583	case cpu_to_be16(ETH_P_IPV6):
 6584		ip_proto = ipv6_hdr(skb)->nexthdr;
 6585		ihl = (40 >> 2);
 6586		ipv6 = 1;
 6587		break;
 6588	default:
 6589		ip_proto = ihl = 0;
 6590		break;
 6591	}
 6592
 6593	csum_bits = TXHDR_CSUM_NONE;
 6594	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 6595		u64 start, stuff;
 6596
 6597		csum_bits = (ip_proto == IPPROTO_TCP ?
 6598			     TXHDR_CSUM_TCP :
 6599			     (ip_proto == IPPROTO_UDP ?
 6600			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
 6601
 6602		start = skb_checksum_start_offset(skb) -
 6603			(pad_bytes + sizeof(struct tx_pkt_hdr));
 6604		stuff = start + skb->csum_offset;
 6605
 6606		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
 6607		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
 6608	}
 6609
 6610	l3off = skb_network_offset(skb) -
 6611		(pad_bytes + sizeof(struct tx_pkt_hdr));
 6612
 6613	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
 6614	       (len << TXHDR_LEN_SHIFT) |
 6615	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
 6616	       (ihl << TXHDR_IHL_SHIFT) |
 6617	       ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
 6618	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
 6619	       (ipv6 ? TXHDR_IP_VER : 0) |
 6620	       csum_bits);
 6621
 6622	return ret;
 6623}
 6624
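/* Hard transmit path: reserve headroom for the tx_pkt_hdr plus
 * alignment padding, map the linear area and the page fragments,
 * carve the linear area into MAX_TX_DESC_LEN sized descriptors,
 * then kick the channel with the new producer index and wrap bit.
 */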
 6625static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 6626				  struct net_device *dev)
 6627{
 6628	struct niu *np = netdev_priv(dev);
 6629	unsigned long align, headroom;
 6630	struct netdev_queue *txq;
 6631	struct tx_ring_info *rp;
 6632	struct tx_pkt_hdr *tp;
 6633	unsigned int len, nfg;
 6634	struct ethhdr *ehdr;
 6635	int prod, i, tlen;
 6636	u64 mapping, mrk;
 6637
 6638	i = skb_get_queue_mapping(skb);
 6639	rp = &np->tx_rings[i];
 6640	txq = netdev_get_tx_queue(dev, i);
 6641
 6642	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 6643		netif_tx_stop_queue(txq);
 6644		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
 6645		rp->tx_errors++;
 6646		return NETDEV_TX_BUSY;
 6647	}
 6648
 6649	if (eth_skb_pad(skb))
 6650		goto out;
 6651
 6652	len = sizeof(struct tx_pkt_hdr) + 15;
 6653	if (skb_headroom(skb) < len) {
 6654		struct sk_buff *skb_new;
 6655
 6656		skb_new = skb_realloc_headroom(skb, len);
 6657		if (!skb_new)
 6658			goto out_drop;
 6659		kfree_skb(skb);
 6660		skb = skb_new;
 6661	} else
 6662		skb_orphan(skb);
 6663
 6664	align = ((unsigned long) skb->data & (16 - 1));
 6665	headroom = align + sizeof(struct tx_pkt_hdr);
 6666
 6667	ehdr = (struct ethhdr *) skb->data;
 6668	tp = skb_push(skb, headroom);
 6669
 6670	len = skb->len - sizeof(struct tx_pkt_hdr);
 6671	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
 6672	tp->resv = 0;
 6673
 6674	len = skb_headlen(skb);
 6675	mapping = np->ops->map_single(np->device, skb->data,
 6676				      len, DMA_TO_DEVICE);
 6677
 6678	prod = rp->prod;
 6679
 6680	rp->tx_buffs[prod].skb = skb;
 6681	rp->tx_buffs[prod].mapping = mapping;
 6682
 6683	mrk = TX_DESC_SOP;
 6684	if (++rp->mark_counter == rp->mark_freq) {
 6685		rp->mark_counter = 0;
 6686		mrk |= TX_DESC_MARK;
 6687		rp->mark_pending++;
 6688	}
 6689
 6690	tlen = len;
 6691	nfg = skb_shinfo(skb)->nr_frags;
 6692	while (tlen > 0) {
 6693		tlen -= MAX_TX_DESC_LEN;
 6694		nfg++;
 6695	}
 6696
 6697	while (len > 0) {
 6698		unsigned int this_len = len;
 6699
 6700		if (this_len > MAX_TX_DESC_LEN)
 6701			this_len = MAX_TX_DESC_LEN;
 6702
 6703		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
 6704		mrk = nfg = 0;
 6705
 6706		prod = NEXT_TX(rp, prod);
 6707		mapping += this_len;
 6708		len -= this_len;
 6709	}
 6710
 6711	for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
 6712		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 6713
 6714		len = skb_frag_size(frag);
 6715		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
 6716					    frag->page_offset, len,
 6717					    DMA_TO_DEVICE);
 6718
 6719		rp->tx_buffs[prod].skb = NULL;
 6720		rp->tx_buffs[prod].mapping = mapping;
 6721
 6722		niu_set_txd(rp, prod, mapping, len, 0, 0);
 6723
 6724		prod = NEXT_TX(rp, prod);
 6725	}
 6726
 6727	if (prod < rp->prod)
 6728		rp->wrap_bit ^= TX_RING_KICK_WRAP;
 6729	rp->prod = prod;
 6730
 6731	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
 6732
 6733	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
 6734		netif_tx_stop_queue(txq);
 6735		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
 6736			netif_tx_wake_queue(txq);
 6737	}
 6738
 6739out:
 6740	return NETDEV_TX_OK;
 6741
 6742out_drop:
 6743	rp->tx_errors++;
 6744	kfree_skb(skb);
 6745	goto out;
 6746}
 6747
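/* Switching between standard and jumbo MTU requires a full teardown
 * and re-init, since channel buffer setup depends on whether jumbo
 * frames are in use; otherwise updating dev->mtu is sufficient.
 */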
 6748static int niu_change_mtu(struct net_device *dev, int new_mtu)
 6749{
 6750	struct niu *np = netdev_priv(dev);
 6751	int err, orig_jumbo, new_jumbo;
 6752
 6753	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
 6754	new_jumbo = (new_mtu > ETH_DATA_LEN);
 6755
 6756	dev->mtu = new_mtu;
 6757
 6758	if (!netif_running(dev) ||
 6759	    (orig_jumbo == new_jumbo))
 6760		return 0;
 6761
 6762	niu_full_shutdown(np, dev);
 6763
 6764	niu_free_channels(np);
 6765
 6766	niu_enable_napi(np);
 6767
 6768	err = niu_alloc_channels(np);
 6769	if (err)
 6770		return err;
 6771
 6772	spin_lock_irq(&np->lock);
 6773
 6774	err = niu_init_hw(np);
 6775	if (!err) {
 6776		timer_setup(&np->timer, niu_timer, 0);
 6777		np->timer.expires = jiffies + HZ;
 6778
 6779		err = niu_enable_interrupts(np, 1);
 6780		if (err)
 6781			niu_stop_hw(np);
 6782	}
 6783
 6784	spin_unlock_irq(&np->lock);
 6785
 6786	if (!err) {
 6787		netif_tx_start_all_queues(dev);
 6788		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 6789			netif_carrier_on(dev);
 6790
 6791		add_timer(&np->timer);
 6792	}
 6793
 6794	return err;
 6795}
 6796
 6797static void niu_get_drvinfo(struct net_device *dev,
 6798			    struct ethtool_drvinfo *info)
 6799{
 6800	struct niu *np = netdev_priv(dev);
 6801	struct niu_vpd *vpd = &np->vpd;
 6802
 6803	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
 6804	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 6805	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
 6806		vpd->fcode_major, vpd->fcode_minor);
 6807	if (np->parent->plat_type != PLAT_TYPE_NIU)
 6808		strlcpy(info->bus_info, pci_name(np->pdev),
 6809			sizeof(info->bus_info));
 6810}
 6811
 6812static int niu_get_link_ksettings(struct net_device *dev,
 6813				  struct ethtool_link_ksettings *cmd)
 6814{
 6815	struct niu *np = netdev_priv(dev);
 6816	struct niu_link_config *lp;
 6817
 6818	lp = &np->link_config;
 6819
 6820	memset(cmd, 0, sizeof(*cmd));
 6821	cmd->base.phy_address = np->phy_addr;
 6822	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
 6823						lp->supported);
 6824	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
 6825						lp->active_advertising);
 6826	cmd->base.autoneg = lp->active_autoneg;
 6827	cmd->base.speed = lp->active_speed;
 6828	cmd->base.duplex = lp->active_duplex;
 6829	cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
 6830
 6831	return 0;
 6832}
 6833
 6834static int niu_set_link_ksettings(struct net_device *dev,
 6835				  const struct ethtool_link_ksettings *cmd)
 6836{
 6837	struct niu *np = netdev_priv(dev);
 6838	struct niu_link_config *lp = &np->link_config;
 6839
 6840	ethtool_convert_link_mode_to_legacy_u32(&lp->advertising,
 6841						cmd->link_modes.advertising);
 6842	lp->speed = cmd->base.speed;
 6843	lp->duplex = cmd->base.duplex;
 6844	lp->autoneg = cmd->base.autoneg;
 6845	return niu_init_link(np);
 6846}
 6847
 6848static u32 niu_get_msglevel(struct net_device *dev)
 6849{
 6850	struct niu *np = netdev_priv(dev);
 6851	return np->msg_enable;
 6852}
 6853
 6854static void niu_set_msglevel(struct net_device *dev, u32 value)
 6855{
 6856	struct niu *np = netdev_priv(dev);
 6857	np->msg_enable = value;
 6858}
 6859
 6860static int niu_nway_reset(struct net_device *dev)
 6861{
 6862	struct niu *np = netdev_priv(dev);
 6863
 6864	if (np->link_config.autoneg)
 6865		return niu_init_link(np);
 6866
 6867	return 0;
 6868}
 6869
 6870static int niu_get_eeprom_len(struct net_device *dev)
 6871{
 6872	struct niu *np = netdev_priv(dev);
 6873
 6874	return np->eeprom_len;
 6875}
 6876
 6877static int niu_get_eeprom(struct net_device *dev,
 6878			  struct ethtool_eeprom *eeprom, u8 *data)
 6879{
 6880	struct niu *np = netdev_priv(dev);
 6881	u32 offset, len, val;
 6882
 6883	offset = eeprom->offset;
 6884	len = eeprom->len;
 6885
 6886	if (offset + len < offset)
 6887		return -EINVAL;
 6888	if (offset >= np->eeprom_len)
 6889		return -EINVAL;
 6890	if (offset + len > np->eeprom_len)
 6891		len = eeprom->len = np->eeprom_len - offset;
 6892
 6893	if (offset & 3) {
 6894		u32 b_offset, b_count;
 6895
 6896		b_offset = offset & 3;
 6897		b_count = 4 - b_offset;
 6898		if (b_count > len)
 6899			b_count = len;
 6900
 6901		val = nr64(ESPC_NCR((offset - b_offset) / 4));
 6902		memcpy(data, ((char *)&val) + b_offset, b_count);
 6903		data += b_count;
 6904		len -= b_count;
 6905		offset += b_count;
 6906	}
 6907	while (len >= 4) {
 6908		val = nr64(ESPC_NCR(offset / 4));
 6909		memcpy(data, &val, 4);
 6910		data += 4;
 6911		len -= 4;
 6912		offset += 4;
 6913	}
 6914	if (len) {
 6915		val = nr64(ESPC_NCR(offset / 4));
 6916		memcpy(data, &val, len);
 6917	}
 6918	return 0;
 6919}
 6920
 6921static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
 6922{
 6923	switch (flow_type) {
 6924	case TCP_V4_FLOW:
 6925	case TCP_V6_FLOW:
 6926		*pid = IPPROTO_TCP;
 6927		break;
 6928	case UDP_V4_FLOW:
 6929	case UDP_V6_FLOW:
 6930		*pid = IPPROTO_UDP;
 6931		break;
 6932	case SCTP_V4_FLOW:
 6933	case SCTP_V6_FLOW:
 6934		*pid = IPPROTO_SCTP;
 6935		break;
 6936	case AH_V4_FLOW:
 6937	case AH_V6_FLOW:
 6938		*pid = IPPROTO_AH;
 6939		break;
 6940	case ESP_V4_FLOW:
 6941	case ESP_V6_FLOW:
 6942		*pid = IPPROTO_ESP;
 6943		break;
 6944	default:
 6945		*pid = 0;
 6946		break;
 6947	}
 6948}
 6949
 6950static int niu_class_to_ethflow(u64 class, int *flow_type)
 6951{
 6952	switch (class) {
 6953	case CLASS_CODE_TCP_IPV4:
 6954		*flow_type = TCP_V4_FLOW;
 6955		break;
 6956	case CLASS_CODE_UDP_IPV4:
 6957		*flow_type = UDP_V4_FLOW;
 6958		break;
 6959	case CLASS_CODE_AH_ESP_IPV4:
 6960		*flow_type = AH_V4_FLOW;
 6961		break;
 6962	case CLASS_CODE_SCTP_IPV4:
 6963		*flow_type = SCTP_V4_FLOW;
 6964		break;
 6965	case CLASS_CODE_TCP_IPV6:
 6966		*flow_type = TCP_V6_FLOW;
 6967		break;
 6968	case CLASS_CODE_UDP_IPV6:
 6969		*flow_type = UDP_V6_FLOW;
 6970		break;
 6971	case CLASS_CODE_AH_ESP_IPV6:
 6972		*flow_type = AH_V6_FLOW;
 6973		break;
 6974	case CLASS_CODE_SCTP_IPV6:
 6975		*flow_type = SCTP_V6_FLOW;
 6976		break;
 6977	case CLASS_CODE_USER_PROG1:
 6978	case CLASS_CODE_USER_PROG2:
 6979	case CLASS_CODE_USER_PROG3:
 6980	case CLASS_CODE_USER_PROG4:
 6981		*flow_type = IP_USER_FLOW;
 6982		break;
 6983	default:
 6984		return -EINVAL;
 6985	}
 6986
 6987	return 0;
 6988}
 6989
 6990static int niu_ethflow_to_class(int flow_type, u64 *class)
 6991{
 6992	switch (flow_type) {
 6993	case TCP_V4_FLOW:
 6994		*class = CLASS_CODE_TCP_IPV4;
 6995		break;
 6996	case UDP_V4_FLOW:
 6997		*class = CLASS_CODE_UDP_IPV4;
 6998		break;
 6999	case AH_ESP_V4_FLOW:
 7000	case AH_V4_FLOW:
 7001	case ESP_V4_FLOW:
 7002		*class = CLASS_CODE_AH_ESP_IPV4;
 7003		break;
 7004	case SCTP_V4_FLOW:
 7005		*class = CLASS_CODE_SCTP_IPV4;
 7006		break;
 7007	case TCP_V6_FLOW:
 7008		*class = CLASS_CODE_TCP_IPV6;
 7009		break;
 7010	case UDP_V6_FLOW:
 7011		*class = CLASS_CODE_UDP_IPV6;
 7012		break;
 7013	case AH_ESP_V6_FLOW:
 7014	case AH_V6_FLOW:
 7015	case ESP_V6_FLOW:
 7016		*class = CLASS_CODE_AH_ESP_IPV6;
 7017		break;
 7018	case SCTP_V6_FLOW:
 7019		*class = CLASS_CODE_SCTP_IPV6;
 7020		break;
 7021	default:
 7022		return 0;
 7023	}
 7024
 7025	return 1;
 7026}
 7027
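/* Translate the hardware FLOW_KEY_* bits into ethtool RXH_* hash
 * field flags; niu_ethflow_to_flowkey() below is the inverse map.
 */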
 7028static u64 niu_flowkey_to_ethflow(u64 flow_key)
 7029{
 7030	u64 ethflow = 0;
 7031
 7032	if (flow_key & FLOW_KEY_L2DA)
 7033		ethflow |= RXH_L2DA;
 7034	if (flow_key & FLOW_KEY_VLAN)
 7035		ethflow |= RXH_VLAN;
 7036	if (flow_key & FLOW_KEY_IPSA)
 7037		ethflow |= RXH_IP_SRC;
 7038	if (flow_key & FLOW_KEY_IPDA)
 7039		ethflow |= RXH_IP_DST;
 7040	if (flow_key & FLOW_KEY_PROTO)
 7041		ethflow |= RXH_L3_PROTO;
 7042	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
 7043		ethflow |= RXH_L4_B_0_1;
 7044	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
 7045		ethflow |= RXH_L4_B_2_3;
 7046
 7047	return ethflow;
 7049}
 7050
 7051static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
 7052{
 7053	u64 key = 0;
 7054
 7055	if (ethflow & RXH_L2DA)
 7056		key |= FLOW_KEY_L2DA;
 7057	if (ethflow & RXH_VLAN)
 7058		key |= FLOW_KEY_VLAN;
 7059	if (ethflow & RXH_IP_SRC)
 7060		key |= FLOW_KEY_IPSA;
 7061	if (ethflow & RXH_IP_DST)
 7062		key |= FLOW_KEY_IPDA;
 7063	if (ethflow & RXH_L3_PROTO)
 7064		key |= FLOW_KEY_PROTO;
 7065	if (ethflow & RXH_L4_B_0_1)
 7066		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
 7067	if (ethflow & RXH_L4_B_2_3)
 7068		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
 7069
 7070	*flow_key = key;
 7071
 7072	return 1;
 7074}
 7075
 7076static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 7077{
 7078	u64 class;
 7079
 7080	nfc->data = 0;
 7081
 7082	if (!niu_ethflow_to_class(nfc->flow_type, &class))
 7083		return -EINVAL;
 7084
 7085	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
 7086	    TCAM_KEY_DISC)
 7087		nfc->data = RXH_DISCARD;
 7088	else
 7089		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
 7090						      CLASS_CODE_USER_PROG1]);
 7091	return 0;
 7092}
 7093
 7094static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
 7095					struct ethtool_rx_flow_spec *fsp)
 7096{
 7097	u32 tmp;
 7098	u16 prt;
 7099
 7100	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
 7101	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
 7102
 7103	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
 7104	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 7105
 7106	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
 7107	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
 7108
 7109	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
 7110	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 7111
 7112	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
 7113		TCAM_V4KEY2_TOS_SHIFT;
 7114	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
 7115		TCAM_V4KEY2_TOS_SHIFT;
 7116
 7117	switch (fsp->flow_type) {
 7118	case TCP_V4_FLOW:
 7119	case UDP_V4_FLOW:
 7120	case SCTP_V4_FLOW:
 7121		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7122			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
 7123		fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
 7124
 7125		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7126			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
 7127		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 7128
 7129		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7130			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
 7131		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
 7132
 7133		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7134			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
 7135		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 7136		break;
 7137	case AH_V4_FLOW:
 7138	case ESP_V4_FLOW:
 7139		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7140			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7141		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 7142
 7143		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7144			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7145		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 7146		break;
 7147	case IP_USER_FLOW:
 7148		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7149			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7150		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 7151
 7152		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7153			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7154		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 7155
 7156		fsp->h_u.usr_ip4_spec.proto =
 7157			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
 7158			TCAM_V4KEY2_PROTO_SHIFT;
 7159		fsp->m_u.usr_ip4_spec.proto =
 7160			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
 7161			TCAM_V4KEY2_PROTO_SHIFT;
 7162
 7163		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
 7164		break;
 7165	default:
 7166		break;
 7167	}
 7168}
 7169
 7170static int niu_get_ethtool_tcam_entry(struct niu *np,
 7171				      struct ethtool_rxnfc *nfc)
 7172{
 7173	struct niu_parent *parent = np->parent;
 7174	struct niu_tcam_entry *tp;
 7175	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
 7176	u16 idx;
 7177	u64 class;
 7178	int ret = 0;
 7179
 7180	idx = tcam_get_index(np, (u16)nfc->fs.location);
 7181
 7182	tp = &parent->tcam[idx];
 7183	if (!tp->valid) {
 7184		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
 7185			    parent->index, (u16)nfc->fs.location, idx);
 7186		return -EINVAL;
 7187	}
 7188
 7189	/* fill the flow spec entry */
 7190	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 7191		TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7192	ret = niu_class_to_ethflow(class, &fsp->flow_type);
 7193	if (ret < 0) {
 7194		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
 7195			    parent->index);
 7196		goto out;
 7197	}
 7198
 7199	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
 7200		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
 7201			TCAM_V4KEY2_PROTO_SHIFT;
 7202		if (proto == IPPROTO_ESP) {
 7203			if (fsp->flow_type == AH_V4_FLOW)
 7204				fsp->flow_type = ESP_V4_FLOW;
 7205			else
 7206				fsp->flow_type = ESP_V6_FLOW;
 7207		}
 7208	}
 7209
 7210	switch (fsp->flow_type) {
 7211	case TCP_V4_FLOW:
 7212	case UDP_V4_FLOW:
 7213	case SCTP_V4_FLOW:
 7214	case AH_V4_FLOW:
 7215	case ESP_V4_FLOW:
 7216		niu_get_ip4fs_from_tcam_key(tp, fsp);
 7217		break;
 7218	case TCP_V6_FLOW:
 7219	case UDP_V6_FLOW:
 7220	case SCTP_V6_FLOW:
 7221	case AH_V6_FLOW:
 7222	case ESP_V6_FLOW:
 7223		/* Not yet implemented */
 7224		ret = -EINVAL;
 7225		break;
 7226	case IP_USER_FLOW:
 7227		niu_get_ip4fs_from_tcam_key(tp, fsp);
 7228		break;
 7229	default:
 7230		ret = -EINVAL;
 7231		break;
 7232	}
 7233
 7234	if (ret < 0)
 7235		goto out;
 7236
 7237	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
 7238		fsp->ring_cookie = RX_CLS_FLOW_DISC;
 7239	else
 7240		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
 7241			TCAM_ASSOCDATA_OFFSET_SHIFT;
 7242
 7243	/* report the total TCAM size in nfc->data */
 7244	nfc->data = tcam_get_size(np);
 7245out:
 7246	return ret;
 7247}
 7248
 7249static int niu_get_ethtool_tcam_all(struct niu *np,
 7250				    struct ethtool_rxnfc *nfc,
 7251				    u32 *rule_locs)
 7252{
 7253	struct niu_parent *parent = np->parent;
 7254	struct niu_tcam_entry *tp;
 7255	int i, idx, cnt;
 7256	unsigned long flags;
 7257	int ret = 0;
 7258
 7259	/* report the total TCAM size in nfc->data */
 7260	nfc->data = tcam_get_size(np);
 7261
 7262	niu_lock_parent(np, flags);
 7263	for (cnt = 0, i = 0; i < nfc->data; i++) {
 7264		idx = tcam_get_index(np, i);
 7265		tp = &parent->tcam[idx];
 7266		if (!tp->valid)
 7267			continue;
 7268		if (cnt == nfc->rule_cnt) {
 7269			ret = -EMSGSIZE;
 7270			break;
 7271		}
 7272		rule_locs[cnt] = i;
 7273		cnt++;
 7274	}
 7275	niu_unlock_parent(np, flags);
 7276
 7277	nfc->rule_cnt = cnt;
 7278
 7279	return ret;
 7280}
 7281
 7282static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 7283		       u32 *rule_locs)
 7284{
 7285	struct niu *np = netdev_priv(dev);
 7286	int ret = 0;
 7287
 7288	switch (cmd->cmd) {
 7289	case ETHTOOL_GRXFH:
 7290		ret = niu_get_hash_opts(np, cmd);
 7291		break;
 7292	case ETHTOOL_GRXRINGS:
 7293		cmd->data = np->num_rx_rings;
 7294		break;
 7295	case ETHTOOL_GRXCLSRLCNT:
 7296		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
 7297		break;
 7298	case ETHTOOL_GRXCLSRULE:
 7299		ret = niu_get_ethtool_tcam_entry(np, cmd);
 7300		break;
 7301	case ETHTOOL_GRXCLSRLALL:
 7302		ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
 7303		break;
 7304	default:
 7305		ret = -EINVAL;
 7306		break;
 7307	}
 7308
 7309	return ret;
 7310}
 7311
 7312static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 7313{
 7314	u64 class;
 7315	u64 flow_key = 0;
 7316	unsigned long flags;
 7317
 7318	if (!niu_ethflow_to_class(nfc->flow_type, &class))
 7319		return -EINVAL;
 7320
 7321	if (class < CLASS_CODE_USER_PROG1 ||
 7322	    class > CLASS_CODE_SCTP_IPV6)
 7323		return -EINVAL;
 7324
 7325	if (nfc->data & RXH_DISCARD) {
 7326		niu_lock_parent(np, flags);
 7327		flow_key = np->parent->tcam_key[class -
 7328					       CLASS_CODE_USER_PROG1];
 7329		flow_key |= TCAM_KEY_DISC;
 7330		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
 7331		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
 7332		niu_unlock_parent(np, flags);
 7333		return 0;
 7334	} else {
 7335		/* Discard was set before, but is not set now */
 7336		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
 7337		    TCAM_KEY_DISC) {
 7338			niu_lock_parent(np, flags);
 7339			flow_key = np->parent->tcam_key[class -
 7340					       CLASS_CODE_USER_PROG1];
 7341			flow_key &= ~TCAM_KEY_DISC;
 7342			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
 7343			     flow_key);
 7344			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
 7345				flow_key;
 7346			niu_unlock_parent(np, flags);
 7347		}
 7348	}
 7349
 7350	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
 7351		return -EINVAL;
 7352
 7353	niu_lock_parent(np, flags);
 7354	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
 7355	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
 7356	niu_unlock_parent(np, flags);
 7357
 7358	return 0;
 7359}
 7360
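/* Pack an ethtool IPv4 flow spec into the four 64-bit TCAM key and
 * mask words: class code in word 0, L2 RDC table number in word 1,
 * TOS, protocol and ports (or SPI) in word 2, and the source and
 * destination addresses in word 3.
 */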
 7361static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
 7362				       struct niu_tcam_entry *tp,
 7363				       int l2_rdc_tab, u64 class)
 7364{
 7365	u8 pid = 0;
 7366	u32 sip, dip, sipm, dipm, spi, spim;
 7367	u16 sport, dport, spm, dpm;
 7368
 7369	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
 7370	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
 7371	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
 7372	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
 7373
 7374	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7375	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
 7376	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
 7377	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
 7378
 7379	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
 7380	tp->key[3] |= dip;
 7381
 7382	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
 7383	tp->key_mask[3] |= dipm;
 7384
 7385	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
 7386		       TCAM_V4KEY2_TOS_SHIFT);
 7387	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
 7388			    TCAM_V4KEY2_TOS_SHIFT);
 7389	switch (fsp->flow_type) {
 7390	case TCP_V4_FLOW:
 7391	case UDP_V4_FLOW:
 7392	case SCTP_V4_FLOW:
 7393		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
 7394		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
 7395		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
 7396		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
 7397
 7398		tp->key[2] |= (((u64)sport << 16) | dport);
 7399		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
 7400		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
 7401		break;
 7402	case AH_V4_FLOW:
 7403	case ESP_V4_FLOW:
 7404		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
 7405		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
 7406
 7407		tp->key[2] |= spi;
 7408		tp->key_mask[2] |= spim;
 7409		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
 7410		break;
 7411	case IP_USER_FLOW:
 7412		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
 7413		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
 7414
 7415		tp->key[2] |= spi;
 7416		tp->key_mask[2] |= spim;
 7417		pid = fsp->h_u.usr_ip4_spec.proto;
 7418		break;
 7419	default:
 7420		break;
 7421	}
 7422
 7423	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
 7424	if (pid)
 7425		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
 7427}
 7428
 7429static int niu_add_ethtool_tcam_entry(struct niu *np,
 7430				      struct ethtool_rxnfc *nfc)
 7431{
 7432	struct niu_parent *parent = np->parent;
 7433	struct niu_tcam_entry *tp;
 7434	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
 7435	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
 7436	int l2_rdc_table = rdc_table->first_table_num;
 7437	u16 idx;
 7438	u64 class;
 7439	unsigned long flags;
 7440	int err, ret;
 7441
 7442	ret = 0;
 7443
 7444	idx = nfc->fs.location;
 7445	if (idx >= tcam_get_size(np))
 7446		return -EINVAL;
 7447
 7448	if (fsp->flow_type == IP_USER_FLOW) {
 7449		int i;
 7450		int add_usr_cls = 0;
 7451		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
 7452		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
 7453
 7454		if (uspec->ip_ver != ETH_RX_NFC_IP4)
 7455			return -EINVAL;
 7456
 7457		niu_lock_parent(np, flags);
 7458
 7459		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
 7460			if (parent->l3_cls[i]) {
 7461				if (uspec->proto == parent->l3_cls_pid[i]) {
 7462					class = parent->l3_cls[i];
 7463					parent->l3_cls_refcnt[i]++;
 7464					add_usr_cls = 1;
 7465					break;
 7466				}
 7467			} else {
 7468				/* Program new user IP class */
 7469				switch (i) {
 7470				case 0:
 7471					class = CLASS_CODE_USER_PROG1;
 7472					break;
 7473				case 1:
 7474					class = CLASS_CODE_USER_PROG2;
 7475					break;
 7476				case 2:
 7477					class = CLASS_CODE_USER_PROG3;
 7478					break;
 7479				case 3:
 7480					class = CLASS_CODE_USER_PROG4;
 7481					break;
 7482				default:
 7483					break;
 7484				}
 7485				ret = tcam_user_ip_class_set(np, class, 0,
 7486							     uspec->proto,
 7487							     uspec->tos,
 7488							     umask->tos);
 7489				if (ret)
 7490					goto out;
 7491
 7492				ret = tcam_user_ip_class_enable(np, class, 1);
 7493				if (ret)
 7494					goto out;
 7495				parent->l3_cls[i] = class;
 7496				parent->l3_cls_pid[i] = uspec->proto;
 7497				parent->l3_cls_refcnt[i]++;
 7498				add_usr_cls = 1;
 7499				break;
 7500			}
 7501		}
 7502		if (!add_usr_cls) {
 7503			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
 7504				    parent->index, __func__, uspec->proto);
 7505			ret = -EINVAL;
 7506			goto out;
 7507		}
 7508		niu_unlock_parent(np, flags);
 7509	} else {
 7510		if (!niu_ethflow_to_class(fsp->flow_type, &class))
 7511			return -EINVAL;
 7513	}
 7514
 7515	niu_lock_parent(np, flags);
 7516
 7517	idx = tcam_get_index(np, idx);
 7518	tp = &parent->tcam[idx];
 7519
 7520	memset(tp, 0, sizeof(*tp));
 7521
 7522	/* fill in the tcam key and mask */
 7523	switch (fsp->flow_type) {
 7524	case TCP_V4_FLOW:
 7525	case UDP_V4_FLOW:
 7526	case SCTP_V4_FLOW:
 7527	case AH_V4_FLOW:
 7528	case ESP_V4_FLOW:
 7529		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
 7530		break;
 7531	case TCP_V6_FLOW:
 7532	case UDP_V6_FLOW:
 7533	case SCTP_V6_FLOW:
 7534	case AH_V6_FLOW:
 7535	case ESP_V6_FLOW:
 7536		/* Not yet implemented */
 7537		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
 7538			    parent->index, __func__, fsp->flow_type);
 7539		ret = -EINVAL;
 7540		goto out;
 7541	case IP_USER_FLOW:
 7542		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
 7543		break;
 7544	default:
 7545		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
 7546			    parent->index, __func__, fsp->flow_type);
 7547		ret = -EINVAL;
 7548		goto out;
 7549	}
 7550
 7551	/* fill in the assoc data */
 7552	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
 7553		tp->assoc_data = TCAM_ASSOCDATA_DISC;
 7554	} else {
 7555		if (fsp->ring_cookie >= np->num_rx_rings) {
 7556			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
 7557				    parent->index, __func__,
 7558				    (long long)fsp->ring_cookie);
 7559			ret = -EINVAL;
 7560			goto out;
 7561		}
 7562		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
 7563				  (fsp->ring_cookie <<
 7564				   TCAM_ASSOCDATA_OFFSET_SHIFT));
 7565	}
 7566
 7567	err = tcam_write(np, idx, tp->key, tp->key_mask);
 7568	if (err) {
 7569		ret = -EINVAL;
 7570		goto out;
 7571	}
 7572	err = tcam_assoc_write(np, idx, tp->assoc_data);
 7573	if (err) {
 7574		ret = -EINVAL;
 7575		goto out;
 7576	}
 7577
 7578	/* validate the entry */
 7579	tp->valid = 1;
 7580	np->clas.tcam_valid_entries++;
 7581out:
 7582	niu_unlock_parent(np, flags);
 7583
 7584	return ret;
 7585}
 7586
 7587static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
 7588{
 7589	struct niu_parent *parent = np->parent;
 7590	struct niu_tcam_entry *tp;
 7591	u16 idx;
 7592	unsigned long flags;
 7593	u64 class;
 7594	int ret = 0;
 7595
 7596	if (loc >= tcam_get_size(np))
 7597		return -EINVAL;
 7598
 7599	niu_lock_parent(np, flags);
 7600
 7601	idx = tcam_get_index(np, loc);
 7602	tp = &parent->tcam[idx];
 7603
 7604	/* If the entry is of a user-defined class, release the class reference. */
 7605	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 7606		TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7607
 7608	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
 7609		int i;
 7610		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
 7611			if (parent->l3_cls[i] == class) {
 7612				parent->l3_cls_refcnt[i]--;
 7613				if (!parent->l3_cls_refcnt[i]) {
 7614					/* disable class */
 7615					ret = tcam_user_ip_class_enable(np,
 7616									class,
 7617									0);
 7618					if (ret)
 7619						goto out;
 7620					parent->l3_cls[i] = 0;
 7621					parent->l3_cls_pid[i] = 0;
 7622				}
 7623				break;
 7624			}
 7625		}
 7626		if (i == NIU_L3_PROG_CLS) {
 7627			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
 7628				    parent->index, __func__,
 7629				    (unsigned long long)class);
 7630			ret = -EINVAL;
 7631			goto out;
 7632		}
 7633	}
 7634
 7635	ret = tcam_flush(np, idx);
 7636	if (ret)
 7637		goto out;
 7638
 7639	/* invalidate the entry */
 7640	tp->valid = 0;
 7641	np->clas.tcam_valid_entries--;
 7642out:
 7643	niu_unlock_parent(np, flags);
 7644
 7645	return ret;
 7646}
 7647
 7648static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 7649{
 7650	struct niu *np = netdev_priv(dev);
 7651	int ret = 0;
 7652
 7653	switch (cmd->cmd) {
 7654	case ETHTOOL_SRXFH:
 7655		ret = niu_set_hash_opts(np, cmd);
 7656		break;
 7657	case ETHTOOL_SRXCLSRLINS:
 7658		ret = niu_add_ethtool_tcam_entry(np, cmd);
 7659		break;
 7660	case ETHTOOL_SRXCLSRLDEL:
 7661		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
 7662		break;
 7663	default:
 7664		ret = -EINVAL;
 7665		break;
 7666	}
 7667
 7668	return ret;
 7669}
 7670
 7671static const struct {
 7672	const char string[ETH_GSTRING_LEN];
 7673} niu_xmac_stat_keys[] = {
 7674	{ "tx_frames" },
 7675	{ "tx_bytes" },
 7676	{ "tx_fifo_errors" },
 7677	{ "tx_overflow_errors" },
 7678	{ "tx_max_pkt_size_errors" },
 7679	{ "tx_underflow_errors" },
 7680	{ "rx_local_faults" },
 7681	{ "rx_remote_faults" },
 7682	{ "rx_link_faults" },
 7683	{ "rx_align_errors" },
 7684	{ "rx_frags" },
 7685	{ "rx_mcasts" },
 7686	{ "rx_bcasts" },
 7687	{ "rx_hist_cnt1" },
 7688	{ "rx_hist_cnt2" },
 7689	{ "rx_hist_cnt3" },
 7690	{ "rx_hist_cnt4" },
 7691	{ "rx_hist_cnt5" },
 7692	{ "rx_hist_cnt6" },
 7693	{ "rx_hist_cnt7" },
 7694	{ "rx_octets" },
 7695	{ "rx_code_violations" },
 7696	{ "rx_len_errors" },
 7697	{ "rx_crc_errors" },
 7698	{ "rx_underflows" },
 7699	{ "rx_overflows" },
 7700	{ "pause_off_state" },
 7701	{ "pause_on_state" },
 7702	{ "pause_received" },
 7703};
 7704
 7705#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
 7706
 7707static const struct {
 7708	const char string[ETH_GSTRING_LEN];
 7709} niu_bmac_stat_keys[] = {
 7710	{ "tx_underflow_errors" },
 7711	{ "tx_max_pkt_size_errors" },
 7712	{ "tx_bytes" },
 7713	{ "tx_frames" },
 7714	{ "rx_overflows" },
 7715	{ "rx_frames" },
 7716	{ "rx_align_errors" },
 7717	{ "rx_crc_errors" },
 7718	{ "rx_len_errors" },
 7719	{ "pause_off_state" },
 7720	{ "pause_on_state" },
 7721	{ "pause_received" },
 7722};
 7723
 7724#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
 7725
 7726static const struct {
 7727	const char string[ETH_GSTRING_LEN];
 7728} niu_rxchan_stat_keys[] = {
 7729	{ "rx_channel" },
 7730	{ "rx_packets" },
 7731	{ "rx_bytes" },
 7732	{ "rx_dropped" },
 7733	{ "rx_errors" },
 7734};
 7735
 7736#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
 7737
 7738static const struct {
 7739	const char string[ETH_GSTRING_LEN];
 7740} niu_txchan_stat_keys[] = {
 7741	{ "tx_channel" },
 7742	{ "tx_packets" },
 7743	{ "tx_bytes" },
 7744	{ "tx_errors" },
 7745};
 7746
 7747#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
 7748
 7749static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 7750{
 7751	struct niu *np = netdev_priv(dev);
 7752	int i;
 7753
 7754	if (stringset != ETH_SS_STATS)
 7755		return;
 7756
 7757	if (np->flags & NIU_FLAGS_XMAC) {
 7758		memcpy(data, niu_xmac_stat_keys,
 7759		       sizeof(niu_xmac_stat_keys));
 7760		data += sizeof(niu_xmac_stat_keys);
 7761	} else {
 7762		memcpy(data, niu_bmac_stat_keys,
 7763		       sizeof(niu_bmac_stat_keys));
 7764		data += sizeof(niu_bmac_stat_keys);
 7765	}
 7766	for (i = 0; i < np->num_rx_rings; i++) {
 7767		memcpy(data, niu_rxchan_stat_keys,
 7768		       sizeof(niu_rxchan_stat_keys));
 7769		data += sizeof(niu_rxchan_stat_keys);
 7770	}
 7771	for (i = 0; i < np->num_tx_rings; i++) {
 7772		memcpy(data, niu_txchan_stat_keys,
 7773		       sizeof(niu_txchan_stat_keys));
 7774		data += sizeof(niu_txchan_stat_keys);
 7775	}
 7776}
 7777
 7778static int niu_get_sset_count(struct net_device *dev, int stringset)
 7779{
 7780	struct niu *np = netdev_priv(dev);
 7781
 7782	if (stringset != ETH_SS_STATS)
 7783		return -EINVAL;
 7784
 7785	return (np->flags & NIU_FLAGS_XMAC ?
 7786		 NUM_XMAC_STAT_KEYS :
 7787		 NUM_BMAC_STAT_KEYS) +
 7788		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
 7789		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
 7790}
 7791
 7792static void niu_get_ethtool_stats(struct net_device *dev,
 7793				  struct ethtool_stats *stats, u64 *data)
 7794{
 7795	struct niu *np = netdev_priv(dev);
 7796	int i;
 7797
 7798	niu_sync_mac_stats(np);
 7799	if (np->flags & NIU_FLAGS_XMAC) {
 7800		memcpy(data, &np->mac_stats.xmac,
 7801		       sizeof(struct niu_xmac_stats));
 7802		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
 7803	} else {
 7804		memcpy(data, &np->mac_stats.bmac,
 7805		       sizeof(struct niu_bmac_stats));
 7806		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
 7807	}
 7808	for (i = 0; i < np->num_rx_rings; i++) {
 7809		struct rx_ring_info *rp = &np->rx_rings[i];
 7810
 7811		niu_sync_rx_discard_stats(np, rp, 0);
 7812
 7813		data[0] = rp->rx_channel;
 7814		data[1] = rp->rx_packets;
 7815		data[2] = rp->rx_bytes;
 7816		data[3] = rp->rx_dropped;
 7817		data[4] = rp->rx_errors;
 7818		data += 5;
 7819	}
 7820	for (i = 0; i < np->num_tx_rings; i++) {
 7821		struct tx_ring_info *rp = &np->tx_rings[i];
 7822
 7823		data[0] = rp->tx_channel;
 7824		data[1] = rp->tx_packets;
 7825		data[2] = rp->tx_bytes;
 7826		data[3] = rp->tx_errors;
 7827		data += 4;
 7828	}
 7829}
 7830
 7831static u64 niu_led_state_save(struct niu *np)
 7832{
 7833	if (np->flags & NIU_FLAGS_XMAC)
 7834		return nr64_mac(XMAC_CONFIG);
 7835	else
 7836		return nr64_mac(BMAC_XIF_CONFIG);
 7837}
 7838
 7839static void niu_led_state_restore(struct niu *np, u64 val)
 7840{
 7841	if (np->flags & NIU_FLAGS_XMAC)
 7842		nw64_mac(XMAC_CONFIG, val);
 7843	else
 7844		nw64_mac(BMAC_XIF_CONFIG, val);
 7845}
 7846
 7847static void niu_force_led(struct niu *np, int on)
 7848{
 7849	u64 val, reg, bit;
 7850
 7851	if (np->flags & NIU_FLAGS_XMAC) {
 7852		reg = XMAC_CONFIG;
 7853		bit = XMAC_CONFIG_FORCE_LED_ON;
 7854	} else {
 7855		reg = BMAC_XIF_CONFIG;
 7856		bit = BMAC_XIF_CONFIG_LINK_LED;
 7857	}
 7858
 7859	val = nr64_mac(reg);
 7860	if (on)
 7861		val |= bit;
 7862	else
 7863		val &= ~bit;
 7864	nw64_mac(reg, val);
 7865}
 7866
 7867static int niu_set_phys_id(struct net_device *dev,
 7868			   enum ethtool_phys_id_state state)
 7870{
 7871	struct niu *np = netdev_priv(dev);
 7872
 7873	if (!netif_running(dev))
 7874		return -EAGAIN;
 7875
 7876	switch (state) {
 7877	case ETHTOOL_ID_ACTIVE:
 7878		np->orig_led_state = niu_led_state_save(np);
 7879		return 1;	/* cycle on/off once per second */
 7880
 7881	case ETHTOOL_ID_ON:
 7882		niu_force_led(np, 1);
 7883		break;
 7884
 7885	case ETHTOOL_ID_OFF:
 7886		niu_force_led(np, 0);
 7887		break;
 7888
 7889	case ETHTOOL_ID_INACTIVE:
 7890		niu_led_state_restore(np, np->orig_led_state);
 7891	}
 7892
 7893	return 0;
 7894}
 7895
 7896static const struct ethtool_ops niu_ethtool_ops = {
 7897	.get_drvinfo		= niu_get_drvinfo,
 7898	.get_link		= ethtool_op_get_link,
 7899	.get_msglevel		= niu_get_msglevel,
 7900	.set_msglevel		= niu_set_msglevel,
 7901	.nway_reset		= niu_nway_reset,
 7902	.get_eeprom_len		= niu_get_eeprom_len,
 7903	.get_eeprom		= niu_get_eeprom,
 7904	.get_strings		= niu_get_strings,
 7905	.get_sset_count		= niu_get_sset_count,
 7906	.get_ethtool_stats	= niu_get_ethtool_stats,
 7907	.set_phys_id		= niu_set_phys_id,
 7908	.get_rxnfc		= niu_get_nfc,
 7909	.set_rxnfc		= niu_set_nfc,
 7910	.get_link_ksettings	= niu_get_link_ksettings,
 7911	.set_link_ksettings	= niu_set_link_ksettings,
 7912};
 7913
 7914static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
 7915			      int ldg, int ldn)
 7916{
 7917	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
 7918		return -EINVAL;
 7919	if (ldn < 0 || ldn > LDN_MAX)
 7920		return -EINVAL;
 7921
 7922	parent->ldg_map[ldn] = ldg;
 7923
 7924	if (np->parent->plat_type == PLAT_TYPE_NIU) {
 7925		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
 7926		 * the firmware, and we're not supposed to change them.
 7927		 * Validate the mapping, because if it's wrong we probably
 7928		 * won't get any interrupts and that's painful to debug.
 7929		 */
 7930		if (nr64(LDG_NUM(ldn)) != ldg) {
 7931			dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
 7932				np->port, ldn, ldg,
 7933				(unsigned long long) nr64(LDG_NUM(ldn)));
 7934			return -EINVAL;
 7935		}
 7936	} else
 7937		nw64(LDG_NUM(ldn), ldg);
 7938
 7939	return 0;
 7940}
 7941
 7942static int niu_set_ldg_timer_res(struct niu *np, int res)
 7943{
 7944	if (res < 0 || res > LDG_TIMER_RES_VAL)
 7945		return -EINVAL;
 7946
 7948	nw64(LDG_TIMER_RES, res);
 7949
 7950	return 0;
 7951}
 7952
 7953static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
 7954{
 7955	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
 7956	    (func < 0 || func > 3) ||
 7957	    (vector < 0 || vector > 0x1f))
 7958		return -EINVAL;
 7959
 7960	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
 7961
 7962	return 0;
 7963}
 7964
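/* Read one byte of the expansion ROM EEPROM through the ESPC PIO
 * status register: write the read-start frame, then poll up to 64
 * times at 5us intervals for READ_END.  The sequence is issued twice
 * before the data byte is extracted.
 */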
 7965static int niu_pci_eeprom_read(struct niu *np, u32 addr)
 7966{
 7967	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
 7968				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
 7969	int limit;
 7970
 7971	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
 7972		return -EINVAL;
 7973
 7974	frame = frame_base;
 7975	nw64(ESPC_PIO_STAT, frame);
 7976	limit = 64;
 7977	do {
 7978		udelay(5);
 7979		frame = nr64(ESPC_PIO_STAT);
 7980		if (frame & ESPC_PIO_STAT_READ_END)
 7981			break;
 7982	} while (limit--);
 7983	if (!(frame & ESPC_PIO_STAT_READ_END)) {
 7984		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
 7985			(unsigned long long) frame);
 7986		return -ENODEV;
 7987	}
 7988
 7989	frame = frame_base;
 7990	nw64(ESPC_PIO_STAT, frame);
 7991	limit = 64;
 7992	do {
 7993		udelay(5);
 7994		frame = nr64(ESPC_PIO_STAT);
 7995		if (frame & ESPC_PIO_STAT_READ_END)
 7996			break;
 7997	} while (limit--);
 7998	if (!(frame & ESPC_PIO_STAT_READ_END)) {
 7999		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
 8000			(unsigned long long) frame);
 8001		return -ENODEV;
 8002	}
 8003
 8004	frame = nr64(ESPC_PIO_STAT);
 8005	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
 8006}
 8007
 8008static int niu_pci_eeprom_read16(struct niu *np, u32 off)
 8009{
 8010	int err = niu_pci_eeprom_read(np, off);
 8011	u16 val;
 8012
 8013	if (err < 0)
 8014		return err;
 8015	val = (err << 8);
 8016	err = niu_pci_eeprom_read(np, off + 1);
 8017	if (err < 0)
 8018		return err;
 8019	val |= (err & 0xff);
 8020
 8021	return val;
 8022}
 8023
 8024static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
 8025{
 8026	int err = niu_pci_eeprom_read(np, off);
 8027	u16 val;
 8028
 8029	if (err < 0)
 8030		return err;
 8031
 8032	val = (err & 0xff);
 8033	err = niu_pci_eeprom_read(np, off + 1);
 8034	if (err < 0)
 8035		return err;
 8036
 8037	val |= (err & 0xff) << 8;
 8038
 8039	return val;
 8040}
 8041
 8042static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
 8043				    int namebuf_len)
 8044{
 8045	int i;
 8046
 8047	for (i = 0; i < namebuf_len; i++) {
 8048		int err = niu_pci_eeprom_read(np, off + i);
 8049		if (err < 0)
 8050			return err;
 8051		*namebuf++ = err;
 8052		if (!err)
 8053			break;
 8054	}
 8055	if (i >= namebuf_len)
 8056		return -EINVAL;
 8057
 8058	return i + 1;
 8059}
 8060
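/* Scan the VPD version string for an "FCode x.y" tag and mark the
 * VPD contents valid when the firmware revision meets the driver's
 * minimum.
 */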
 8061static void niu_vpd_parse_version(struct niu *np)
 8062{
 8063	struct niu_vpd *vpd = &np->vpd;
 8064	int len = strlen(vpd->version) + 1;
 8065	const char *s = vpd->version;
 8066	int i;
 8067
 8068	for (i = 0; i < len - 5; i++) {
 8069		if (!strncmp(s + i, "FCode ", 6))
 8070			break;
 8071	}
 8072	if (i >= len - 5)
 8073		return;
 8074
 8075	s += i + 5;
 8076	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
 8077
 8078	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8079		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
 8080		     vpd->fcode_major, vpd->fcode_minor);
 8081	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
 8082	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
 8083	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
 8084		np->flags |= NIU_FLAGS_VPD_VALID;
 8085}
 8086
 8087/* ESPC_PIO_EN_ENABLE must be set */
 8088static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
 8089{
 8090	unsigned int found_mask = 0;
 8091#define FOUND_MASK_MODEL	0x00000001
 8092#define FOUND_MASK_BMODEL	0x00000002
 8093#define FOUND_MASK_VERS		0x00000004
 8094#define FOUND_MASK_MAC		0x00000008
 8095#define FOUND_MASK_NMAC		0x00000010
 8096#define FOUND_MASK_PHY		0x00000020
 8097#define FOUND_MASK_ALL		0x0000003f
 8098
 8099	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8100		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
 8101	while (start < end) {
 8102		int len, err, prop_len;
 8103		char namebuf[64];
 8104		u8 *prop_buf;
 8105		int max_len;
 8106
 8107		if (found_mask == FOUND_MASK_ALL) {
 8108			niu_vpd_parse_version(np);
 8109			return 1;
 8110		}
 8111
 8112		err = niu_pci_eeprom_read(np, start + 2);
 8113		if (err < 0)
 8114			return err;
 8115		len = err;
 8116		start += 3;
 8117
 8118		prop_len = niu_pci_eeprom_read(np, start + 4);
		if (prop_len < 0)
			return prop_len;
 8119		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
 8120		if (err < 0)
 8121			return err;
 8122
 8123		prop_buf = NULL;
 8124		max_len = 0;
 8125		if (!strcmp(namebuf, "model")) {
 8126			prop_buf = np->vpd.model;
 8127			max_len = NIU_VPD_MODEL_MAX;
 8128			found_mask |= FOUND_MASK_MODEL;
 8129		} else if (!strcmp(namebuf, "board-model")) {
 8130			prop_buf = np->vpd.board_model;
 8131			max_len = NIU_VPD_BD_MODEL_MAX;
 8132			found_mask |= FOUND_MASK_BMODEL;
 8133		} else if (!strcmp(namebuf, "version")) {
 8134			prop_buf = np->vpd.version;
 8135			max_len = NIU_VPD_VERSION_MAX;
 8136			found_mask |= FOUND_MASK_VERS;
 8137		} else if (!strcmp(namebuf, "local-mac-address")) {
 8138			prop_buf = np->vpd.local_mac;
 8139			max_len = ETH_ALEN;
 8140			found_mask |= FOUND_MASK_MAC;
 8141		} else if (!strcmp(namebuf, "num-mac-addresses")) {
 8142			prop_buf = &np->vpd.mac_num;
 8143			max_len = 1;
 8144			found_mask |= FOUND_MASK_NMAC;
 8145		} else if (!strcmp(namebuf, "phy-type")) {
 8146			prop_buf = np->vpd.phy_type;
 8147			max_len = NIU_VPD_PHY_TYPE_MAX;
 8148			found_mask |= FOUND_MASK_PHY;
 8149		}
 8150
 8151		if (max_len && prop_len > max_len) {
 8152			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
 8153			return -EINVAL;
 8154		}
 8155
 8156		if (prop_buf) {
 8157			u32 off = start + 5 + err;
 8158			int i;
 8159
 8160			netif_printk(np, probe, KERN_DEBUG, np->dev,
 8161				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
 8162				     namebuf, prop_len);
 8163			for (i = 0; i < prop_len; i++) {
				err = niu_pci_eeprom_read(np, off + i);
				/* don't store a negative error code in the
				 * property buffer; skip unreadable bytes
				 */
				if (err >= 0)
					*prop_buf = err;
				++prop_buf;
			}
 8165		}
 8166
 8167		start += len;
 8168	}
 8169
 8170	return 0;
 8171}
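
/* Record layout as parsed above (editor's operational summary, not taken
 * from a VPD spec): byte 2 of each record holds the record length; after
 * the 3-byte header, byte +4 holds the one-byte value length, a
 * NUL-terminated property name starts at byte +5, and the value bytes
 * follow the name.  "start += len" then steps to the next record.
 */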
 8172
 8173/* ESPC_PIO_EN_ENABLE must be set */
 8174static void niu_pci_vpd_fetch(struct niu *np, u32 start)
 8175{
 8176	u32 offset;
 8177	int err;
 8178
 8179	err = niu_pci_eeprom_read16_swp(np, start + 1);
 8180	if (err < 0)
 8181		return;
 8182
 8183	offset = err + 3;
 8184
 8185	while (start + offset < ESPC_EEPROM_SIZE) {
 8186		u32 here = start + offset;
 8187		u32 end;
 8188
 8189		err = niu_pci_eeprom_read(np, here);
 8190		if (err != 0x90)	/* expect a VPD-R (read-only data) resource tag */
 8191			return;
 8192
 8193		err = niu_pci_eeprom_read16_swp(np, here + 1);
 8194		if (err < 0)
 8195			return;
 8196
 8197		here = start + offset + 3;
 8198		end = start + offset + err;
 8199
 8200		offset += err;
 8201
 8202		err = niu_pci_vpd_scan_props(np, here, end);
 8203		if (err < 0 || err == 1)
 8204			return;
 8205	}
 8206}
 8207
 8208/* ESPC_PIO_EN_ENABLE must be set */
 8209static u32 niu_pci_vpd_offset(struct niu *np)
 8210{
 8211	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
 8212	int err;
 8213
 8214	while (start < end) {
 8215		ret = start;
 8216
 8217		/* ROM header signature?  */
 8218		err = niu_pci_eeprom_read16(np, start +  0);
 8219		if (err != 0x55aa)
 8220			return 0;
 8221
 8222		/* Apply offset to PCI data structure.  */
 8223		err = niu_pci_eeprom_read16(np, start + 23);
 8224		if (err < 0)
 8225			return 0;
 8226		start += err;
 8227
 8228		/* Check for "PCIR" signature.  */
 8229		err = niu_pci_eeprom_read16(np, start +  0);
 8230		if (err != 0x5043)
 8231			return 0;
 8232		err = niu_pci_eeprom_read16(np, start +  2);
 8233		if (err != 0x4952)
 8234			return 0;
 8235
 8236		/* Check for OBP image type.  */
 8237		err = niu_pci_eeprom_read(np, start + 20);
 8238		if (err < 0)
 8239			return 0;
 8240		if (err != 0x01) {
 8241			err = niu_pci_eeprom_read(np, ret + 2);
 8242			if (err < 0)
 8243				return 0;
 8244
 8245			start = ret + (err * 512);
 8246			continue;
 8247		}
 8248
 8249		err = niu_pci_eeprom_read16_swp(np, start + 8);
 8250		if (err < 0)
			/* returning a negative err from this u32 function
			 * would be misread as a huge offset; 0 already means
			 * "no VPD found" to the caller
			 */
 8251			return 0;
 8252		ret += err;
 8253
 8254		err = niu_pci_eeprom_read(np, ret + 0);
 8255		if (err != 0x82)	/* expect an Identifier String resource tag */
 8256			return 0;
 8257
 8258		return ret;
 8259	}
 8260
 8261	return 0;
 8262}
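
/* Walk summary (editor's note): this follows the standard PCI
 * expansion-ROM chain.  0x55aa is the ROM header signature, 0x5043/0x4952
 * spell "PC"/"IR" (the PCI data structure), non-OBP images are skipped in
 * 512-byte units via the image-length byte, and the final 0x82 check
 * expects an Identifier String resource tag at the computed VPD offset.
 */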
 8263
 8264static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
 8265{
 8266	if (!strcmp(phy_prop, "mif")) {
 8267		/* 1G copper, MII */
 8268		np->flags &= ~(NIU_FLAGS_FIBER |
 8269			       NIU_FLAGS_10G);
 8270		np->mac_xcvr = MAC_XCVR_MII;
 8271	} else if (!strcmp(phy_prop, "xgf")) {
 8272		/* 10G fiber, XPCS */
 8273		np->flags |= (NIU_FLAGS_10G |
 8274			      NIU_FLAGS_FIBER);
 8275		np->mac_xcvr = MAC_XCVR_XPCS;
 8276	} else if (!strcmp(phy_prop, "pcs")) {
 8277		/* 1G fiber, PCS */
 8278		np->flags &= ~NIU_FLAGS_10G;
 8279		np->flags |= NIU_FLAGS_FIBER;
 8280		np->mac_xcvr = MAC_XCVR_PCS;
 8281	} else if (!strcmp(phy_prop, "xgc")) {
 8282		/* 10G copper, XPCS */
 8283		np->flags |= NIU_FLAGS_10G;
 8284		np->flags &= ~NIU_FLAGS_FIBER;
 8285		np->mac_xcvr = MAC_XCVR_XPCS;
 8286	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
 8287		/* 10G Serdes or 1G Serdes, default to 10G */
 8288		np->flags |= NIU_FLAGS_10G;
 8289		np->flags &= ~NIU_FLAGS_FIBER;
 8290		np->flags |= NIU_FLAGS_XCVR_SERDES;
 8291		np->mac_xcvr = MAC_XCVR_XPCS;
 8292	} else {
 8293		return -EINVAL;
 8294	}
 8295	return 0;
 8296}
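
/* Decode table (editor's summary of the branches above):
 *
 *	"mif"        -> 1G copper,  MAC_XCVR_MII
 *	"pcs"        -> 1G fiber,   MAC_XCVR_PCS
 *	"xgf"        -> 10G fiber,  MAC_XCVR_XPCS
 *	"xgc"        -> 10G copper, MAC_XCVR_XPCS
 *	"xgsd"/"gsd" -> SERDES,     MAC_XCVR_XPCS, defaulting to 10G
 *
 * Anything else is rejected with -EINVAL, which makes callers such as
 * niu_pci_vpd_validate() fall back to the SPROM.
 */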
 8297
 8298static int niu_pci_vpd_get_nports(struct niu *np)
 8299{
 8300	int ports = 0;
 8301
 8302	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
 8303	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
 8304	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
 8305	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
 8306	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
 8307		ports = 4;
 8308	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
 8309		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
 8310		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
 8311		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
 8312		ports = 2;
 8313	}
 8314
 8315	return ports;
 8316}
 8317
 8318static void niu_pci_vpd_validate(struct niu *np)
 8319{
 8320	struct net_device *dev = np->dev;
 8321	struct niu_vpd *vpd = &np->vpd;
 8322	u8 val8;
 8323
 8324	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
 8325		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
 8326
 8327		np->flags &= ~NIU_FLAGS_VPD_VALID;
 8328		return;
 8329	}
 8330
 8331	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
 8332	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 8333		np->flags |= NIU_FLAGS_10G;
 8334		np->flags &= ~NIU_FLAGS_FIBER;
 8335		np->flags |= NIU_FLAGS_XCVR_SERDES;
 8336		np->mac_xcvr = MAC_XCVR_PCS;
 8337		if (np->port > 1) {
 8338			np->flags |= NIU_FLAGS_FIBER;
 8339			np->flags &= ~NIU_FLAGS_10G;
 8340		}
 8341		if (np->flags & NIU_FLAGS_10G)
 8342			np->mac_xcvr = MAC_XCVR_XPCS;
 8343	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 8344		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 8345			      NIU_FLAGS_HOTPLUG_PHY);
 8346	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
 8347		dev_err(np->device, "Illegal phy string [%s]\n",
 8348			np->vpd.phy_type);
 8349		dev_err(np->device, "Falling back to SPROM\n");
 8350		np->flags &= ~NIU_FLAGS_VPD_VALID;
 8351		return;
 8352	}
 8353
 8354	memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
 8355
 8356	val8 = dev->dev_addr[5];
 8357	dev->dev_addr[5] += np->port;
 8358	if (dev->dev_addr[5] < val8)
 8359		dev->dev_addr[4]++;
 8360}
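
/* Worked example of the per-port MAC fixup above (hypothetical address,
 * editor's illustration): with base 00:14:4f:6c:a2:fe on port 3, the last
 * byte wraps (0xfe + 3 = 0x01), the "dev_addr[5] < val8" test detects the
 * wrap, and the carry bumps byte 4, yielding 00:14:4f:6c:a3:01.
 */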
 8361
 8362static int niu_pci_probe_sprom(struct niu *np)
 8363{
 8364	struct net_device *dev = np->dev;
 8365	int len, i;
 8366	u64 val, sum;
 8367	u8 val8;
 8368
 8369	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
 8370	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
 8371	len = val / 4;
 8372
 8373	np->eeprom_len = len;
 8374
 8375	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8376		     "SPROM: Image size %llu\n", (unsigned long long)val);
 8377
 8378	sum = 0;
 8379	for (i = 0; i < len; i++) {
 8380		val = nr64(ESPC_NCR(i));
 8381		sum += (val >>  0) & 0xff;
 8382		sum += (val >>  8) & 0xff;
 8383		sum += (val >> 16) & 0xff;
 8384		sum += (val >> 24) & 0xff;
 8385	}
 8386	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8387		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
 8388	if ((sum & 0xff) != 0xab) {
 8389		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
 8390		return -EINVAL;
 8391	}
 8392
 8393	val = nr64(ESPC_PHY_TYPE);
 8394	switch (np->port) {
 8395	case 0:
 8396		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
 8397			ESPC_PHY_TYPE_PORT0_SHIFT;
 8398		break;
 8399	case 1:
 8400		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
 8401			ESPC_PHY_TYPE_PORT1_SHIFT;
 8402		break;
 8403	case 2:
 8404		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
 8405			ESPC_PHY_TYPE_PORT2_SHIFT;
 8406		break;
 8407	case 3:
 8408		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
 8409			ESPC_PHY_TYPE_PORT3_SHIFT;
 8410		break;
 8411	default:
 8412		dev_err(np->device, "Bogus port number %u\n",
 8413			np->port);
 8414		return -EINVAL;
 8415	}
 8416	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8417		     "SPROM: PHY type %x\n", val8);
 8418
 8419	switch (val8) {
 8420	case ESPC_PHY_TYPE_1G_COPPER:
 8421		/* 1G copper, MII */
 8422		np->flags &= ~(NIU_FLAGS_FIBER |
 8423			       NIU_FLAGS_10G);
 8424		np->mac_xcvr = MAC_XCVR_MII;
 8425		break;
 8426
 8427	case ESPC_PHY_TYPE_1G_FIBER:
 8428		/* 1G fiber, PCS */
 8429		np->flags &= ~NIU_FLAGS_10G;
 8430		np->flags |= NIU_FLAGS_FIBER;
 8431		np->mac_xcvr = MAC_XCVR_PCS;
 8432		break;
 8433
 8434	case ESPC_PHY_TYPE_10G_COPPER:
 8435		/* 10G copper, XPCS */
 8436		np->flags |= NIU_FLAGS_10G;
 8437		np->flags &= ~NIU_FLAGS_FIBER;
 8438		np->mac_xcvr = MAC_XCVR_XPCS;
 8439		break;
 8440
 8441	case ESPC_PHY_TYPE_10G_FIBER:
 8442		/* 10G fiber, XPCS */
 8443		np->flags |= (NIU_FLAGS_10G |
 8444			      NIU_FLAGS_FIBER);
 8445		np->mac_xcvr = MAC_XCVR_XPCS;
 8446		break;
 8447
 8448	default:
 8449		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
 8450		return -EINVAL;
 8451	}
 8452
 8453	val = nr64(ESPC_MAC_ADDR0);
 8454	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8455		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
 8456	dev->dev_addr[0] = (val >>  0) & 0xff;
 8457	dev->dev_addr[1] = (val >>  8) & 0xff;
 8458	dev->dev_addr[2] = (val >> 16) & 0xff;
 8459	dev->dev_addr[3] = (val >> 24) & 0xff;
 8460
 8461	val = nr64(ESPC_MAC_ADDR1);
 8462	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8463		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
 8464	dev->dev_addr[4] = (val >>  0) & 0xff;
 8465	dev->dev_addr[5] = (val >>  8) & 0xff;
 8466
 8467	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
 8468		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
 8469			dev->dev_addr);
 8470		return -EINVAL;
 8471	}
 8472
 8473	val8 = dev->dev_addr[5];
 8474	dev->dev_addr[5] += np->port;
 8475	if (dev->dev_addr[5] < val8)
 8476		dev->dev_addr[4]++;
 8477
 8478	val = nr64(ESPC_MOD_STR_LEN);
 8479	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8480		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
 8481	if (val >= 8 * 4)
 8482		return -EINVAL;
 8483
 8484	for (i = 0; i < val; i += 4) {
 8485		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
 8486
 8487		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
 8488		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
 8489		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
 8490		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
 8491	}
 8492	np->vpd.model[val] = '\0';
 8493
 8494	val = nr64(ESPC_BD_MOD_STR_LEN);
 8495	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8496		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
 8497	if (val >= 4 * 4)
 8498		return -EINVAL;
 8499
 8500	for (i = 0; i < val; i += 4) {
 8501		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
 8502
 8503		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
 8504		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
 8505		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
 8506		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
 8507	}
 8508	np->vpd.board_model[val] = '\0';
 8509
 8510	np->vpd.mac_num =
 8511		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
 8512	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8513		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
 8514
 8515	return 0;
 8516}
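
/* Editor's note on the checksum above: all bytes of the SPROM image are
 * summed and the low eight bits must equal the fixed seal 0xab, so an
 * image whose bytes sum to 0x12ab passes while 0x12aa fails.  The same
 * +port MAC adjustment as in niu_pci_vpd_validate() is applied afterwards.
 */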
 8517
 8518static int niu_get_and_validate_port(struct niu *np)
 8519{
 8520	struct niu_parent *parent = np->parent;
 8521
 8522	if (np->port <= 1)
 8523		np->flags |= NIU_FLAGS_XMAC;
 8524
 8525	if (!parent->num_ports) {
 8526		if (parent->plat_type == PLAT_TYPE_NIU) {
 8527			parent->num_ports = 2;
 8528		} else {
 8529			parent->num_ports = niu_pci_vpd_get_nports(np);
 8530			if (!parent->num_ports) {
 8531				/* Fall back to SPROM as last resort.
 8532				 * This will fail on most cards.
 8533				 */
 8534				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
 8535					ESPC_NUM_PORTS_MACS_VAL;
 8536
 8537				/* All of the current probing methods fail on
 8538				 * Maramba on-board parts.
 8539				 */
 8540				if (!parent->num_ports)
 8541					parent->num_ports = 4;
 8542			}
 8543		}
 8544	}
 8545
 8546	if (np->port >= parent->num_ports)
 8547		return -ENODEV;
 8548
 8549	return 0;
 8550}
 8551
 8552static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
 8553		      int dev_id_1, int dev_id_2, u8 phy_port, int type)
 8554{
 8555	u32 id = (dev_id_1 << 16) | dev_id_2;
 8556	u8 idx;
 8557
 8558	if (dev_id_1 < 0 || dev_id_2 < 0)
 8559		return 0;
 8560	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
 8561		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
 8562		 * test covers the 8706 as well.
 8563		 */
 8564		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
 8565		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
 8566			return 0;
 8567	} else {
 8568		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
 8569			return 0;
 8570	}
 8571
 8572	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
 8573		parent->index, id,
 8574		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
 8575		type == PHY_TYPE_PCS ? "PCS" : "MII",
 8576		phy_port);
 8577
 8578	if (p->cur[type] >= NIU_MAX_PORTS) {
 8579		pr_err("Too many PHY ports\n");
 8580		return -EINVAL;
 8581	}
 8582	idx = p->cur[type];
 8583	p->phy_id[type][idx] = id;
 8584	p->phy_port[type][idx] = phy_port;
 8585	p->cur[type] = idx + 1;
 8586	return 0;
 8587}
 8588
 8589static int port_has_10g(struct phy_probe_info *p, int port)
 8590{
 8591	int i;
 8592
 8593	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
 8594		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
 8595			return 1;
 8596	}
 8597	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
 8598		if (p->phy_port[PHY_TYPE_PCS][i] == port)
 8599			return 1;
 8600	}
 8601
 8602	return 0;
 8603}
 8604
 8605static int count_10g_ports(struct phy_probe_info *p, int *lowest)
 8606{
 8607	int port, cnt;
 8608
 8609	cnt = 0;
 8610	*lowest = 32;
 8611	for (port = 8; port < 32; port++) {
 8612		if (port_has_10g(p, port)) {
 8613			if (!cnt)
 8614				*lowest = port;
 8615			cnt++;
 8616		}
 8617	}
 8618
 8619	return cnt;
 8620}
 8621
 8622static int count_1g_ports(struct phy_probe_info *p, int *lowest)
 8623{
 8624	*lowest = 32;
 8625	if (p->cur[PHY_TYPE_MII])
 8626		*lowest = p->phy_port[PHY_TYPE_MII][0];
 8627
 8628	return p->cur[PHY_TYPE_MII];
 8629}
 8630
 8631static void niu_n2_divide_channels(struct niu_parent *parent)
 8632{
 8633	int num_ports = parent->num_ports;
 8634	int i;
 8635
 8636	for (i = 0; i < num_ports; i++) {
 8637		parent->rxchan_per_port[i] = (16 / num_ports);
 8638		parent->txchan_per_port[i] = (16 / num_ports);
 8639
 8640		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
 8641			parent->index, i,
 8642			parent->rxchan_per_port[i],
 8643			parent->txchan_per_port[i]);
 8644	}
 8645}
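
/* Example (editor's illustration): on an N2 NIU with num_ports = 2, each
 * port gets 16 / 2 = 8 RX and 8 TX channels.  Note this path divides a
 * single pool of 16 for both directions, unlike niu_divide_channels()
 * below, which sizes RX and TX from the separate NIU_NUM_RXCHAN and
 * NIU_NUM_TXCHAN pools.
 */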
 8646
 8647static void niu_divide_channels(struct niu_parent *parent,
 8648				int num_10g, int num_1g)
 8649{
 8650	int num_ports = parent->num_ports;
 8651	int rx_chans_per_10g, rx_chans_per_1g;
 8652	int tx_chans_per_10g, tx_chans_per_1g;
 8653	int i, tot_rx, tot_tx;
 8654
 8655	if (!num_10g || !num_1g) {
 8656		rx_chans_per_10g = rx_chans_per_1g =
 8657			(NIU_NUM_RXCHAN / num_ports);
 8658		tx_chans_per_10g = tx_chans_per_1g =
 8659			(NIU_NUM_TXCHAN / num_ports);
 8660	} else {
 8661		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
 8662		rx_chans_per_10g = (NIU_NUM_RXCHAN -
 8663				    (rx_chans_per_1g * num_1g)) /
 8664			num_10g;
 8665
 8666		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
 8667		tx_chans_per_10g = (NIU_NUM_TXCHAN -
 8668				    (tx_chans_per_1g * num_1g)) /
 8669			num_10g;
 8670	}
 8671
 8672	tot_rx = tot_tx = 0;
 8673	for (i = 0; i < num_ports; i++) {
 8674		int type = phy_decode(parent->port_phy, i);
 8675
 8676		if (type == PORT_TYPE_10G) {
 8677			parent->rxchan_per_port[i] = rx_chans_per_10g;
 8678			parent->txchan_per_port[i] = tx_chans_per_10g;
 8679		} else {
 8680			parent->rxchan_per_port[i] = rx_chans_per_1g;
 8681			parent->txchan_per_port[i] = tx_chans_per_1g;
 8682		}
 8683		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
 8684			parent->index, i,
 8685			parent->rxchan_per_port[i],
 8686			parent->txchan_per_port[i]);
 8687		tot_rx += parent->rxchan_per_port[i];
 8688		tot_tx += parent->txchan_per_port[i];
 8689	}
 8690
 8691	if (tot_rx > NIU_NUM_RXCHAN) {
 8692		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
 8693		       parent->index, tot_rx);
 8694		for (i = 0; i < num_ports; i++)
 8695			parent->rxchan_per_port[i] = 1;
 8696	}
 8697	if (tot_tx > NIU_NUM_TXCHAN) {
 8698		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
 8699		       parent->index, tot_tx);
 8700		for (i = 0; i < num_ports; i++)
 8701			parent->txchan_per_port[i] = 1;
 8702	}
 8703	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
 8704		pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
 8705			parent->index, tot_rx, tot_tx);
 8706	}
 8707}
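
/* Worked example (editor's sketch, assuming NIU_NUM_RXCHAN = 16 and
 * NIU_NUM_TXCHAN = 24 as defined in niu.h): for num_ports = 4 with
 * num_10g = 2 and num_1g = 2,
 *
 *	rx_chans_per_1g  = 16 / 8           = 2
 *	rx_chans_per_10g = (16 - 2 * 2) / 2 = 6
 *	tx_chans_per_1g  = 24 / 6           = 4
 *	tx_chans_per_10g = (24 - 4 * 2) / 2 = 8
 *
 * so tot_rx = 6 + 6 + 2 + 2 = 16 and tot_tx = 8 + 8 + 4 + 4 = 24, and
 * neither the overflow clamps nor the wasted-channel warning fire.
 */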
 8708
 8709static void niu_divide_rdc_groups(struct niu_parent *parent,
 8710				  int num_10g, int num_1g)
 8711{
 8712	int i, num_ports = parent->num_ports;
 8713	int rdc_group, rdc_groups_per_port;
 8714	int rdc_channel_base;
 8715
 8716	rdc_group = 0;
 8717	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
 8718
 8719	rdc_channel_base = 0;
 8720
 8721	for (i = 0; i < num_ports; i++) {
 8722		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
 8723		int grp, num_channels = parent->rxchan_per_port[i];
 8724		int this_channel_offset;
 8725
 8726		tp->first_table_num = rdc_group;
 8727		tp->num_tables = rdc_groups_per_port;
 8728		this_channel_offset = 0;
 8729		for (grp = 0; grp < tp->num_tables; grp++) {
 8730			struct rdc_table *rt = &tp->tables[grp];
 8731			int slot;
 8732
 8733			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
 8734				parent->index, i, tp->first_table_num + grp);
 8735			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
 8736				rt->rxdma_channel[slot] =
 8737					rdc_channel_base + this_channel_offset;
 8738
 8739				pr_cont("%d ", rt->rxdma_channel[slot]);
 8740
 8741				if (++this_channel_offset == num_channels)
 8742					this_channel_offset = 0;
 8743			}
 8744			pr_cont("]\n");
 8745		}
 8746
 8747		parent->rdc_default[i] = rdc_channel_base;
 8748
 8749		rdc_channel_base += num_channels;
 8750		rdc_group += rdc_groups_per_port;
 8751	}
 8752}
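
/* Example (editor's sketch, assuming NIU_NUM_RDC_TABLES = 16 and
 * NIU_RDC_TABLE_SLOTS = 16 from niu.h): with num_ports = 2 each port owns
 * 8 RDC tables.  If port 0 has 8 RX channels, the 16 slots of each of its
 * tables cycle through channels 0..7 twice, and port 1's tables then start
 * at rdc_channel_base = 8.
 */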
 8753
 8754static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
 8755			       struct phy_probe_info *info)
 8756{
 8757	unsigned long flags;
 8758	int port, err;
 8759
 8760	memset(info, 0, sizeof(*info));
 8761
 8762	/* Ports 0 through 7 are reserved for the on-board SERDES; probe the rest.  */
 8763	niu_lock_parent(np, flags);
 8764	err = 0;
 8765	for (port = 8; port < 32; port++) {
 8766		int dev_id_1, dev_id_2;
 8767
 8768		dev_id_1 = mdio_read(np, port,
 8769				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
 8770		dev_id_2 = mdio_read(np, port,
 8771				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
 8772		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8773				 PHY_TYPE_PMA_PMD);
 8774		if (err)
 8775			break;
 8776		dev_id_1 = mdio_read(np, port,
 8777				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
 8778		dev_id_2 = mdio_read(np, port,
 8779				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
 8780		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8781				 PHY_TYPE_PCS);
 8782		if (err)
 8783			break;
 8784		dev_id_1 = mii_read(np, port, MII_PHYSID1);
 8785		dev_id_2 = mii_read(np, port, MII_PHYSID2);
 8786		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8787				 PHY_TYPE_MII);
 8788		if (err)
 8789			break;
 8790	}
 8791	niu_unlock_parent(np, flags);
 8792
 8793	return err;
 8794}
 8795
 8796static int walk_phys(struct niu *np, struct niu_parent *parent)
 8797{
 8798	struct phy_probe_info *info = &parent->phy_probe_info;
 8799	int lowest_10g, lowest_1g;
 8800	int num_10g, num_1g;
 8801	u32 val;
 8802	int err;
 8803
 8804	num_10g = num_1g = 0;
 8805
 8806	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
 8807	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 8808		num_10g = 0;
 8809		num_1g = 2;
 8810		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
 8811		parent->num_ports = 4;
 8812		val = (phy_encode(PORT_TYPE_1G, 0) |
 8813		       phy_encode(PORT_TYPE_1G, 1) |
 8814		       phy_encode(PORT_TYPE_1G, 2) |
 8815		       phy_encode(PORT_TYPE_1G, 3));
 8816	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 8817		num_10g = 2;
 8818		num_1g = 0;
 8819		parent->num_ports = 2;
 8820		val = (phy_encode(PORT_TYPE_10G, 0) |
 8821		       phy_encode(PORT_TYPE_10G, 1));
 8822	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
 8823		   (parent->plat_type == PLAT_TYPE_NIU)) {
 8824		/* this is the Monza case */
 8825		if (np->flags & NIU_FLAGS_10G) {
 8826			val = (phy_encode(PORT_TYPE_10G, 0) |
 8827			       phy_encode(PORT_TYPE_10G, 1));
 8828		} else {
 8829			val = (phy_encode(PORT_TYPE_1G, 0) |
 8830			       phy_encode(PORT_TYPE_1G, 1));
 8831		}
 8832	} else {
 8833		err = fill_phy_probe_info(np, parent, info);
 8834		if (err)
 8835			return err;
 8836
 8837		num_10g = count_10g_ports(info, &lowest_10g);
 8838		num_1g = count_1g_ports(info, &lowest_1g);
 8839
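		/* Editor's note: the switch below packs the two counts into
		 * one nibble each, so 0x24 means 2 x 10G + 4 x 1G, 0x13
		 * means 1 x 10G + 3 x 1G, and 0x10 a single 10G port.
		 */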
 8840		switch ((num_10g << 4) | num_1g) {
 8841		case 0x24:
 8842			if (lowest_1g == 10)
 8843				parent->plat_type = PLAT_TYPE_VF_P0;
 8844			else if (lowest_1g == 26)
 8845				parent->plat_type = PLAT_TYPE_VF_P1;
 8846			else
 8847				goto unknown_vg_1g_port;
 8848
 8849			/* fallthru */
 8850		case 0x22:
 8851			val = (phy_encode(PORT_TYPE_10G, 0) |
 8852			       phy_encode(PORT_TYPE_10G, 1) |
 8853			       phy_encode(PORT_TYPE_1G, 2) |
 8854			       phy_encode(PORT_TYPE_1G, 3));
 8855			break;
 8856
 8857		case 0x20:
 8858			val = (phy_encode(PORT_TYPE_10G, 0) |
 8859			       phy_encode(PORT_TYPE_10G, 1));
 8860			break;
 8861
 8862		case 0x10:
 8863			val = phy_encode(PORT_TYPE_10G, np->port);
 8864			break;
 8865
 8866		case 0x14:
 8867			if (lowest_1g == 10)
 8868				parent->plat_type = PLAT_TYPE_VF_P0;
 8869			else if (lowest_1g == 26)
 8870				parent->plat_type = PLAT_TYPE_VF_P1;
 8871			else
 8872				goto unknown_vg_1g_port;
 8873
 8874			/* fallthru */
 8875		case 0x13:
 8876			if ((lowest_10g & 0x7) == 0)
 8877				val = (phy_encode(PORT_TYPE_10G, 0) |
 8878				       phy_encode(PORT_TYPE_1G, 1) |
 8879				       phy_encode(PORT_TYPE_1G, 2) |
 8880				       phy_encode(PORT_TYPE_1G, 3));
 8881			else
 8882				val = (phy_encode(PORT_TYPE_1G, 0) |
 8883				       phy_encode(PORT_TYPE_10G, 1) |
 8884				       phy_encode(PORT_TYPE_1G, 2) |
 8885				       phy_encode(PORT_TYPE_1G, 3));
 8886			break;
 8887
 8888		case 0x04:
 8889			if (lowest_1g == 10)
 8890				parent->plat_type = PLAT_TYPE_VF_P0;
 8891			else if (lowest_1g == 26)
 8892				parent->plat_type = PLAT_TYPE_VF_P1;
 8893			else
 8894				goto unknown_vg_1g_port;
 8895
 8896			val = (phy_encode(PORT_TYPE_1G, 0) |
 8897			       phy_encode(PORT_TYPE_1G, 1) |
 8898			       phy_encode(PORT_TYPE_1G, 2) |
 8899			       phy_encode(PORT_TYPE_1G, 3));
 8900			break;
 8901
 8902		default:
 8903			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
 8904			       num_10g, num_1g);
 8905			return -EINVAL;
 8906		}
 8907	}
 8908
 8909	parent->port_phy = val;
 8910
 8911	if (parent->plat_type == PLAT_TYPE_NIU)
 8912		niu_n2_divide_channels(parent);
 8913	else
 8914		niu_divide_channels(parent, num_10g, num_1g);
 8915
 8916	niu_divide_rdc_groups(parent, num_10g, num_1g);
 8917
 8918	return 0;
 8919
 8920unknown_vg_1g_port:
 8921	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
 8922	return -EINVAL;
 8923}
 8924
 8925static int niu_probe_ports(struct niu *np)
 8926{
 8927	struct niu_parent *parent = np->parent;
 8928	int err, i;
 8929
 8930	if (parent->port_phy == PORT_PHY_UNKNOWN) {
 8931		err = walk_phys(np, parent);
 8932		if (err)
 8933			return err;
 8934
 8935		niu_set_ldg_timer_res(np, 2);
 8936		for (i = 0; i <= LDN_MAX; i++)
 8937			niu_ldn_irq_enable(np, i, 0);
 8938	}
 8939
 8940	if (parent->port_phy == PORT_PHY_INVALID)
 8941		return -EINVAL;
 8942
 8943	return 0;
 8944}
 8945
 8946static int niu_classifier_swstate_init(struct niu *np)
 8947{
 8948	struct niu_classifier *cp = &np->clas;
 8949
 8950	cp->tcam_top = (u16) np->port;
 8951	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
 8952	cp->h1_init = 0xffffffff;
 8953	cp->h2_init = 0xffff;
 8954
 8955	return fflp_early_init(np);
 8956}
 8957
 8958static void niu_link_config_init(struct niu *np)
 8959{
 8960	struct niu_link_config *lp = &np->link_config;
 8961
 8962	lp->advertising = (ADVERTISED_10baseT_Half |
 8963			   ADVERTISED_10baseT_Full |
 8964			   ADVERTISED_100baseT_Half |
 8965			   ADVERTISED_100baseT_Full |
 8966			   ADVERTISED_1000baseT_Half |
 8967			   ADVERTISED_1000baseT_Full |
 8968			   ADVERTISED_10000baseT_Full |
 8969			   ADVERTISED_Autoneg);
 8970	lp->speed = lp->active_speed = SPEED_INVALID;
 8971	lp->duplex = DUPLEX_FULL;
 8972	lp->active_duplex = DUPLEX_INVALID;
 8973	lp->autoneg = 1;
 8974#if 0
 8975	lp->loopback_mode = LOOPBACK_MAC;
 8976	lp->active_speed = SPEED_10000;
 8977	lp->active_duplex = DUPLEX_FULL;
 8978#else
 8979	lp->loopback_mode = LOOPBACK_DISABLED;
 8980#endif
 8981}
 8982
 8983static int niu_init_mac_ipp_pcs_base(struct niu *np)
 8984{
 8985	switch (np->port) {
 8986	case 0:
 8987		np->mac_regs = np->regs + XMAC_PORT0_OFF;
 8988		np->ipp_off  = 0x00000;
 8989		np->pcs_off  = 0x04000;
 8990		np->xpcs_off = 0x02000;
 8991		break;
 8992
 8993	case 1:
 8994		np->mac_regs = np->regs + XMAC_PORT1_OFF;
 8995		np->ipp_off  = 0x08000;
 8996		np->pcs_off  = 0x0a000;
 8997		np->xpcs_off = 0x08000;
 8998		break;
 8999
 9000	case 2:
 9001		np->mac_regs = np->regs + BMAC_PORT2_OFF;
 9002		np->ipp_off  = 0x04000;
 9003		np->pcs_off  = 0x0e000;
 9004		np->xpcs_off = ~0UL;
 9005		break;
 9006
 9007	case 3:
 9008		np->mac_regs = np->regs + BMAC_PORT3_OFF;
 9009		np->ipp_off  = 0x0c000;
 9010		np->pcs_off  = 0x12000;
 9011		np->xpcs_off = ~0UL;
 9012		break;
 9013
 9014	default:
 9015		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
 9016		return -EINVAL;
 9017	}
 9018
 9019	return 0;
 9020}
 9021
 9022static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
 9023{
 9024	struct msix_entry msi_vec[NIU_NUM_LDG];
 9025	struct niu_parent *parent = np->parent;
 9026	struct pci_dev *pdev = np->pdev;
 9027	int i, num_irqs;
 9028	u8 first_ldg;
 9029
 9030	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
 9031	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
 9032		ldg_num_map[i] = first_ldg + i;
 9033
 9034	num_irqs = (parent->rxchan_per_port[np->port] +
 9035		    parent->txchan_per_port[np->port] +
 9036		    (np->port == 0 ? 3 : 1));
 9037	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
 9038
 9039	for (i = 0; i < num_irqs; i++) {
 9040		msi_vec[i].vector = 0;
 9041		msi_vec[i].entry = i;
 9042	}
 9043
 9044	num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
 9045	if (num_irqs < 0) {
 9046		np->flags &= ~NIU_FLAGS_MSIX;
 9047		return;
 9048	}
 9049
 9050	np->flags |= NIU_FLAGS_MSIX;
 9051	for (i = 0; i < num_irqs; i++)
 9052		np->ldg[i].irq = msi_vec[i].vector;
 9053	np->num_ldg = num_irqs;
 9054}
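
/* Editor's note: the vector budget above is rxchan + txchan plus three
 * extra LDGs on port 0 (MAC, MIF and SYSERR, matching the ordering comment
 * in niu_ldg_init()) or one extra (MAC) on the other ports.
 * pci_enable_msix_range() may grant fewer than requested; NIU_FLAGS_MSIX
 * and np->num_ldg record what was actually obtained.
 */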
 9055
 9056static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
 9057{
 9058#ifdef CONFIG_SPARC64
 9059	struct platform_device *op = np->op;
 9060	const u32 *int_prop;
 9061	int i;
 9062
 9063	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
 9064	if (!int_prop)
 9065		return -ENODEV;
 9066
 9067	for (i = 0; i < op->archdata.num_irqs; i++) {
 9068		ldg_num_map[i] = int_prop[i];
 9069		np->ldg[i].irq = op->archdata.irqs[i];
 9070	}
 9071
 9072	np->num_ldg = op->archdata.num_irqs;
 9073
 9074	return 0;
 9075#else
 9076	return -EINVAL;
 9077#endif
 9078}
 9079
 9080static int niu_ldg_init(struct niu *np)
 9081{
 9082	struct niu_parent *parent = np->parent;
 9083	u8 ldg_num_map[NIU_NUM_LDG];
 9084	int first_chan, num_chan;
 9085	int i, err, ldg_rotor;
 9086	u8 port;
 9087
 9088	np->num_ldg = 1;
 9089	np->ldg[0].irq = np->dev->irq;
 9090	if (parent->plat_type == PLAT_TYPE_NIU) {
 9091		err = niu_n2_irq_init(np, ldg_num_map);
 9092		if (err)
 9093			return err;
 9094	} else
 9095		niu_try_msix(np, ldg_num_map);
 9096
 9097	port = np->port;
 9098	for (i = 0; i < np->num_ldg; i++) {
 9099		struct niu_ldg *lp = &np->ldg[i];
 9100
 9101		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
 9102
 9103		lp->np = np;
 9104		lp->ldg_num = ldg_num_map[i];
 9105		lp->timer = 2; /* XXX */
 9106
 9107		/* On N2 NIU the firmware has already set up the SID mappings,
 9108		 * so each LDG is routed to the proper interrupt in the NCU
 9109		 * interrupt table; only non-NIU platforms need to program them.
 9110		 */
 9111		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 9112			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
 9113			if (err)
 9114				return err;
 9115		}
 9116	}
 9117
 9118	/* We adopt the LDG assignment ordering used by the N2 NIU
 9119	 * 'interrupt' properties because that simplifies a lot of
 9120	 * things.  This ordering is:
 9121	 *
 9122	 *	MAC
 9123	 *	MIF	(if port zero)
 9124	 *	SYSERR	(if port zero)
 9125	 *	RX channels
 9126	 *	TX channels
 9127	 */
 9128
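	/* Example (editor's sketch): with num_ldg = 4 on port 0, the rotor
	 * assigns MAC -> ldg_num_map[0], MIF -> [1], SYSERR -> [2], starts
	 * the RX channels at [3] wrapping back to [0], and the TX channels
	 * continue wherever the rotor left off.
	 */
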
 9129	ldg_rotor = 0;
 9130
 9131	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
 9132				  LDN_MAC(port));
 9133	if (err)
 9134		return err;
 9135
 9136	ldg_rotor++;
 9137	if (ldg_rotor == np->num_ldg)
 9138		ldg_rotor = 0;
 9139
 9140	if (port == 0) {
 9141		err = niu_ldg_assign_ldn(np, parent,
 9142					 ldg_num_map[ldg_rotor],
 9143					 LDN_MIF);
 9144		if (err)
 9145			return err;
 9146
 9147		ldg_rotor++;
 9148		if (ldg_rotor == np->num_ldg)
 9149			ldg_rotor = 0;
 9150
 9151		err = niu_ldg_assign_ldn(np, parent,
 9152					 ldg_num_map[ldg_rotor],
 9153					 LDN_DEVICE_ERROR);
 9154		if (err)
 9155			return err;
 9156
 9157		ldg_rotor++;
 9158		if (ldg_rotor == np->num_ldg)
 9159			ldg_rotor = 0;
 9160
 9161	}
 9162
 9163	first_chan = 0;
 9164	for (i = 0; i < port; i++)
 9165		first_chan += parent->rxchan_per_port[i];
 9166	num_chan = parent->rxchan_per_port[port];
 9167
 9168	for (i = first_chan; i < (first_chan + num_chan); i++) {
 9169		err = niu_ldg_assign_ldn(np, parent,
 9170					 ldg_num_map[ldg_rotor],
 9171					 LDN_RXDMA(i));
 9172		if (err)
 9173			return err;
 9174		ldg_rotor++;
 9175		if (ldg_rotor == np->num_ldg)
 9176			ldg_rotor = 0;
 9177	}
 9178
 9179	first_chan = 0;
 9180	for (i = 0; i < port; i++)
 9181		first_chan += parent->txchan_per_port[i];
 9182	num_chan = parent->txchan_per_port[port];
 9183	for (i = first_chan; i < (first_chan + num_chan); i++) {
 9184		err = niu_ldg_assign_ldn(np, parent,
 9185					 ldg_num_map[ldg_rotor],
 9186					 LDN_TXDMA(i));
 9187		if (err)
 9188			return err;
 9189		ldg_rotor++;
 9190		if (ldg_rotor == np->num_ldg)
 9191			ldg_rotor = 0;
 9192	}
 9193
 9194	return 0;
 9195}
 9196
 9197static void niu_ldg_free(struct niu *np)
 9198{
 9199	if (np->flags & NIU_FLAGS_MSIX)
 9200		pci_disable_msix(np->pdev);
 9201}
 9202
 9203static int niu_get_of_props(struct niu *np)
 9204{
 9205#ifdef CONFIG_SPARC64
 9206	struct net_device *dev = np->dev;
 9207	struct device_node *dp;
 9208	const char *phy_type;
 9209	const u8 *mac_addr;
 9210	const char *model;
 9211	int prop_len;
 9212
 9213	if (np->parent->plat_type == PLAT_TYPE_NIU)
 9214		dp = np->op->dev.of_node;
 9215	else
 9216		dp = pci_device_to_OF_node(np->pdev);
 9217
 9218	phy_type = of_get_property(dp, "phy-type", &prop_len);
 9219	if (!phy_type) {
 9220		netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp);
 9221		return -EINVAL;
 9222	}
 9223
 9224	if (!strcmp(phy_type, "none"))
 9225		return -ENODEV;
 9226
 9227	strcpy(np->vpd.phy_type, phy_type);
 9228
 9229	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
 9230		netdev_err(dev, "%pOF: Illegal phy string [%s]\n",
 9231			   dp, np->vpd.phy_type);
 9232		return -EINVAL;
 9233	}
 9234
 9235	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
 9236	if (!mac_addr) {
 9237		netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n",
 9238			   dp);
 9239		return -EINVAL;
 9240	}
 9241	if (prop_len != dev->addr_len) {
 9242		netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n",
 9243			   dp, prop_len);
 9244	}
 9245	memcpy(dev->dev_addr, mac_addr, dev->addr_len);
 9246	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
 9247		netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp);
 9248		netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr);
 9249		return -EINVAL;
 9250	}
 9251
 9252	model = of_get_property(dp, "model", &prop_len);
 9253
 9254	if (model)
 9255		strcpy(np->vpd.model, model);
 9256
 9257	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
 9258		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 9259			NIU_FLAGS_HOTPLUG_PHY);
 9260	}
 9261
 9262	return 0;
 9263#else
 9264	return -EINVAL;
 9265#endif
 9266}
 9267
 9268static int niu_get_invariants(struct niu *np)
 9269{
 9270	int err, have_props;
 9271	u32 offset;
 9272
 9273	err = niu_get_of_props(np);
 9274	if (err == -ENODEV)
 9275		return err;
 9276
 9277	have_props = !err;
 9278
 9279	err = niu_init_mac_ipp_pcs_base(np);
 9280	if (err)
 9281		return err;
 9282
 9283	if (have_props) {
 9284		err = niu_get_and_validate_port(np);
 9285		if (err)
 9286			return err;
 9287
 9288	} else  {
 9289		if (np->parent->plat_type == PLAT_TYPE_NIU)
 9290			return -EINVAL;
 9291
 9292		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
 9293		offset = niu_pci_vpd_offset(np);
 9294		netif_printk(np, probe, KERN_DEBUG, np->dev,
 9295			     "%s() VPD offset [%08x]\n", __func__, offset);
 9296		if (offset)
 9297			niu_pci_vpd_fetch(np, offset);
 9298		nw64(ESPC_PIO_EN, 0);
 9299
 9300		if (np->flags & NIU_FLAGS_VPD_VALID) {
 9301			niu_pci_vpd_validate(np);
 9302			err = niu_get_and_validate_port(np);
 9303			if (err)
 9304				return err;
 9305		}
 9306
 9307		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
 9308			err = niu_get_and_validate_port(np);
 9309			if (err)
 9310				return err;
 9311			err = niu_pci_probe_sprom(np);
 9312			if (err)
 9313				return err;
 9314		}
 9315	}
 9316
 9317	err = niu_probe_ports(np);
 9318	if (err)
 9319		return err;
 9320
 9321	niu_ldg_init(np);
 9322
 9323	niu_classifier_swstate_init(np);
 9324	niu_link_config_init(np);
 9325
 9326	err = niu_determine_phy_disposition(np);
 9327	if (!err)
 9328		err = niu_init_link(np);
 9329
 9330	return err;
 9331}
 9332
 9333static LIST_HEAD(niu_parent_list);
 9334static DEFINE_MUTEX(niu_parent_lock);
 9335static int niu_parent_index;
 9336
 9337static ssize_t show_port_phy(struct device *dev,
 9338			     struct device_attribute *attr, char *buf)
 9339{
 9340	struct platform_device *plat_dev = to_platform_device(dev);
 9341	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9342	u32 port_phy = p->port_phy;
 9343	char *orig_buf = buf;
 9344	int i;
 9345
 9346	if (port_phy == PORT_PHY_UNKNOWN ||
 9347	    port_phy == PORT_PHY_INVALID)
 9348		return 0;
 9349
 9350	for (i = 0; i < p->num_ports; i++) {
 9351		const char *type_str;
 9352		int type;
 9353
 9354		type = phy_decode(port_phy, i);
 9355		if (type == PORT_TYPE_10G)
 9356			type_str = "10G";
 9357		else
 9358			type_str = "1G";
 9359		buf += sprintf(buf,
 9360			       (i == 0) ? "%s" : " %s",
 9361			       type_str);
 9362	}
 9363	buf += sprintf(buf, "\n");
 9364	return buf - orig_buf;
 9365}
 9366
 9367static ssize_t show_plat_type(struct device *dev,
 9368			      struct device_attribute *attr, char *buf)
 9369{
 9370	struct platform_device *plat_dev = to_platform_device(dev);
 9371	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9372	const char *type_str;
 9373
 9374	switch (p->plat_type) {
 9375	case PLAT_TYPE_ATLAS:
 9376		type_str = "atlas";
 9377		break;
 9378	case PLAT_TYPE_NIU:
 9379		type_str = "niu";
 9380		break;
 9381	case PLAT_TYPE_VF_P0:
 9382		type_str = "vf_p0";
 9383		break;
 9384	case PLAT_TYPE_VF_P1:
 9385		type_str = "vf_p1";
 9386		break;
 9387	default:
 9388		type_str = "unknown";
 9389		break;
 9390	}
 9391
 9392	return sprintf(buf, "%s\n", type_str);
 9393}
 9394
 9395static ssize_t __show_chan_per_port(struct device *dev,
 9396				    struct device_attribute *attr, char *buf,
 9397				    int rx)
 9398{
 9399	struct platform_device *plat_dev = to_platform_device(dev);
 9400	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9401	char *orig_buf = buf;
 9402	u8 *arr;
 9403	int i;
 9404
 9405	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
 9406
 9407	for (i = 0; i < p->num_ports; i++) {
 9408		buf += sprintf(buf,
 9409			       (i == 0) ? "%d" : " %d",
 9410			       arr[i]);
 9411	}
 9412	buf += sprintf(buf, "\n");
 9413
 9414	return buf - orig_buf;
 9415}
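
/* Example output (editor's illustration): reading rxchan_per_port on a
 * 4-port board split two 10G / two 1G by niu_divide_channels() above would
 * yield "6 6 2 2\n".
 */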
 9416
 9417static ssize_t show_rxchan_per_port(struct device *dev,
 9418				    struct device_attribute *attr, char *buf)
 9419{
 9420	return __show_chan_per_port(dev, attr, buf, 1);
 9421}
 9422
 9423static ssize_t show_txchan_per_port(struct device *dev,
 9424				    struct device_attribute *attr, char *buf)
 9425{
 9426	return __show_chan_per_port(dev, attr, buf, 0);	/* 0 selects txchan_per_port */
 9427}
 9428
 9429static ssize_t show_num_ports(struct device *dev,
 9430			      struct device_attribute *attr, char *buf)
 9431{
 9432	struct platform_device *plat_dev = to_platform_device(dev);
 9433	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9434
 9435	return sprintf(buf, "%d\n", p->num_ports);
 9436}
 9437
 9438static struct device_attribute niu_parent_attributes[] = {
 9439	__ATTR(port_phy, 0444, show_port_phy, NULL),
 9440	__ATTR(plat_type, 0444, show_plat_type, NULL),
 9441	__ATTR(rxchan_per_port, 0444, show_rxchan_per_port, NULL),
 9442	__ATTR(txchan_per_port, 0444, show_txchan_per_port, NULL),
 9443	__ATTR(num_ports, 0444, show_num_ports, NULL),
 9444	{}
 9445};
 9446
 9447static struct niu_parent *niu_new_parent(struct niu *np,
 9448					 union niu_parent_id *id, u8 ptype)
 9449{
 9450	struct platform_device *plat_dev;
 9451	struct niu_parent *p;
 9452	int i;
 9453
 9454	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
 9455						   NULL, 0);
 9456	if (IS_ERR(plat_dev))
 9457		return NULL;
 9458
 9459	for (i = 0; niu_parent_attributes[i].attr.name; i++) {
 9460		int err = device_create_file(&plat_dev->dev,
 9461					     &niu_parent_attributes[i]);
 9462		if (err)
 9463			goto fail_unregister;
 9464	}
 9465
 9466	p = kzalloc(sizeof(*p), GFP_KERNEL);
 9467	if (!p)
 9468		goto fail_unregister;
 9469
 9470	p->index = niu_parent_index++;
 9471
 9472	plat_dev->dev.platform_data = p;
 9473	p->plat_dev = plat_dev;
 9474
 9475	memcpy(&p->id, id, sizeof(*id));
 9476	p->plat_type = ptype;
 9477	INIT_LIST_HEAD(&p->list);
 9478	atomic_set(&p->refcnt, 0);
 9479	list_add(&p->list, &niu_parent_list);
 9480	spin_lock_init(&p->lock);
 9481
 9482	p->rxdma_clock_divider = 7500;
 9483
 9484	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
 9485	if (p->plat_type == PLAT_TYPE_NIU)
 9486		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
 9487
 9488	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
 9489		int index = i - CLASS_CODE_USER_PROG1;
 9490
 9491		p->tcam_key[index] = TCAM_KEY_TSEL;
 9492		p->flow_key[index] = (FLOW_KEY_IPSA |
 9493				      FLOW_KEY_IPDA |
 9494				      FLOW_KEY_PROTO |
 9495				      (FLOW_KEY_L4_BYTE12 <<
 9496				       FLOW_KEY_L4_0_SHIFT) |
 9497				      (FLOW_KEY_L4_BYTE12 <<
 9498				       FLOW_KEY_L4_1_SHIFT));
 9499	}
 9500
 9501	for (i = 0; i < LDN_MAX + 1; i++)
 9502		p->ldg_map[i] = LDG_INVALID;
 9503
 9504	return p;
 9505
 9506fail_unregister:
 9507	platform_device_unregister(plat_dev);
 9508	return NULL;
 9509}
 9510
 9511static struct niu_parent *niu_get_parent(struct niu *np,
 9512					 union niu_parent_id *id, u8 ptype)
 9513{
 9514	struct niu_parent *p, *tmp;
 9515	int port = np->port;
 9516
 9517	mutex_lock(&niu_parent_lock);
 9518	p = NULL;
 9519	list_for_each_entry(tmp, &niu_parent_list, list) {
 9520		if (!memcmp(id, &tmp->id, sizeof(*id))) {
 9521			p = tmp;
 9522			break;
 9523		}
 9524	}
 9525	if (!p)
 9526		p = niu_new_parent(np, id, ptype);
 9527
 9528	if (p) {
 9529		char port_name[8];
 9530		int err;
 9531
 9532		sprintf(port_name, "port%d", port);
 9533		err = sysfs_create_link(&p->plat_dev->dev.kobj,
 9534					&np->device->kobj,
 9535					port_name);
 9536		if (!err) {
 9537			p->ports[port] = np;
 9538			atomic_inc(&p->refcnt);
 9539		}
 9540	}
 9541	mutex_unlock(&niu_parent_lock);
 9542
 9543	return p;
 9544}
 9545
 9546static void niu_put_parent(struct niu *np)
 9547{
 9548	struct niu_parent *p = np->parent;
 9549	u8 port = np->port;
 9550	char port_name[8];
 9551
 9552	BUG_ON(!p || p->ports[port] != np);
 9553
 9554	netif_printk(np, probe, KERN_DEBUG, np->dev,
 9555		     "%s() port[%u]\n", __func__, port);
 9556
 9557	sprintf(port_name, "port%d", port);
 9558
 9559	mutex_lock(&niu_parent_lock);
 9560
 9561	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
 9562
 9563	p->ports[port] = NULL;
 9564	np->parent = NULL;
 9565
 9566	if (atomic_dec_and_test(&p->refcnt)) {
 9567		list_del(&p->list);
 9568		platform_device_unregister(p->plat_dev);
 9569	}
 9570
 9571	mutex_unlock(&niu_parent_lock);
 9572}
 9573
 9574static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
 9575				    u64 *handle, gfp_t flag)
 9576{
 9577	dma_addr_t dh;
 9578	void *ret;
 9579
 9580	ret = dma_alloc_coherent(dev, size, &dh, flag);
 9581	if (ret)
 9582		*handle = dh;
 9583	return ret;
 9584}
 9585
 9586static void niu_pci_free_coherent(struct device *dev, size_t size,
 9587				  void *cpu_addr, u64 handle)
 9588{
 9589	dma_free_coherent(dev, size, cpu_addr, handle);
 9590}
 9591
 9592static u64 niu_pci_map_page(struct device *dev, struct page *page,
 9593			    unsigned long offset, size_t size,
 9594			    enum dma_data_direction direction)
 9595{
 9596	return dma_map_page(dev, page, offset, size, direction);
 9597}
 9598
 9599static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
 9600			       size_t size, enum dma_data_direction direction)
 9601{
 9602	dma_unmap_page(dev, dma_address, size, direction);
 9603}
 9604
 9605static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
 9606			      size_t size,
 9607			      enum dma_data_direction direction)
 9608{
 9609	return dma_map_single(dev, cpu_addr, size, direction);
 9610}
 9611
 9612static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
 9613				 size_t size,
 9614				 enum dma_data_direction direction)
 9615{
 9616	dma_unmap_single(dev, dma_address, size, direction);
 9617}
 9618
 9619static const struct niu_ops niu_pci_ops = {
 9620	.alloc_coherent	= niu_pci_alloc_coherent,
 9621	.free_coherent	= niu_pci_free_coherent,
 9622	.map_page	= niu_pci_map_page,
 9623	.unmap_page	= niu_pci_unmap_page,
 9624	.map_single	= niu_pci_map_single,
 9625	.unmap_single	= niu_pci_unmap_single,
 9626};
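
/* Editor's note: this ops table routes DMA through the generic dma_map_*
 * API for PCI devices, while the CONFIG_SPARC64-only niu_phys_ops table
 * further down hands the hardware raw physical addresses instead, the
 * appropriate model for the on-chip N2 NIU probed via the platform bus.
 */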
 9627
 9628static void niu_driver_version(void)
 9629{
 9630	static int niu_version_printed;
 9631
 9632	if (niu_version_printed++ == 0)
 9633		pr_info("%s", version);
 9634}
 9635
 9636static struct net_device *niu_alloc_and_init(struct device *gen_dev,
 9637					     struct pci_dev *pdev,
 9638					     struct platform_device *op,
 9639					     const struct niu_ops *ops, u8 port)
 9640{
 9641	struct net_device *dev;
 9642	struct niu *np;
 9643
 9644	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
 9645	if (!dev)
 9646		return NULL;
 9647
 9648	SET_NETDEV_DEV(dev, gen_dev);
 9649
 9650	np = netdev_priv(dev);
 9651	np->dev = dev;
 9652	np->pdev = pdev;
 9653	np->op = op;
 9654	np->device = gen_dev;
 9655	np->ops = ops;
 9656
 9657	np->msg_enable = niu_debug;
 9658
 9659	spin_lock_init(&np->lock);
 9660	INIT_WORK(&np->reset_task, niu_reset_task);
 9661
 9662	np->port = port;
 9663
 9664	return dev;
 9665}
 9666
 9667static const struct net_device_ops niu_netdev_ops = {
 9668	.ndo_open		= niu_open,
 9669	.ndo_stop		= niu_close,
 9670	.ndo_start_xmit		= niu_start_xmit,
 9671	.ndo_get_stats64	= niu_get_stats,
 9672	.ndo_set_rx_mode	= niu_set_rx_mode,
 9673	.ndo_validate_addr	= eth_validate_addr,
 9674	.ndo_set_mac_address	= niu_set_mac_addr,
 9675	.ndo_do_ioctl		= niu_ioctl,
 9676	.ndo_tx_timeout		= niu_tx_timeout,
 9677	.ndo_change_mtu		= niu_change_mtu,
 9678};
 9679
 9680static void niu_assign_netdev_ops(struct net_device *dev)
 9681{
 9682	dev->netdev_ops = &niu_netdev_ops;
 9683	dev->ethtool_ops = &niu_ethtool_ops;
 9684	dev->watchdog_timeo = NIU_TX_TIMEOUT;
 9685}
 9686
 9687static void niu_device_announce(struct niu *np)
 9688{
 9689	struct net_device *dev = np->dev;
 9690
 9691	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
 9692
 9693	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
 9694		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
 9695				dev->name,
 9696				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
 9697				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
 9698				(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
 9699				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
 9700				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
 9701				np->vpd.phy_type);
 9702	} else {
 9703		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
 9704				dev->name,
 9705				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
 9706				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
 9707				(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
 9708				 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
 9709				  "COPPER")),
 9710				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
 9711				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
 9712				np->vpd.phy_type);
 9713	}
 9714}
 9715
 9716static void niu_set_basic_features(struct net_device *dev)
 9717{
 9718	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
 9719	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
 9720}
 9721
 9722static int niu_pci_init_one(struct pci_dev *pdev,
 9723			    const struct pci_device_id *ent)
 9724{
 9725	union niu_parent_id parent_id;
 9726	struct net_device *dev;
 9727	struct niu *np;
 9728	int err;
 9729	u64 dma_mask;
 9730
 9731	niu_driver_version();
 9732
 9733	err = pci_enable_device(pdev);
 9734	if (err) {
 9735		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 9736		return err;
 9737	}
 9738
 9739	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
 9740	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 9741		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
 9742		err = -ENODEV;
 9743		goto err_out_disable_pdev;
 9744	}
 9745
 9746	err = pci_request_regions(pdev, DRV_MODULE_NAME);
 9747	if (err) {
 9748		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
 9749		goto err_out_disable_pdev;
 9750	}
 9751
 9752	if (!pci_is_pcie(pdev)) {
 9753		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
 9754		err = -ENODEV;
 9755		goto err_out_free_res;
 9756	}
 9757
 9758	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
 9759				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
 9760	if (!dev) {
 9761		err = -ENOMEM;
 9762		goto err_out_free_res;
 9763	}
 9764	np = netdev_priv(dev);
 9765
 9766	memset(&parent_id, 0, sizeof(parent_id));
 9767	parent_id.pci.domain = pci_domain_nr(pdev->bus);
 9768	parent_id.pci.bus = pdev->bus->number;
 9769	parent_id.pci.device = PCI_SLOT(pdev->devfn);
 9770
 9771	np->parent = niu_get_parent(np, &parent_id,
 9772				    PLAT_TYPE_ATLAS);
 9773	if (!np->parent) {
 9774		err = -ENOMEM;
 9775		goto err_out_free_dev;
 9776	}
 9777
 9778	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
 9779		PCI_EXP_DEVCTL_NOSNOOP_EN,
 9780		PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
 9781		PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE |
 9782		PCI_EXP_DEVCTL_RELAX_EN);
 9783
 9784	dma_mask = DMA_BIT_MASK(44);
 9785	err = pci_set_dma_mask(pdev, dma_mask);
 9786	if (!err) {
 9787		dev->features |= NETIF_F_HIGHDMA;
 9788		err = pci_set_consistent_dma_mask(pdev, dma_mask);
 9789		if (err) {
 9790			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
 9791			goto err_out_release_parent;
 9792		}
 9793	}
 9794	if (err) {
 9795		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 9796		if (err) {
 9797			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
 9798			goto err_out_release_parent;
 9799		}
 9800	}
 9801
 9802	niu_set_basic_features(dev);
 9803
 9804	dev->priv_flags |= IFF_UNICAST_FLT;
 9805
 9806	np->regs = pci_ioremap_bar(pdev, 0);
 9807	if (!np->regs) {
 9808		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
 9809		err = -ENOMEM;
 9810		goto err_out_release_parent;
 9811	}
 9812
 9813	pci_set_master(pdev);
 9814	pci_save_state(pdev);
 9815
 9816	dev->irq = pdev->irq;
 9817
 9818	/* MTU range: 68 - 9216 */
 9819	dev->min_mtu = ETH_MIN_MTU;
 9820	dev->max_mtu = NIU_MAX_MTU;
 9821
 9822	niu_assign_netdev_ops(dev);
 9823
 9824	err = niu_get_invariants(np);
 9825	if (err) {
 9826		if (err != -ENODEV)
 9827			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
 9828		goto err_out_iounmap;
 9829	}
 9830
 9831	err = register_netdev(dev);
 9832	if (err) {
 9833		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
 9834		goto err_out_iounmap;
 9835	}
 9836
 9837	pci_set_drvdata(pdev, dev);
 9838
 9839	niu_device_announce(np);
 9840
 9841	return 0;
 9842
 9843err_out_iounmap:
 9844	if (np->regs) {
 9845		iounmap(np->regs);
 9846		np->regs = NULL;
 9847	}
 9848
 9849err_out_release_parent:
 9850	niu_put_parent(np);
 9851
 9852err_out_free_dev:
 9853	free_netdev(dev);
 9854
 9855err_out_free_res:
 9856	pci_release_regions(pdev);
 9857
 9858err_out_disable_pdev:
 9859	pci_disable_device(pdev);
 9860
 9861	return err;
 9862}
 9863
 9864static void niu_pci_remove_one(struct pci_dev *pdev)
 9865{
 9866	struct net_device *dev = pci_get_drvdata(pdev);
 9867
 9868	if (dev) {
 9869		struct niu *np = netdev_priv(dev);
 9870
 9871		unregister_netdev(dev);
 9872		if (np->regs) {
 9873			iounmap(np->regs);
 9874			np->regs = NULL;
 9875		}
 9876
 9877		niu_ldg_free(np);
 9878
 9879		niu_put_parent(np);
 9880
 9881		free_netdev(dev);
 9882		pci_release_regions(pdev);
 9883		pci_disable_device(pdev);
 9884	}
 9885}
 9886
 9887static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
 9888{
 9889	struct net_device *dev = pci_get_drvdata(pdev);
 9890	struct niu *np = netdev_priv(dev);
 9891	unsigned long flags;
 9892
 9893	if (!netif_running(dev))
 9894		return 0;
 9895
 9896	flush_work(&np->reset_task);
 9897	niu_netif_stop(np);
 9898
 9899	del_timer_sync(&np->timer);
 9900
 9901	spin_lock_irqsave(&np->lock, flags);
 9902	niu_enable_interrupts(np, 0);
 9903	spin_unlock_irqrestore(&np->lock, flags);
 9904
 9905	netif_device_detach(dev);
 9906
 9907	spin_lock_irqsave(&np->lock, flags);
 9908	niu_stop_hw(np);
 9909	spin_unlock_irqrestore(&np->lock, flags);
 9910
 9911	pci_save_state(pdev);
 9912
 9913	return 0;
 9914}
 9915
 9916static int niu_resume(struct pci_dev *pdev)
 9917{
 9918	struct net_device *dev = pci_get_drvdata(pdev);
 9919	struct niu *np = netdev_priv(dev);
 9920	unsigned long flags;
 9921	int err;
 9922
 9923	if (!netif_running(dev))
 9924		return 0;
 9925
 9926	pci_restore_state(pdev);
 9927
 9928	netif_device_attach(dev);
 9929
 9930	spin_lock_irqsave(&np->lock, flags);
 9931
 9932	err = niu_init_hw(np);
 9933	if (!err) {
 9934		np->timer.expires = jiffies + HZ;
 9935		add_timer(&np->timer);
 9936		niu_netif_start(np);
 9937	}
 9938
 9939	spin_unlock_irqrestore(&np->lock, flags);
 9940
 9941	return err;
 9942}
 9943
 9944static struct pci_driver niu_pci_driver = {
 9945	.name		= DRV_MODULE_NAME,
 9946	.id_table	= niu_pci_tbl,
 9947	.probe		= niu_pci_init_one,
 9948	.remove		= niu_pci_remove_one,
 9949	.suspend	= niu_suspend,
 9950	.resume		= niu_resume,
 9951};
 9952
 9953#ifdef CONFIG_SPARC64
 9954static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
 9955				     u64 *dma_addr, gfp_t flag)
 9956{
 9957	unsigned long order = get_order(size);
 9958	unsigned long page = __get_free_pages(flag, order);
 9959
 9960	if (page == 0UL)
 9961		return NULL;
 9962	memset((char *)page, 0, PAGE_SIZE << order);
 9963	*dma_addr = __pa(page);
 9964
 9965	return (void *) page;
 9966}
 9967
 9968static void niu_phys_free_coherent(struct device *dev, size_t size,
 9969				   void *cpu_addr, u64 handle)
 9970{
 9971	unsigned long order = get_order(size);
 9972
 9973	free_pages((unsigned long) cpu_addr, order);
 9974}
 9975
 9976static u64 niu_phys_map_page(struct device *dev, struct page *page,
 9977			     unsigned long offset, size_t size,
 9978			     enum dma_data_direction direction)
 9979{
 9980	return page_to_phys(page) + offset;
 9981}
 9982
 9983static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
 9984				size_t size, enum dma_data_direction direction)
 9985{
 9986	/* Nothing to do.  */
 9987}
 9988
 9989static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
 9990			       size_t size,
 9991			       enum dma_data_direction direction)
 9992{
 9993	return __pa(cpu_addr);
 9994}
 9995
 9996static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
 9997				  size_t size,
 9998				  enum dma_data_direction direction)
 9999{
10000	/* Nothing to do.  */
10001}
10002
10003static const struct niu_ops niu_phys_ops = {
10004	.alloc_coherent	= niu_phys_alloc_coherent,
10005	.free_coherent	= niu_phys_free_coherent,
10006	.map_page	= niu_phys_map_page,
10007	.unmap_page	= niu_phys_unmap_page,
10008	.map_single	= niu_phys_map_single,
10009	.unmap_single	= niu_phys_unmap_single,
10010};
10011
10012static int niu_of_probe(struct platform_device *op)
10013{
10014	union niu_parent_id parent_id;
10015	struct net_device *dev;
10016	struct niu *np;
10017	const u32 *reg;
10018	int err;
10019
10020	niu_driver_version();
10021
10022	reg = of_get_property(op->dev.of_node, "reg", NULL);
10023	if (!reg) {
10024		dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n",
10025			op->dev.of_node);
10026		return -ENODEV;
10027	}
10028
10029	dev = niu_alloc_and_init(&op->dev, NULL, op,
10030				 &niu_phys_ops, reg[0] & 0x1);
10031	if (!dev) {
10032		err = -ENOMEM;
10033		goto err_out;
10034	}
10035	np = netdev_priv(dev);
10036
10037	memset(&parent_id, 0, sizeof(parent_id));
10038	parent_id.of = of_get_parent(op->dev.of_node);
10039
10040	np->parent = niu_get_parent(np, &parent_id,
10041				    PLAT_TYPE_NIU);
10042	if (!np->parent) {
10043		err = -ENOMEM;
10044		goto err_out_free_dev;
10045	}
10046
10047	niu_set_basic_features(dev);
10048
10049	np->regs = of_ioremap(&op->resource[1], 0,
10050			      resource_size(&op->resource[1]),
10051			      "niu regs");
10052	if (!np->regs) {
10053		dev_err(&op->dev, "Cannot map device registers, aborting\n");
10054		err = -ENOMEM;
10055		goto err_out_release_parent;
10056	}
10057
10058	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
10059				    resource_size(&op->resource[2]),
10060				    "niu vregs-1");
10061	if (!np->vir_regs_1) {
10062		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
10063		err = -ENOMEM;
10064		goto err_out_iounmap;
10065	}
10066
10067	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
10068				    resource_size(&op->resource[3]),
10069				    "niu vregs-2");
10070	if (!np->vir_regs_2) {
10071		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
10072		err = -ENOMEM;
10073		goto err_out_iounmap;
10074	}
10075
10076	niu_assign_netdev_ops(dev);
10077
10078	err = niu_get_invariants(np);
10079	if (err) {
10080		if (err != -ENODEV)
10081			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
10082		goto err_out_iounmap;
10083	}
10084
10085	err = register_netdev(dev);
10086	if (err) {
10087		dev_err(&op->dev, "Cannot register net device, aborting\n");
10088		goto err_out_iounmap;
10089	}
10090
10091	platform_set_drvdata(op, dev);
10092
10093	niu_device_announce(np);
10094
10095	return 0;
10096
10097err_out_iounmap:
10098	if (np->vir_regs_1) {
10099		of_iounmap(&op->resource[2], np->vir_regs_1,
10100			   resource_size(&op->resource[2]));
10101		np->vir_regs_1 = NULL;
10102	}
10103
10104	if (np->vir_regs_2) {
10105		of_iounmap(&op->resource[3], np->vir_regs_2,
10106			   resource_size(&op->resource[3]));
10107		np->vir_regs_2 = NULL;
10108	}
10109
10110	if (np->regs) {
10111		of_iounmap(&op->resource[1], np->regs,
10112			   resource_size(&op->resource[1]));
10113		np->regs = NULL;
10114	}
10115
10116err_out_release_parent:
10117	niu_put_parent(np);
10118
10119err_out_free_dev:
10120	free_netdev(dev);
10121
10122err_out:
10123	return err;
10124}
10125
10126static int niu_of_remove(struct platform_device *op)
10127{
10128	struct net_device *dev = platform_get_drvdata(op);
10129
10130	if (dev) {
10131		struct niu *np = netdev_priv(dev);
10132
10133		unregister_netdev(dev);
10134
10135		if (np->vir_regs_1) {
10136			of_iounmap(&op->resource[2], np->vir_regs_1,
10137				   resource_size(&op->resource[2]));
10138			np->vir_regs_1 = NULL;
10139		}
10140
10141		if (np->vir_regs_2) {
10142			of_iounmap(&op->resource[3], np->vir_regs_2,
10143				   resource_size(&op->resource[3]));
10144			np->vir_regs_2 = NULL;
10145		}
10146
10147		if (np->regs) {
10148			of_iounmap(&op->resource[1], np->regs,
10149				   resource_size(&op->resource[1]));
10150			np->regs = NULL;
10151		}
10152
10153		niu_ldg_free(np);
10154
10155		niu_put_parent(np);
10156
10157		free_netdev(dev);
10158	}
10159	return 0;
10160}
10161
10162static const struct of_device_id niu_match[] = {
10163	{
10164		.name = "network",
10165		.compatible = "SUNW,niusl",
10166	},
10167	{},
10168};
10169MODULE_DEVICE_TABLE(of, niu_match);
10170
10171static struct platform_driver niu_of_driver = {
10172	.driver = {
10173		.name = "niu",
10174		.of_match_table = niu_match,
10175	},
10176	.probe		= niu_of_probe,
10177	.remove		= niu_of_remove,
10178};
10179
10180#endif /* CONFIG_SPARC64 */
10181
10182static int __init niu_init(void)
10183{
10184	int err = 0;
10185
10186	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
10187
10188	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
10189
10190#ifdef CONFIG_SPARC64
10191	err = platform_driver_register(&niu_of_driver);
10192#endif
10193
10194	if (!err) {
10195		err = pci_register_driver(&niu_pci_driver);
10196#ifdef CONFIG_SPARC64
10197		if (err)
10198			platform_driver_unregister(&niu_of_driver);
10199#endif
10200	}
10201
10202	return err;
10203}
10204
10205static void __exit niu_exit(void)
10206{
10207	pci_unregister_driver(&niu_pci_driver);
10208#ifdef CONFIG_SPARC64
10209	platform_driver_unregister(&niu_of_driver);
10210#endif
10211}
10212
10213module_init(niu_init);
10214module_exit(niu_exit);