Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
    1// SPDX-License-Identifier: GPL-2.0
    2/* niu.c: Neptune ethernet driver.
    3 *
    4 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
    5 */
    6
    7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    8
    9#include <linux/module.h>
   10#include <linux/init.h>
   11#include <linux/interrupt.h>
   12#include <linux/pci.h>
   13#include <linux/dma-mapping.h>
   14#include <linux/netdevice.h>
   15#include <linux/ethtool.h>
   16#include <linux/etherdevice.h>
   17#include <linux/platform_device.h>
   18#include <linux/delay.h>
   19#include <linux/bitops.h>
   20#include <linux/mii.h>
   21#include <linux/if.h>
   22#include <linux/if_ether.h>
   23#include <linux/if_vlan.h>
   24#include <linux/ip.h>
   25#include <linux/in.h>
   26#include <linux/ipv6.h>
   27#include <linux/log2.h>
   28#include <linux/jiffies.h>
   29#include <linux/crc32.h>
   30#include <linux/list.h>
   31#include <linux/slab.h>
   32
   33#include <linux/io.h>
   34#include <linux/of_device.h>
   35
   36#include "niu.h"
   37
   38#define DRV_MODULE_NAME		"niu"
   39#define DRV_MODULE_VERSION	"1.1"
   40#define DRV_MODULE_RELDATE	"Apr 22, 2010"
   41
   42static char version[] =
   43	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
   44
   45MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
   46MODULE_DESCRIPTION("NIU ethernet driver");
   47MODULE_LICENSE("GPL");
   48MODULE_VERSION(DRV_MODULE_VERSION);
   49
   50#ifndef readq
   51static u64 readq(void __iomem *reg)
   52{
   53	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
   54}
   55
   56static void writeq(u64 val, void __iomem *reg)
   57{
   58	writel(val & 0xffffffff, reg);
   59	writel(val >> 32, reg + 0x4UL);
   60}
   61#endif
   62
   63static const struct pci_device_id niu_pci_tbl[] = {
   64	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
   65	{}
   66};
   67
   68MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
   69
   70#define NIU_TX_TIMEOUT			(5 * HZ)
   71
   72#define nr64(reg)		readq(np->regs + (reg))
   73#define nw64(reg, val)		writeq((val), np->regs + (reg))
   74
   75#define nr64_mac(reg)		readq(np->mac_regs + (reg))
   76#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))
   77
   78#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
   79#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))
   80
   81#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
   82#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))
   83
   84#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
   85#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))
   86
   87#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
   88
   89static int niu_debug;
   90static int debug = -1;
   91module_param(debug, int, 0);
   92MODULE_PARM_DESC(debug, "NIU debug level");
   93
   94#define niu_lock_parent(np, flags) \
   95	spin_lock_irqsave(&np->parent->lock, flags)
   96#define niu_unlock_parent(np, flags) \
   97	spin_unlock_irqrestore(&np->parent->lock, flags)
   98
   99static int serdes_init_10g_serdes(struct niu *np);
  100
  101static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
  102				     u64 bits, int limit, int delay)
  103{
  104	while (--limit >= 0) {
  105		u64 val = nr64_mac(reg);
  106
  107		if (!(val & bits))
  108			break;
  109		udelay(delay);
  110	}
  111	if (limit < 0)
  112		return -ENODEV;
  113	return 0;
  114}
  115
  116static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
  117					u64 bits, int limit, int delay,
  118					const char *reg_name)
  119{
  120	int err;
  121
  122	nw64_mac(reg, bits);
  123	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
  124	if (err)
  125		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
  126			   (unsigned long long)bits, reg_name,
  127			   (unsigned long long)nr64_mac(reg));
  128	return err;
  129}
  130
  131#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
  132({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
  133	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
  134})
  135
  136static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
  137				     u64 bits, int limit, int delay)
  138{
  139	while (--limit >= 0) {
  140		u64 val = nr64_ipp(reg);
  141
  142		if (!(val & bits))
  143			break;
  144		udelay(delay);
  145	}
  146	if (limit < 0)
  147		return -ENODEV;
  148	return 0;
  149}
  150
  151static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
  152					u64 bits, int limit, int delay,
  153					const char *reg_name)
  154{
  155	int err;
  156	u64 val;
  157
  158	val = nr64_ipp(reg);
  159	val |= bits;
  160	nw64_ipp(reg, val);
  161
  162	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
  163	if (err)
  164		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
  165			   (unsigned long long)bits, reg_name,
  166			   (unsigned long long)nr64_ipp(reg));
  167	return err;
  168}
  169
  170#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
  171({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
  172	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
  173})
  174
  175static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
  176				 u64 bits, int limit, int delay)
  177{
  178	while (--limit >= 0) {
  179		u64 val = nr64(reg);
  180
  181		if (!(val & bits))
  182			break;
  183		udelay(delay);
  184	}
  185	if (limit < 0)
  186		return -ENODEV;
  187	return 0;
  188}
  189
  190#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
  191({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
  192	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
  193})
  194
  195static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
  196				    u64 bits, int limit, int delay,
  197				    const char *reg_name)
  198{
  199	int err;
  200
  201	nw64(reg, bits);
  202	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
  203	if (err)
  204		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
  205			   (unsigned long long)bits, reg_name,
  206			   (unsigned long long)nr64(reg));
  207	return err;
  208}
  209
  210#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
  211({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
  212	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
  213})
  214
  215static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
  216{
  217	u64 val = (u64) lp->timer;
  218
  219	if (on)
  220		val |= LDG_IMGMT_ARM;
  221
  222	nw64(LDG_IMGMT(lp->ldg_num), val);
  223}
  224
  225static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
  226{
  227	unsigned long mask_reg, bits;
  228	u64 val;
  229
  230	if (ldn < 0 || ldn > LDN_MAX)
  231		return -EINVAL;
  232
  233	if (ldn < 64) {
  234		mask_reg = LD_IM0(ldn);
  235		bits = LD_IM0_MASK;
  236	} else {
  237		mask_reg = LD_IM1(ldn - 64);
  238		bits = LD_IM1_MASK;
  239	}
  240
  241	val = nr64(mask_reg);
  242	if (on)
  243		val &= ~bits;
  244	else
  245		val |= bits;
  246	nw64(mask_reg, val);
  247
  248	return 0;
  249}
  250
  251static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
  252{
  253	struct niu_parent *parent = np->parent;
  254	int i;
  255
  256	for (i = 0; i <= LDN_MAX; i++) {
  257		int err;
  258
  259		if (parent->ldg_map[i] != lp->ldg_num)
  260			continue;
  261
  262		err = niu_ldn_irq_enable(np, i, on);
  263		if (err)
  264			return err;
  265	}
  266	return 0;
  267}
  268
  269static int niu_enable_interrupts(struct niu *np, int on)
  270{
  271	int i;
  272
  273	for (i = 0; i < np->num_ldg; i++) {
  274		struct niu_ldg *lp = &np->ldg[i];
  275		int err;
  276
  277		err = niu_enable_ldn_in_ldg(np, lp, on);
  278		if (err)
  279			return err;
  280	}
  281	for (i = 0; i < np->num_ldg; i++)
  282		niu_ldg_rearm(np, &np->ldg[i], on);
  283
  284	return 0;
  285}
  286
  287static u32 phy_encode(u32 type, int port)
  288{
  289	return type << (port * 2);
  290}
  291
  292static u32 phy_decode(u32 val, int port)
  293{
  294	return (val >> (port * 2)) & PORT_TYPE_MASK;
  295}
  296
  297static int mdio_wait(struct niu *np)
  298{
  299	int limit = 1000;
  300	u64 val;
  301
  302	while (--limit > 0) {
  303		val = nr64(MIF_FRAME_OUTPUT);
  304		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
  305			return val & MIF_FRAME_OUTPUT_DATA;
  306
  307		udelay(10);
  308	}
  309
  310	return -ENODEV;
  311}
  312
  313static int mdio_read(struct niu *np, int port, int dev, int reg)
  314{
  315	int err;
  316
  317	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
  318	err = mdio_wait(np);
  319	if (err < 0)
  320		return err;
  321
  322	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
  323	return mdio_wait(np);
  324}
  325
  326static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
  327{
  328	int err;
  329
  330	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
  331	err = mdio_wait(np);
  332	if (err < 0)
  333		return err;
  334
  335	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
  336	err = mdio_wait(np);
  337	if (err < 0)
  338		return err;
  339
  340	return 0;
  341}
  342
  343static int mii_read(struct niu *np, int port, int reg)
  344{
  345	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
  346	return mdio_wait(np);
  347}
  348
  349static int mii_write(struct niu *np, int port, int reg, int data)
  350{
  351	int err;
  352
  353	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
  354	err = mdio_wait(np);
  355	if (err < 0)
  356		return err;
  357
  358	return 0;
  359}
  360
  361static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
  362{
  363	int err;
  364
  365	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
  366			 ESR2_TI_PLL_TX_CFG_L(channel),
  367			 val & 0xffff);
  368	if (!err)
  369		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
  370				 ESR2_TI_PLL_TX_CFG_H(channel),
  371				 val >> 16);
  372	return err;
  373}
  374
  375static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
  376{
  377	int err;
  378
  379	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
  380			 ESR2_TI_PLL_RX_CFG_L(channel),
  381			 val & 0xffff);
  382	if (!err)
  383		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
  384				 ESR2_TI_PLL_RX_CFG_H(channel),
  385				 val >> 16);
  386	return err;
  387}
  388
  389/* Mode is always 10G fiber.  */
  390static int serdes_init_niu_10g_fiber(struct niu *np)
  391{
  392	struct niu_link_config *lp = &np->link_config;
  393	u32 tx_cfg, rx_cfg;
  394	unsigned long i;
  395
  396	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
  397	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
  398		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
  399		  PLL_RX_CFG_EQ_LP_ADAPTIVE);
  400
  401	if (lp->loopback_mode == LOOPBACK_PHY) {
  402		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
  403
  404		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
  405			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);
  406
  407		tx_cfg |= PLL_TX_CFG_ENTEST;
  408		rx_cfg |= PLL_RX_CFG_ENTEST;
  409	}
  410
  411	/* Initialize all 4 lanes of the SERDES.  */
  412	for (i = 0; i < 4; i++) {
  413		int err = esr2_set_tx_cfg(np, i, tx_cfg);
  414		if (err)
  415			return err;
  416	}
  417
  418	for (i = 0; i < 4; i++) {
  419		int err = esr2_set_rx_cfg(np, i, rx_cfg);
  420		if (err)
  421			return err;
  422	}
  423
  424	return 0;
  425}
  426
  427static int serdes_init_niu_1g_serdes(struct niu *np)
  428{
  429	struct niu_link_config *lp = &np->link_config;
  430	u16 pll_cfg, pll_sts;
  431	int max_retry = 100;
  432	u64 uninitialized_var(sig), mask, val;
  433	u32 tx_cfg, rx_cfg;
  434	unsigned long i;
  435	int err;
  436
  437	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
  438		  PLL_TX_CFG_RATE_HALF);
  439	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
  440		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
  441		  PLL_RX_CFG_RATE_HALF);
  442
  443	if (np->port == 0)
  444		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;
  445
  446	if (lp->loopback_mode == LOOPBACK_PHY) {
  447		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
  448
  449		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
  450			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);
  451
  452		tx_cfg |= PLL_TX_CFG_ENTEST;
  453		rx_cfg |= PLL_RX_CFG_ENTEST;
  454	}
  455
  456	/* Initialize PLL for 1G */
  457	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);
  458
  459	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
  460			 ESR2_TI_PLL_CFG_L, pll_cfg);
  461	if (err) {
  462		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
  463			   np->port, __func__);
  464		return err;
  465	}
  466
  467	pll_sts = PLL_CFG_ENPLL;
  468
  469	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
  470			 ESR2_TI_PLL_STS_L, pll_sts);
  471	if (err) {
  472		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
  473			   np->port, __func__);
  474		return err;
  475	}
  476
  477	udelay(200);
  478
  479	/* Initialize all 4 lanes of the SERDES.  */
  480	for (i = 0; i < 4; i++) {
  481		err = esr2_set_tx_cfg(np, i, tx_cfg);
  482		if (err)
  483			return err;
  484	}
  485
  486	for (i = 0; i < 4; i++) {
  487		err = esr2_set_rx_cfg(np, i, rx_cfg);
  488		if (err)
  489			return err;
  490	}
  491
  492	switch (np->port) {
  493	case 0:
  494		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
  495		mask = val;
  496		break;
  497
  498	case 1:
  499		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
  500		mask = val;
  501		break;
  502
  503	default:
  504		return -EINVAL;
  505	}
  506
  507	while (max_retry--) {
  508		sig = nr64(ESR_INT_SIGNALS);
  509		if ((sig & mask) == val)
  510			break;
  511
  512		mdelay(500);
  513	}
  514
  515	if ((sig & mask) != val) {
  516		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
  517			   np->port, (int)(sig & mask), (int)val);
  518		return -ENODEV;
  519	}
  520
  521	return 0;
  522}
  523
  524static int serdes_init_niu_10g_serdes(struct niu *np)
  525{
  526	struct niu_link_config *lp = &np->link_config;
  527	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
  528	int max_retry = 100;
  529	u64 uninitialized_var(sig), mask, val;
  530	unsigned long i;
  531	int err;
  532
  533	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
  534	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
  535		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
  536		  PLL_RX_CFG_EQ_LP_ADAPTIVE);
  537
  538	if (lp->loopback_mode == LOOPBACK_PHY) {
  539		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
  540
  541		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
  542			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);
  543
  544		tx_cfg |= PLL_TX_CFG_ENTEST;
  545		rx_cfg |= PLL_RX_CFG_ENTEST;
  546	}
  547
  548	/* Initialize PLL for 10G */
  549	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);
  550
  551	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
  552			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
  553	if (err) {
  554		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
  555			   np->port, __func__);
  556		return err;
  557	}
  558
  559	pll_sts = PLL_CFG_ENPLL;
  560
  561	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
  562			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
  563	if (err) {
  564		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
  565			   np->port, __func__);
  566		return err;
  567	}
  568
  569	udelay(200);
  570
  571	/* Initialize all 4 lanes of the SERDES.  */
  572	for (i = 0; i < 4; i++) {
  573		err = esr2_set_tx_cfg(np, i, tx_cfg);
  574		if (err)
  575			return err;
  576	}
  577
  578	for (i = 0; i < 4; i++) {
  579		err = esr2_set_rx_cfg(np, i, rx_cfg);
  580		if (err)
  581			return err;
  582	}
  583
  584	/* check if serdes is ready */
  585
  586	switch (np->port) {
  587	case 0:
  588		mask = ESR_INT_SIGNALS_P0_BITS;
  589		val = (ESR_INT_SRDY0_P0 |
  590		       ESR_INT_DET0_P0 |
  591		       ESR_INT_XSRDY_P0 |
  592		       ESR_INT_XDP_P0_CH3 |
  593		       ESR_INT_XDP_P0_CH2 |
  594		       ESR_INT_XDP_P0_CH1 |
  595		       ESR_INT_XDP_P0_CH0);
  596		break;
  597
  598	case 1:
  599		mask = ESR_INT_SIGNALS_P1_BITS;
  600		val = (ESR_INT_SRDY0_P1 |
  601		       ESR_INT_DET0_P1 |
  602		       ESR_INT_XSRDY_P1 |
  603		       ESR_INT_XDP_P1_CH3 |
  604		       ESR_INT_XDP_P1_CH2 |
  605		       ESR_INT_XDP_P1_CH1 |
  606		       ESR_INT_XDP_P1_CH0);
  607		break;
  608
  609	default:
  610		return -EINVAL;
  611	}
  612
  613	while (max_retry--) {
  614		sig = nr64(ESR_INT_SIGNALS);
  615		if ((sig & mask) == val)
  616			break;
  617
  618		mdelay(500);
  619	}
  620
  621	if ((sig & mask) != val) {
  622		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
  623			np->port, (int)(sig & mask), (int)val);
  624
  625		/* 10G failed, try initializing at 1G */
  626		err = serdes_init_niu_1g_serdes(np);
  627		if (!err) {
  628			np->flags &= ~NIU_FLAGS_10G;
  629			np->mac_xcvr = MAC_XCVR_PCS;
  630		}  else {
  631			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
  632				   np->port);
  633			return -ENODEV;
  634		}
  635	}
  636	return 0;
  637}
  638
  639static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
  640{
  641	int err;
  642
  643	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
  644	if (err >= 0) {
  645		*val = (err & 0xffff);
  646		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
  647				ESR_RXTX_CTRL_H(chan));
  648		if (err >= 0)
  649			*val |= ((err & 0xffff) << 16);
  650		err = 0;
  651	}
  652	return err;
  653}
  654
  655static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
  656{
  657	int err;
  658
  659	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
  660			ESR_GLUE_CTRL0_L(chan));
  661	if (err >= 0) {
  662		*val = (err & 0xffff);
  663		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
  664				ESR_GLUE_CTRL0_H(chan));
  665		if (err >= 0) {
  666			*val |= ((err & 0xffff) << 16);
  667			err = 0;
  668		}
  669	}
  670	return err;
  671}
  672
  673static int esr_read_reset(struct niu *np, u32 *val)
  674{
  675	int err;
  676
  677	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
  678			ESR_RXTX_RESET_CTRL_L);
  679	if (err >= 0) {
  680		*val = (err & 0xffff);
  681		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
  682				ESR_RXTX_RESET_CTRL_H);
  683		if (err >= 0) {
  684			*val |= ((err & 0xffff) << 16);
  685			err = 0;
  686		}
  687	}
  688	return err;
  689}
  690
  691static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
  692{
  693	int err;
  694
  695	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
  696			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
  697	if (!err)
  698		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
  699				 ESR_RXTX_CTRL_H(chan), (val >> 16));
  700	return err;
  701}
  702
  703static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
  704{
  705	int err;
  706
  707	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
  708			ESR_GLUE_CTRL0_L(chan), val & 0xffff);
  709	if (!err)
  710		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
  711				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
  712	return err;
  713}
  714
  715static int esr_reset(struct niu *np)
  716{
  717	u32 uninitialized_var(reset);
  718	int err;
  719
  720	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
  721			 ESR_RXTX_RESET_CTRL_L, 0x0000);
  722	if (err)
  723		return err;
  724	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
  725			 ESR_RXTX_RESET_CTRL_H, 0xffff);
  726	if (err)
  727		return err;
  728	udelay(200);
  729
  730	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
  731			 ESR_RXTX_RESET_CTRL_L, 0xffff);
  732	if (err)
  733		return err;
  734	udelay(200);
  735
  736	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
  737			 ESR_RXTX_RESET_CTRL_H, 0x0000);
  738	if (err)
  739		return err;
  740	udelay(200);
  741
  742	err = esr_read_reset(np, &reset);
  743	if (err)
  744		return err;
  745	if (reset != 0) {
  746		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
  747			   np->port, reset);
  748		return -ENODEV;
  749	}
  750
  751	return 0;
  752}
  753
  754static int serdes_init_10g(struct niu *np)
  755{
  756	struct niu_link_config *lp = &np->link_config;
  757	unsigned long ctrl_reg, test_cfg_reg, i;
  758	u64 ctrl_val, test_cfg_val, sig, mask, val;
  759	int err;
  760
  761	switch (np->port) {
  762	case 0:
  763		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
  764		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
  765		break;
  766	case 1:
  767		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
  768		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
  769		break;
  770
  771	default:
  772		return -EINVAL;
  773	}
  774	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
  775		    ENET_SERDES_CTRL_SDET_1 |
  776		    ENET_SERDES_CTRL_SDET_2 |
  777		    ENET_SERDES_CTRL_SDET_3 |
  778		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
  779		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
  780		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
  781		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
  782		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
  783		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
  784		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
  785		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
  786	test_cfg_val = 0;
  787
  788	if (lp->loopback_mode == LOOPBACK_PHY) {
  789		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
  790				  ENET_SERDES_TEST_MD_0_SHIFT) |
  791				 (ENET_TEST_MD_PAD_LOOPBACK <<
  792				  ENET_SERDES_TEST_MD_1_SHIFT) |
  793				 (ENET_TEST_MD_PAD_LOOPBACK <<
  794				  ENET_SERDES_TEST_MD_2_SHIFT) |
  795				 (ENET_TEST_MD_PAD_LOOPBACK <<
  796				  ENET_SERDES_TEST_MD_3_SHIFT));
  797	}
  798
  799	nw64(ctrl_reg, ctrl_val);
  800	nw64(test_cfg_reg, test_cfg_val);
  801
  802	/* Initialize all 4 lanes of the SERDES.  */
  803	for (i = 0; i < 4; i++) {
  804		u32 rxtx_ctrl, glue0;
  805
  806		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
  807		if (err)
  808			return err;
  809		err = esr_read_glue0(np, i, &glue0);
  810		if (err)
  811			return err;
  812
  813		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
  814		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
  815			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
  816
  817		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
  818			   ESR_GLUE_CTRL0_THCNT |
  819			   ESR_GLUE_CTRL0_BLTIME);
  820		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
  821			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
  822			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
  823			  (BLTIME_300_CYCLES <<
  824			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
  825
  826		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
  827		if (err)
  828			return err;
  829		err = esr_write_glue0(np, i, glue0);
  830		if (err)
  831			return err;
  832	}
  833
  834	err = esr_reset(np);
  835	if (err)
  836		return err;
  837
  838	sig = nr64(ESR_INT_SIGNALS);
  839	switch (np->port) {
  840	case 0:
  841		mask = ESR_INT_SIGNALS_P0_BITS;
  842		val = (ESR_INT_SRDY0_P0 |
  843		       ESR_INT_DET0_P0 |
  844		       ESR_INT_XSRDY_P0 |
  845		       ESR_INT_XDP_P0_CH3 |
  846		       ESR_INT_XDP_P0_CH2 |
  847		       ESR_INT_XDP_P0_CH1 |
  848		       ESR_INT_XDP_P0_CH0);
  849		break;
  850
  851	case 1:
  852		mask = ESR_INT_SIGNALS_P1_BITS;
  853		val = (ESR_INT_SRDY0_P1 |
  854		       ESR_INT_DET0_P1 |
  855		       ESR_INT_XSRDY_P1 |
  856		       ESR_INT_XDP_P1_CH3 |
  857		       ESR_INT_XDP_P1_CH2 |
  858		       ESR_INT_XDP_P1_CH1 |
  859		       ESR_INT_XDP_P1_CH0);
  860		break;
  861
  862	default:
  863		return -EINVAL;
  864	}
  865
  866	if ((sig & mask) != val) {
  867		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
  868			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
  869			return 0;
  870		}
  871		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
  872			   np->port, (int)(sig & mask), (int)val);
  873		return -ENODEV;
  874	}
  875	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
  876		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
  877	return 0;
  878}
  879
  880static int serdes_init_1g(struct niu *np)
  881{
  882	u64 val;
  883
  884	val = nr64(ENET_SERDES_1_PLL_CFG);
  885	val &= ~ENET_SERDES_PLL_FBDIV2;
  886	switch (np->port) {
  887	case 0:
  888		val |= ENET_SERDES_PLL_HRATE0;
  889		break;
  890	case 1:
  891		val |= ENET_SERDES_PLL_HRATE1;
  892		break;
  893	case 2:
  894		val |= ENET_SERDES_PLL_HRATE2;
  895		break;
  896	case 3:
  897		val |= ENET_SERDES_PLL_HRATE3;
  898		break;
  899	default:
  900		return -EINVAL;
  901	}
  902	nw64(ENET_SERDES_1_PLL_CFG, val);
  903
  904	return 0;
  905}
  906
  907static int serdes_init_1g_serdes(struct niu *np)
  908{
  909	struct niu_link_config *lp = &np->link_config;
  910	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
  911	u64 ctrl_val, test_cfg_val, sig, mask, val;
  912	int err;
  913	u64 reset_val, val_rd;
  914
  915	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
  916		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
  917		ENET_SERDES_PLL_FBDIV0;
  918	switch (np->port) {
  919	case 0:
  920		reset_val =  ENET_SERDES_RESET_0;
  921		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
  922		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
  923		pll_cfg = ENET_SERDES_0_PLL_CFG;
  924		break;
  925	case 1:
  926		reset_val =  ENET_SERDES_RESET_1;
  927		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
  928		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
  929		pll_cfg = ENET_SERDES_1_PLL_CFG;
  930		break;
  931
  932	default:
  933		return -EINVAL;
  934	}
  935	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
  936		    ENET_SERDES_CTRL_SDET_1 |
  937		    ENET_SERDES_CTRL_SDET_2 |
  938		    ENET_SERDES_CTRL_SDET_3 |
  939		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
  940		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
  941		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
  942		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
  943		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
  944		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
  945		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
  946		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
  947	test_cfg_val = 0;
  948
  949	if (lp->loopback_mode == LOOPBACK_PHY) {
  950		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
  951				  ENET_SERDES_TEST_MD_0_SHIFT) |
  952				 (ENET_TEST_MD_PAD_LOOPBACK <<
  953				  ENET_SERDES_TEST_MD_1_SHIFT) |
  954				 (ENET_TEST_MD_PAD_LOOPBACK <<
  955				  ENET_SERDES_TEST_MD_2_SHIFT) |
  956				 (ENET_TEST_MD_PAD_LOOPBACK <<
  957				  ENET_SERDES_TEST_MD_3_SHIFT));
  958	}
  959
  960	nw64(ENET_SERDES_RESET, reset_val);
  961	mdelay(20);
  962	val_rd = nr64(ENET_SERDES_RESET);
  963	val_rd &= ~reset_val;
  964	nw64(pll_cfg, val);
  965	nw64(ctrl_reg, ctrl_val);
  966	nw64(test_cfg_reg, test_cfg_val);
  967	nw64(ENET_SERDES_RESET, val_rd);
  968	mdelay(2000);
  969
  970	/* Initialize all 4 lanes of the SERDES.  */
  971	for (i = 0; i < 4; i++) {
  972		u32 rxtx_ctrl, glue0;
  973
  974		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
  975		if (err)
  976			return err;
  977		err = esr_read_glue0(np, i, &glue0);
  978		if (err)
  979			return err;
  980
  981		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
  982		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
  983			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
  984
  985		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
  986			   ESR_GLUE_CTRL0_THCNT |
  987			   ESR_GLUE_CTRL0_BLTIME);
  988		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
  989			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
  990			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
  991			  (BLTIME_300_CYCLES <<
  992			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
  993
  994		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
  995		if (err)
  996			return err;
  997		err = esr_write_glue0(np, i, glue0);
  998		if (err)
  999			return err;
 1000	}
 1001
 1002
 1003	sig = nr64(ESR_INT_SIGNALS);
 1004	switch (np->port) {
 1005	case 0:
 1006		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
 1007		mask = val;
 1008		break;
 1009
 1010	case 1:
 1011		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
 1012		mask = val;
 1013		break;
 1014
 1015	default:
 1016		return -EINVAL;
 1017	}
 1018
 1019	if ((sig & mask) != val) {
 1020		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
 1021			   np->port, (int)(sig & mask), (int)val);
 1022		return -ENODEV;
 1023	}
 1024
 1025	return 0;
 1026}
 1027
 1028static int link_status_1g_serdes(struct niu *np, int *link_up_p)
 1029{
 1030	struct niu_link_config *lp = &np->link_config;
 1031	int link_up;
 1032	u64 val;
 1033	u16 current_speed;
 1034	unsigned long flags;
 1035	u8 current_duplex;
 1036
 1037	link_up = 0;
 1038	current_speed = SPEED_INVALID;
 1039	current_duplex = DUPLEX_INVALID;
 1040
 1041	spin_lock_irqsave(&np->lock, flags);
 1042
 1043	val = nr64_pcs(PCS_MII_STAT);
 1044
 1045	if (val & PCS_MII_STAT_LINK_STATUS) {
 1046		link_up = 1;
 1047		current_speed = SPEED_1000;
 1048		current_duplex = DUPLEX_FULL;
 1049	}
 1050
 1051	lp->active_speed = current_speed;
 1052	lp->active_duplex = current_duplex;
 1053	spin_unlock_irqrestore(&np->lock, flags);
 1054
 1055	*link_up_p = link_up;
 1056	return 0;
 1057}
 1058
 1059static int link_status_10g_serdes(struct niu *np, int *link_up_p)
 1060{
 1061	unsigned long flags;
 1062	struct niu_link_config *lp = &np->link_config;
 1063	int link_up = 0;
 1064	int link_ok = 1;
 1065	u64 val, val2;
 1066	u16 current_speed;
 1067	u8 current_duplex;
 1068
 1069	if (!(np->flags & NIU_FLAGS_10G))
 1070		return link_status_1g_serdes(np, link_up_p);
 1071
 1072	current_speed = SPEED_INVALID;
 1073	current_duplex = DUPLEX_INVALID;
 1074	spin_lock_irqsave(&np->lock, flags);
 1075
 1076	val = nr64_xpcs(XPCS_STATUS(0));
 1077	val2 = nr64_mac(XMAC_INTER2);
 1078	if (val2 & 0x01000000)
 1079		link_ok = 0;
 1080
 1081	if ((val & 0x1000ULL) && link_ok) {
 1082		link_up = 1;
 1083		current_speed = SPEED_10000;
 1084		current_duplex = DUPLEX_FULL;
 1085	}
 1086	lp->active_speed = current_speed;
 1087	lp->active_duplex = current_duplex;
 1088	spin_unlock_irqrestore(&np->lock, flags);
 1089	*link_up_p = link_up;
 1090	return 0;
 1091}
 1092
 1093static int link_status_mii(struct niu *np, int *link_up_p)
 1094{
 1095	struct niu_link_config *lp = &np->link_config;
 1096	int err;
 1097	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
 1098	int supported, advertising, active_speed, active_duplex;
 1099
 1100	err = mii_read(np, np->phy_addr, MII_BMCR);
 1101	if (unlikely(err < 0))
 1102		return err;
 1103	bmcr = err;
 1104
 1105	err = mii_read(np, np->phy_addr, MII_BMSR);
 1106	if (unlikely(err < 0))
 1107		return err;
 1108	bmsr = err;
 1109
 1110	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
 1111	if (unlikely(err < 0))
 1112		return err;
 1113	advert = err;
 1114
 1115	err = mii_read(np, np->phy_addr, MII_LPA);
 1116	if (unlikely(err < 0))
 1117		return err;
 1118	lpa = err;
 1119
 1120	if (likely(bmsr & BMSR_ESTATEN)) {
 1121		err = mii_read(np, np->phy_addr, MII_ESTATUS);
 1122		if (unlikely(err < 0))
 1123			return err;
 1124		estatus = err;
 1125
 1126		err = mii_read(np, np->phy_addr, MII_CTRL1000);
 1127		if (unlikely(err < 0))
 1128			return err;
 1129		ctrl1000 = err;
 1130
 1131		err = mii_read(np, np->phy_addr, MII_STAT1000);
 1132		if (unlikely(err < 0))
 1133			return err;
 1134		stat1000 = err;
 1135	} else
 1136		estatus = ctrl1000 = stat1000 = 0;
 1137
 1138	supported = 0;
 1139	if (bmsr & BMSR_ANEGCAPABLE)
 1140		supported |= SUPPORTED_Autoneg;
 1141	if (bmsr & BMSR_10HALF)
 1142		supported |= SUPPORTED_10baseT_Half;
 1143	if (bmsr & BMSR_10FULL)
 1144		supported |= SUPPORTED_10baseT_Full;
 1145	if (bmsr & BMSR_100HALF)
 1146		supported |= SUPPORTED_100baseT_Half;
 1147	if (bmsr & BMSR_100FULL)
 1148		supported |= SUPPORTED_100baseT_Full;
 1149	if (estatus & ESTATUS_1000_THALF)
 1150		supported |= SUPPORTED_1000baseT_Half;
 1151	if (estatus & ESTATUS_1000_TFULL)
 1152		supported |= SUPPORTED_1000baseT_Full;
 1153	lp->supported = supported;
 1154
 1155	advertising = mii_adv_to_ethtool_adv_t(advert);
 1156	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
 1157
 1158	if (bmcr & BMCR_ANENABLE) {
 1159		int neg, neg1000;
 1160
 1161		lp->active_autoneg = 1;
 1162		advertising |= ADVERTISED_Autoneg;
 1163
 1164		neg = advert & lpa;
 1165		neg1000 = (ctrl1000 << 2) & stat1000;
 1166
 1167		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
 1168			active_speed = SPEED_1000;
 1169		else if (neg & LPA_100)
 1170			active_speed = SPEED_100;
 1171		else if (neg & (LPA_10HALF | LPA_10FULL))
 1172			active_speed = SPEED_10;
 1173		else
 1174			active_speed = SPEED_INVALID;
 1175
 1176		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
 1177			active_duplex = DUPLEX_FULL;
 1178		else if (active_speed != SPEED_INVALID)
 1179			active_duplex = DUPLEX_HALF;
 1180		else
 1181			active_duplex = DUPLEX_INVALID;
 1182	} else {
 1183		lp->active_autoneg = 0;
 1184
 1185		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
 1186			active_speed = SPEED_1000;
 1187		else if (bmcr & BMCR_SPEED100)
 1188			active_speed = SPEED_100;
 1189		else
 1190			active_speed = SPEED_10;
 1191
 1192		if (bmcr & BMCR_FULLDPLX)
 1193			active_duplex = DUPLEX_FULL;
 1194		else
 1195			active_duplex = DUPLEX_HALF;
 1196	}
 1197
 1198	lp->active_advertising = advertising;
 1199	lp->active_speed = active_speed;
 1200	lp->active_duplex = active_duplex;
 1201	*link_up_p = !!(bmsr & BMSR_LSTATUS);
 1202
 1203	return 0;
 1204}
 1205
 1206static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
 1207{
 1208	struct niu_link_config *lp = &np->link_config;
 1209	u16 current_speed, bmsr;
 1210	unsigned long flags;
 1211	u8 current_duplex;
 1212	int err, link_up;
 1213
 1214	link_up = 0;
 1215	current_speed = SPEED_INVALID;
 1216	current_duplex = DUPLEX_INVALID;
 1217
 1218	spin_lock_irqsave(&np->lock, flags);
 1219
 1220	err = mii_read(np, np->phy_addr, MII_BMSR);
 1221	if (err < 0)
 1222		goto out;
 1223
 1224	bmsr = err;
 1225	if (bmsr & BMSR_LSTATUS) {
 1226		link_up = 1;
 1227		current_speed = SPEED_1000;
 1228		current_duplex = DUPLEX_FULL;
 1229	}
 1230	lp->active_speed = current_speed;
 1231	lp->active_duplex = current_duplex;
 1232	err = 0;
 1233
 1234out:
 1235	spin_unlock_irqrestore(&np->lock, flags);
 1236
 1237	*link_up_p = link_up;
 1238	return err;
 1239}
 1240
 1241static int link_status_1g(struct niu *np, int *link_up_p)
 1242{
 1243	struct niu_link_config *lp = &np->link_config;
 1244	unsigned long flags;
 1245	int err;
 1246
 1247	spin_lock_irqsave(&np->lock, flags);
 1248
 1249	err = link_status_mii(np, link_up_p);
 1250	lp->supported |= SUPPORTED_TP;
 1251	lp->active_advertising |= ADVERTISED_TP;
 1252
 1253	spin_unlock_irqrestore(&np->lock, flags);
 1254	return err;
 1255}
 1256
 1257static int bcm8704_reset(struct niu *np)
 1258{
 1259	int err, limit;
 1260
 1261	err = mdio_read(np, np->phy_addr,
 1262			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
 1263	if (err < 0 || err == 0xffff)
 1264		return err;
 1265	err |= BMCR_RESET;
 1266	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 1267			 MII_BMCR, err);
 1268	if (err)
 1269		return err;
 1270
 1271	limit = 1000;
 1272	while (--limit >= 0) {
 1273		err = mdio_read(np, np->phy_addr,
 1274				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
 1275		if (err < 0)
 1276			return err;
 1277		if (!(err & BMCR_RESET))
 1278			break;
 1279	}
 1280	if (limit < 0) {
 1281		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
 1282			   np->port, (err & 0xffff));
 1283		return -ENODEV;
 1284	}
 1285	return 0;
 1286}
 1287
 1288/* When written, certain PHY registers need to be read back twice
 1289 * in order for the bits to settle properly.
 1290 */
 1291static int bcm8704_user_dev3_readback(struct niu *np, int reg)
 1292{
 1293	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
 1294	if (err < 0)
 1295		return err;
 1296	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
 1297	if (err < 0)
 1298		return err;
 1299	return 0;
 1300}
 1301
 1302static int bcm8706_init_user_dev3(struct niu *np)
 1303{
 1304	int err;
 1305
 1306
 1307	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1308			BCM8704_USER_OPT_DIGITAL_CTRL);
 1309	if (err < 0)
 1310		return err;
 1311	err &= ~USER_ODIG_CTRL_GPIOS;
 1312	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
 1313	err |=  USER_ODIG_CTRL_RESV2;
 1314	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1315			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
 1316	if (err)
 1317		return err;
 1318
 1319	mdelay(1000);
 1320
 1321	return 0;
 1322}
 1323
 1324static int bcm8704_init_user_dev3(struct niu *np)
 1325{
 1326	int err;
 1327
 1328	err = mdio_write(np, np->phy_addr,
 1329			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
 1330			 (USER_CONTROL_OPTXRST_LVL |
 1331			  USER_CONTROL_OPBIASFLT_LVL |
 1332			  USER_CONTROL_OBTMPFLT_LVL |
 1333			  USER_CONTROL_OPPRFLT_LVL |
 1334			  USER_CONTROL_OPTXFLT_LVL |
 1335			  USER_CONTROL_OPRXLOS_LVL |
 1336			  USER_CONTROL_OPRXFLT_LVL |
 1337			  USER_CONTROL_OPTXON_LVL |
 1338			  (0x3f << USER_CONTROL_RES1_SHIFT)));
 1339	if (err)
 1340		return err;
 1341
 1342	err = mdio_write(np, np->phy_addr,
 1343			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
 1344			 (USER_PMD_TX_CTL_XFP_CLKEN |
 1345			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
 1346			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
 1347			  USER_PMD_TX_CTL_TSCK_LPWREN));
 1348	if (err)
 1349		return err;
 1350
 1351	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
 1352	if (err)
 1353		return err;
 1354	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
 1355	if (err)
 1356		return err;
 1357
 1358	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1359			BCM8704_USER_OPT_DIGITAL_CTRL);
 1360	if (err < 0)
 1361		return err;
 1362	err &= ~USER_ODIG_CTRL_GPIOS;
 1363	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
 1364	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1365			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
 1366	if (err)
 1367		return err;
 1368
 1369	mdelay(1000);
 1370
 1371	return 0;
 1372}
 1373
 1374static int mrvl88x2011_act_led(struct niu *np, int val)
 1375{
 1376	int	err;
 1377
 1378	err  = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
 1379		MRVL88X2011_LED_8_TO_11_CTL);
 1380	if (err < 0)
 1381		return err;
 1382
 1383	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
 1384	err |=  MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);
 1385
 1386	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
 1387			  MRVL88X2011_LED_8_TO_11_CTL, err);
 1388}
 1389
 1390static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
 1391{
 1392	int	err;
 1393
 1394	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
 1395			MRVL88X2011_LED_BLINK_CTL);
 1396	if (err >= 0) {
 1397		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
 1398		err |= (rate << 4);
 1399
 1400		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
 1401				 MRVL88X2011_LED_BLINK_CTL, err);
 1402	}
 1403
 1404	return err;
 1405}
 1406
 1407static int xcvr_init_10g_mrvl88x2011(struct niu *np)
 1408{
 1409	int	err;
 1410
 1411	/* Set LED functions */
 1412	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
 1413	if (err)
 1414		return err;
 1415
 1416	/* led activity */
 1417	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
 1418	if (err)
 1419		return err;
 1420
 1421	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1422			MRVL88X2011_GENERAL_CTL);
 1423	if (err < 0)
 1424		return err;
 1425
 1426	err |= MRVL88X2011_ENA_XFPREFCLK;
 1427
 1428	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1429			 MRVL88X2011_GENERAL_CTL, err);
 1430	if (err < 0)
 1431		return err;
 1432
 1433	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1434			MRVL88X2011_PMA_PMD_CTL_1);
 1435	if (err < 0)
 1436		return err;
 1437
 1438	if (np->link_config.loopback_mode == LOOPBACK_MAC)
 1439		err |= MRVL88X2011_LOOPBACK;
 1440	else
 1441		err &= ~MRVL88X2011_LOOPBACK;
 1442
 1443	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1444			 MRVL88X2011_PMA_PMD_CTL_1, err);
 1445	if (err < 0)
 1446		return err;
 1447
 1448	/* Enable PMD  */
 1449	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1450			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
 1451}
 1452
 1453
 1454static int xcvr_diag_bcm870x(struct niu *np)
 1455{
 1456	u16 analog_stat0, tx_alarm_status;
 1457	int err = 0;
 1458
 1459#if 1
 1460	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 1461			MII_STAT1000);
 1462	if (err < 0)
 1463		return err;
 1464	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
 1465
 1466	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
 1467	if (err < 0)
 1468		return err;
 1469	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
 1470
 1471	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 1472			MII_NWAYTEST);
 1473	if (err < 0)
 1474		return err;
 1475	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
 1476#endif
 1477
 1478	/* XXX dig this out it might not be so useful XXX */
 1479	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1480			BCM8704_USER_ANALOG_STATUS0);
 1481	if (err < 0)
 1482		return err;
 1483	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1484			BCM8704_USER_ANALOG_STATUS0);
 1485	if (err < 0)
 1486		return err;
 1487	analog_stat0 = err;
 1488
 1489	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1490			BCM8704_USER_TX_ALARM_STATUS);
 1491	if (err < 0)
 1492		return err;
 1493	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1494			BCM8704_USER_TX_ALARM_STATUS);
 1495	if (err < 0)
 1496		return err;
 1497	tx_alarm_status = err;
 1498
 1499	if (analog_stat0 != 0x03fc) {
 1500		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
 1501			pr_info("Port %u cable not connected or bad cable\n",
 1502				np->port);
 1503		} else if (analog_stat0 == 0x639c) {
 1504			pr_info("Port %u optical module is bad or missing\n",
 1505				np->port);
 1506		}
 1507	}
 1508
 1509	return 0;
 1510}
 1511
 1512static int xcvr_10g_set_lb_bcm870x(struct niu *np)
 1513{
 1514	struct niu_link_config *lp = &np->link_config;
 1515	int err;
 1516
 1517	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 1518			MII_BMCR);
 1519	if (err < 0)
 1520		return err;
 1521
 1522	err &= ~BMCR_LOOPBACK;
 1523
 1524	if (lp->loopback_mode == LOOPBACK_MAC)
 1525		err |= BMCR_LOOPBACK;
 1526
 1527	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 1528			 MII_BMCR, err);
 1529	if (err)
 1530		return err;
 1531
 1532	return 0;
 1533}
 1534
 1535static int xcvr_init_10g_bcm8706(struct niu *np)
 1536{
 1537	int err = 0;
 1538	u64 val;
 1539
 1540	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
 1541	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
 1542			return err;
 1543
 1544	val = nr64_mac(XMAC_CONFIG);
 1545	val &= ~XMAC_CONFIG_LED_POLARITY;
 1546	val |= XMAC_CONFIG_FORCE_LED_ON;
 1547	nw64_mac(XMAC_CONFIG, val);
 1548
 1549	val = nr64(MIF_CONFIG);
 1550	val |= MIF_CONFIG_INDIRECT_MODE;
 1551	nw64(MIF_CONFIG, val);
 1552
 1553	err = bcm8704_reset(np);
 1554	if (err)
 1555		return err;
 1556
 1557	err = xcvr_10g_set_lb_bcm870x(np);
 1558	if (err)
 1559		return err;
 1560
 1561	err = bcm8706_init_user_dev3(np);
 1562	if (err)
 1563		return err;
 1564
 1565	err = xcvr_diag_bcm870x(np);
 1566	if (err)
 1567		return err;
 1568
 1569	return 0;
 1570}
 1571
 1572static int xcvr_init_10g_bcm8704(struct niu *np)
 1573{
 1574	int err;
 1575
 1576	err = bcm8704_reset(np);
 1577	if (err)
 1578		return err;
 1579
 1580	err = bcm8704_init_user_dev3(np);
 1581	if (err)
 1582		return err;
 1583
 1584	err = xcvr_10g_set_lb_bcm870x(np);
 1585	if (err)
 1586		return err;
 1587
 1588	err =  xcvr_diag_bcm870x(np);
 1589	if (err)
 1590		return err;
 1591
 1592	return 0;
 1593}
 1594
 1595static int xcvr_init_10g(struct niu *np)
 1596{
 1597	int phy_id, err;
 1598	u64 val;
 1599
 1600	val = nr64_mac(XMAC_CONFIG);
 1601	val &= ~XMAC_CONFIG_LED_POLARITY;
 1602	val |= XMAC_CONFIG_FORCE_LED_ON;
 1603	nw64_mac(XMAC_CONFIG, val);
 1604
 1605	/* XXX shared resource, lock parent XXX */
 1606	val = nr64(MIF_CONFIG);
 1607	val |= MIF_CONFIG_INDIRECT_MODE;
 1608	nw64(MIF_CONFIG, val);
 1609
 1610	phy_id = phy_decode(np->parent->port_phy, np->port);
 1611	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
 1612
 1613	/* handle different phy types */
 1614	switch (phy_id & NIU_PHY_ID_MASK) {
 1615	case NIU_PHY_ID_MRVL88X2011:
 1616		err = xcvr_init_10g_mrvl88x2011(np);
 1617		break;
 1618
 1619	default: /* bcom 8704 */
 1620		err = xcvr_init_10g_bcm8704(np);
 1621		break;
 1622	}
 1623
 1624	return err;
 1625}
 1626
 1627static int mii_reset(struct niu *np)
 1628{
 1629	int limit, err;
 1630
 1631	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
 1632	if (err)
 1633		return err;
 1634
 1635	limit = 1000;
 1636	while (--limit >= 0) {
 1637		udelay(500);
 1638		err = mii_read(np, np->phy_addr, MII_BMCR);
 1639		if (err < 0)
 1640			return err;
 1641		if (!(err & BMCR_RESET))
 1642			break;
 1643	}
 1644	if (limit < 0) {
 1645		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
 1646			   np->port, err);
 1647		return -ENODEV;
 1648	}
 1649
 1650	return 0;
 1651}
 1652
 1653static int xcvr_init_1g_rgmii(struct niu *np)
 1654{
 1655	int err;
 1656	u64 val;
 1657	u16 bmcr, bmsr, estat;
 1658
 1659	val = nr64(MIF_CONFIG);
 1660	val &= ~MIF_CONFIG_INDIRECT_MODE;
 1661	nw64(MIF_CONFIG, val);
 1662
 1663	err = mii_reset(np);
 1664	if (err)
 1665		return err;
 1666
 1667	err = mii_read(np, np->phy_addr, MII_BMSR);
 1668	if (err < 0)
 1669		return err;
 1670	bmsr = err;
 1671
 1672	estat = 0;
 1673	if (bmsr & BMSR_ESTATEN) {
 1674		err = mii_read(np, np->phy_addr, MII_ESTATUS);
 1675		if (err < 0)
 1676			return err;
 1677		estat = err;
 1678	}
 1679
 1680	bmcr = 0;
 1681	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
 1682	if (err)
 1683		return err;
 1684
 1685	if (bmsr & BMSR_ESTATEN) {
 1686		u16 ctrl1000 = 0;
 1687
 1688		if (estat & ESTATUS_1000_TFULL)
 1689			ctrl1000 |= ADVERTISE_1000FULL;
 1690		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
 1691		if (err)
 1692			return err;
 1693	}
 1694
 1695	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
 1696
 1697	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
 1698	if (err)
 1699		return err;
 1700
 1701	err = mii_read(np, np->phy_addr, MII_BMCR);
 1702	if (err < 0)
 1703		return err;
 1704	bmcr = mii_read(np, np->phy_addr, MII_BMCR);
 1705
 1706	err = mii_read(np, np->phy_addr, MII_BMSR);
 1707	if (err < 0)
 1708		return err;
 1709
 1710	return 0;
 1711}
 1712
 1713static int mii_init_common(struct niu *np)
 1714{
 1715	struct niu_link_config *lp = &np->link_config;
 1716	u16 bmcr, bmsr, adv, estat;
 1717	int err;
 1718
 1719	err = mii_reset(np);
 1720	if (err)
 1721		return err;
 1722
 1723	err = mii_read(np, np->phy_addr, MII_BMSR);
 1724	if (err < 0)
 1725		return err;
 1726	bmsr = err;
 1727
 1728	estat = 0;
 1729	if (bmsr & BMSR_ESTATEN) {
 1730		err = mii_read(np, np->phy_addr, MII_ESTATUS);
 1731		if (err < 0)
 1732			return err;
 1733		estat = err;
 1734	}
 1735
 1736	bmcr = 0;
 1737	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
 1738	if (err)
 1739		return err;
 1740
 1741	if (lp->loopback_mode == LOOPBACK_MAC) {
 1742		bmcr |= BMCR_LOOPBACK;
 1743		if (lp->active_speed == SPEED_1000)
 1744			bmcr |= BMCR_SPEED1000;
 1745		if (lp->active_duplex == DUPLEX_FULL)
 1746			bmcr |= BMCR_FULLDPLX;
 1747	}
 1748
 1749	if (lp->loopback_mode == LOOPBACK_PHY) {
 1750		u16 aux;
 1751
 1752		aux = (BCM5464R_AUX_CTL_EXT_LB |
 1753		       BCM5464R_AUX_CTL_WRITE_1);
 1754		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
 1755		if (err)
 1756			return err;
 1757	}
 1758
 1759	if (lp->autoneg) {
 1760		u16 ctrl1000;
 1761
 1762		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
 1763		if ((bmsr & BMSR_10HALF) &&
 1764			(lp->advertising & ADVERTISED_10baseT_Half))
 1765			adv |= ADVERTISE_10HALF;
 1766		if ((bmsr & BMSR_10FULL) &&
 1767			(lp->advertising & ADVERTISED_10baseT_Full))
 1768			adv |= ADVERTISE_10FULL;
 1769		if ((bmsr & BMSR_100HALF) &&
 1770			(lp->advertising & ADVERTISED_100baseT_Half))
 1771			adv |= ADVERTISE_100HALF;
 1772		if ((bmsr & BMSR_100FULL) &&
 1773			(lp->advertising & ADVERTISED_100baseT_Full))
 1774			adv |= ADVERTISE_100FULL;
 1775		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
 1776		if (err)
 1777			return err;
 1778
 1779		if (likely(bmsr & BMSR_ESTATEN)) {
 1780			ctrl1000 = 0;
 1781			if ((estat & ESTATUS_1000_THALF) &&
 1782				(lp->advertising & ADVERTISED_1000baseT_Half))
 1783				ctrl1000 |= ADVERTISE_1000HALF;
 1784			if ((estat & ESTATUS_1000_TFULL) &&
 1785				(lp->advertising & ADVERTISED_1000baseT_Full))
 1786				ctrl1000 |= ADVERTISE_1000FULL;
 1787			err = mii_write(np, np->phy_addr,
 1788					MII_CTRL1000, ctrl1000);
 1789			if (err)
 1790				return err;
 1791		}
 1792
 1793		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
 1794	} else {
 1795		/* !lp->autoneg */
 1796		int fulldpx;
 1797
 1798		if (lp->duplex == DUPLEX_FULL) {
 1799			bmcr |= BMCR_FULLDPLX;
 1800			fulldpx = 1;
 1801		} else if (lp->duplex == DUPLEX_HALF)
 1802			fulldpx = 0;
 1803		else
 1804			return -EINVAL;
 1805
 1806		if (lp->speed == SPEED_1000) {
 1807			/* if X-full requested while not supported, or
 1808			   X-half requested while not supported... */
 1809			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
 1810				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
 1811				return -EINVAL;
 1812			bmcr |= BMCR_SPEED1000;
 1813		} else if (lp->speed == SPEED_100) {
 1814			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
 1815				(!fulldpx && !(bmsr & BMSR_100HALF)))
 1816				return -EINVAL;
 1817			bmcr |= BMCR_SPEED100;
 1818		} else if (lp->speed == SPEED_10) {
 1819			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
 1820				(!fulldpx && !(bmsr & BMSR_10HALF)))
 1821				return -EINVAL;
 1822		} else
 1823			return -EINVAL;
 1824	}
 1825
 1826	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
 1827	if (err)
 1828		return err;
 1829
 1830#if 0
 1831	err = mii_read(np, np->phy_addr, MII_BMCR);
 1832	if (err < 0)
 1833		return err;
 1834	bmcr = err;
 1835
 1836	err = mii_read(np, np->phy_addr, MII_BMSR);
 1837	if (err < 0)
 1838		return err;
 1839	bmsr = err;
 1840
 1841	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
 1842		np->port, bmcr, bmsr);
 1843#endif
 1844
 1845	return 0;
 1846}
 1847
 1848static int xcvr_init_1g(struct niu *np)
 1849{
 1850	u64 val;
 1851
 1852	/* XXX shared resource, lock parent XXX */
 1853	val = nr64(MIF_CONFIG);
 1854	val &= ~MIF_CONFIG_INDIRECT_MODE;
 1855	nw64(MIF_CONFIG, val);
 1856
 1857	return mii_init_common(np);
 1858}
 1859
 1860static int niu_xcvr_init(struct niu *np)
 1861{
 1862	const struct niu_phy_ops *ops = np->phy_ops;
 1863	int err;
 1864
 1865	err = 0;
 1866	if (ops->xcvr_init)
 1867		err = ops->xcvr_init(np);
 1868
 1869	return err;
 1870}
 1871
 1872static int niu_serdes_init(struct niu *np)
 1873{
 1874	const struct niu_phy_ops *ops = np->phy_ops;
 1875	int err;
 1876
 1877	err = 0;
 1878	if (ops->serdes_init)
 1879		err = ops->serdes_init(np);
 1880
 1881	return err;
 1882}
 1883
 1884static void niu_init_xif(struct niu *);
 1885static void niu_handle_led(struct niu *, int status);
 1886
 1887static int niu_link_status_common(struct niu *np, int link_up)
 1888{
 1889	struct niu_link_config *lp = &np->link_config;
 1890	struct net_device *dev = np->dev;
 1891	unsigned long flags;
 1892
 1893	if (!netif_carrier_ok(dev) && link_up) {
 1894		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
 1895			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
 1896			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
 1897			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
 1898			   "10Mbit/sec",
 1899			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");
 1900
 1901		spin_lock_irqsave(&np->lock, flags);
 1902		niu_init_xif(np);
 1903		niu_handle_led(np, 1);
 1904		spin_unlock_irqrestore(&np->lock, flags);
 1905
 1906		netif_carrier_on(dev);
 1907	} else if (netif_carrier_ok(dev) && !link_up) {
 1908		netif_warn(np, link, dev, "Link is down\n");
 1909		spin_lock_irqsave(&np->lock, flags);
 1910		niu_handle_led(np, 0);
 1911		spin_unlock_irqrestore(&np->lock, flags);
 1912		netif_carrier_off(dev);
 1913	}
 1914
 1915	return 0;
 1916}
 1917
 1918static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
 1919{
 1920	int err, link_up, pma_status, pcs_status;
 1921
 1922	link_up = 0;
 1923
 1924	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1925			MRVL88X2011_10G_PMD_STATUS_2);
 1926	if (err < 0)
 1927		goto out;
 1928
 1929	/* Check PMA/PMD Register: 1.0001.2 == 1 */
 1930	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1931			MRVL88X2011_PMA_PMD_STATUS_1);
 1932	if (err < 0)
 1933		goto out;
 1934
 1935	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
 1936
 1937        /* Check PMC Register : 3.0001.2 == 1: read twice */
 1938	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1939			MRVL88X2011_PMA_PMD_STATUS_1);
 1940	if (err < 0)
 1941		goto out;
 1942
 1943	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1944			MRVL88X2011_PMA_PMD_STATUS_1);
 1945	if (err < 0)
 1946		goto out;
 1947
 1948	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
 1949
 1950        /* Check XGXS Register : 4.0018.[0-3,12] */
 1951	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
 1952			MRVL88X2011_10G_XGXS_LANE_STAT);
 1953	if (err < 0)
 1954		goto out;
 1955
 1956	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
 1957		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
 1958		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
 1959		    0x800))
 1960		link_up = (pma_status && pcs_status) ? 1 : 0;
 1961
 1962	np->link_config.active_speed = SPEED_10000;
 1963	np->link_config.active_duplex = DUPLEX_FULL;
 1964	err = 0;
 1965out:
 1966	mrvl88x2011_act_led(np, (link_up ?
 1967				 MRVL88X2011_LED_CTL_PCS_ACT :
 1968				 MRVL88X2011_LED_CTL_OFF));
 1969
 1970	*link_up_p = link_up;
 1971	return err;
 1972}
 1973
 1974static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
 1975{
 1976	int err, link_up;
 1977	link_up = 0;
 1978
 1979	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 1980			BCM8704_PMD_RCV_SIGDET);
 1981	if (err < 0 || err == 0xffff)
 1982		goto out;
 1983	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
 1984		err = 0;
 1985		goto out;
 1986	}
 1987
 1988	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 1989			BCM8704_PCS_10G_R_STATUS);
 1990	if (err < 0)
 1991		goto out;
 1992
 1993	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
 1994		err = 0;
 1995		goto out;
 1996	}
 1997
 1998	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 1999			BCM8704_PHYXS_XGXS_LANE_STAT);
 2000	if (err < 0)
 2001		goto out;
 2002	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
 2003		    PHYXS_XGXS_LANE_STAT_MAGIC |
 2004		    PHYXS_XGXS_LANE_STAT_PATTEST |
 2005		    PHYXS_XGXS_LANE_STAT_LANE3 |
 2006		    PHYXS_XGXS_LANE_STAT_LANE2 |
 2007		    PHYXS_XGXS_LANE_STAT_LANE1 |
 2008		    PHYXS_XGXS_LANE_STAT_LANE0)) {
 2009		err = 0;
 2010		np->link_config.active_speed = SPEED_INVALID;
 2011		np->link_config.active_duplex = DUPLEX_INVALID;
 2012		goto out;
 2013	}
 2014
 2015	link_up = 1;
 2016	np->link_config.active_speed = SPEED_10000;
 2017	np->link_config.active_duplex = DUPLEX_FULL;
 2018	err = 0;
 2019
 2020out:
 2021	*link_up_p = link_up;
 2022	return err;
 2023}
 2024
 2025static int link_status_10g_bcom(struct niu *np, int *link_up_p)
 2026{
 2027	int err, link_up;
 2028
 2029	link_up = 0;
 2030
 2031	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 2032			BCM8704_PMD_RCV_SIGDET);
 2033	if (err < 0)
 2034		goto out;
 2035	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
 2036		err = 0;
 2037		goto out;
 2038	}
 2039
 2040	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 2041			BCM8704_PCS_10G_R_STATUS);
 2042	if (err < 0)
 2043		goto out;
 2044	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
 2045		err = 0;
 2046		goto out;
 2047	}
 2048
 2049	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 2050			BCM8704_PHYXS_XGXS_LANE_STAT);
 2051	if (err < 0)
 2052		goto out;
 2053
 2054	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
 2055		    PHYXS_XGXS_LANE_STAT_MAGIC |
 2056		    PHYXS_XGXS_LANE_STAT_LANE3 |
 2057		    PHYXS_XGXS_LANE_STAT_LANE2 |
 2058		    PHYXS_XGXS_LANE_STAT_LANE1 |
 2059		    PHYXS_XGXS_LANE_STAT_LANE0)) {
 2060		err = 0;
 2061		goto out;
 2062	}
 2063
 2064	link_up = 1;
 2065	np->link_config.active_speed = SPEED_10000;
 2066	np->link_config.active_duplex = DUPLEX_FULL;
 2067	err = 0;
 2068
 2069out:
 2070	*link_up_p = link_up;
 2071	return err;
 2072}
 2073
 2074static int link_status_10g(struct niu *np, int *link_up_p)
 2075{
 2076	unsigned long flags;
 2077	int err = -EINVAL;
 2078
 2079	spin_lock_irqsave(&np->lock, flags);
 2080
 2081	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
 2082		int phy_id;
 2083
 2084		phy_id = phy_decode(np->parent->port_phy, np->port);
 2085		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
 2086
 2087		/* handle different phy types */
 2088		switch (phy_id & NIU_PHY_ID_MASK) {
 2089		case NIU_PHY_ID_MRVL88X2011:
 2090			err = link_status_10g_mrvl(np, link_up_p);
 2091			break;
 2092
 2093		default: /* bcom 8704 */
 2094			err = link_status_10g_bcom(np, link_up_p);
 2095			break;
 2096		}
 2097	}
 2098
 2099	spin_unlock_irqrestore(&np->lock, flags);
 2100
 2101	return err;
 2102}
 2103
 2104static int niu_10g_phy_present(struct niu *np)
 2105{
 2106	u64 sig, mask, val;
 2107
 2108	sig = nr64(ESR_INT_SIGNALS);
 2109	switch (np->port) {
 2110	case 0:
 2111		mask = ESR_INT_SIGNALS_P0_BITS;
 2112		val = (ESR_INT_SRDY0_P0 |
 2113		       ESR_INT_DET0_P0 |
 2114		       ESR_INT_XSRDY_P0 |
 2115		       ESR_INT_XDP_P0_CH3 |
 2116		       ESR_INT_XDP_P0_CH2 |
 2117		       ESR_INT_XDP_P0_CH1 |
 2118		       ESR_INT_XDP_P0_CH0);
 2119		break;
 2120
 2121	case 1:
 2122		mask = ESR_INT_SIGNALS_P1_BITS;
 2123		val = (ESR_INT_SRDY0_P1 |
 2124		       ESR_INT_DET0_P1 |
 2125		       ESR_INT_XSRDY_P1 |
 2126		       ESR_INT_XDP_P1_CH3 |
 2127		       ESR_INT_XDP_P1_CH2 |
 2128		       ESR_INT_XDP_P1_CH1 |
 2129		       ESR_INT_XDP_P1_CH0);
 2130		break;
 2131
 2132	default:
 2133		return 0;
 2134	}
 2135
 2136	if ((sig & mask) != val)
 2137		return 0;
 2138	return 1;
 2139}
 2140
 2141static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
 2142{
 2143	unsigned long flags;
 2144	int err = 0;
 2145	int phy_present;
 2146	int phy_present_prev;
 2147
 2148	spin_lock_irqsave(&np->lock, flags);
 2149
 2150	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
 2151		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
 2152			1 : 0;
 2153		phy_present = niu_10g_phy_present(np);
 2154		if (phy_present != phy_present_prev) {
 2155			/* state change */
 2156			if (phy_present) {
 2157				/* A NEM was just plugged in */
 2158				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2159				if (np->phy_ops->xcvr_init)
 2160					err = np->phy_ops->xcvr_init(np);
 2161				if (err) {
 2162					err = mdio_read(np, np->phy_addr,
 2163						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
 2164					if (err == 0xffff) {
 2165						/* No mdio, back-to-back XAUI */
 2166						goto out;
 2167					}
 2168					/* debounce */
 2169					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2170				}
 2171			} else {
 2172				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2173				*link_up_p = 0;
 2174				netif_warn(np, link, np->dev,
 2175					   "Hotplug PHY Removed\n");
 2176			}
 2177		}
 2178out:
 2179		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
 2180			err = link_status_10g_bcm8706(np, link_up_p);
 2181			if (err == 0xffff) {
 2182				/* No mdio, back-to-back XAUI: it is C10NEM */
 2183				*link_up_p = 1;
 2184				np->link_config.active_speed = SPEED_10000;
 2185				np->link_config.active_duplex = DUPLEX_FULL;
 2186			}
 2187		}
 2188	}
 2189
 2190	spin_unlock_irqrestore(&np->lock, flags);
 2191
 2192	return 0;
 2193}
 2194
 2195static int niu_link_status(struct niu *np, int *link_up_p)
 2196{
 2197	const struct niu_phy_ops *ops = np->phy_ops;
 2198	int err;
 2199
 2200	err = 0;
 2201	if (ops->link_status)
 2202		err = ops->link_status(np, link_up_p);
 2203
 2204	return err;
 2205}
 2206
 2207static void niu_timer(struct timer_list *t)
 2208{
 2209	struct niu *np = from_timer(np, t, timer);
 2210	unsigned long off;
 2211	int err, link_up;
 2212
 2213	err = niu_link_status(np, &link_up);
 2214	if (!err)
 2215		niu_link_status_common(np, link_up);
 2216
 2217	if (netif_carrier_ok(np->dev))
 2218		off = 5 * HZ;
 2219	else
 2220		off = 1 * HZ;
 2221	np->timer.expires = jiffies + off;
 2222
 2223	add_timer(&np->timer);
 2224}
 2225
 2226static const struct niu_phy_ops phy_ops_10g_serdes = {
 2227	.serdes_init		= serdes_init_10g_serdes,
 2228	.link_status		= link_status_10g_serdes,
 2229};
 2230
 2231static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
 2232	.serdes_init		= serdes_init_niu_10g_serdes,
 2233	.link_status		= link_status_10g_serdes,
 2234};
 2235
 2236static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
 2237	.serdes_init		= serdes_init_niu_1g_serdes,
 2238	.link_status		= link_status_1g_serdes,
 2239};
 2240
 2241static const struct niu_phy_ops phy_ops_1g_rgmii = {
 2242	.xcvr_init		= xcvr_init_1g_rgmii,
 2243	.link_status		= link_status_1g_rgmii,
 2244};
 2245
 2246static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
 2247	.serdes_init		= serdes_init_niu_10g_fiber,
 2248	.xcvr_init		= xcvr_init_10g,
 2249	.link_status		= link_status_10g,
 2250};
 2251
 2252static const struct niu_phy_ops phy_ops_10g_fiber = {
 2253	.serdes_init		= serdes_init_10g,
 2254	.xcvr_init		= xcvr_init_10g,
 2255	.link_status		= link_status_10g,
 2256};
 2257
 2258static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
 2259	.serdes_init		= serdes_init_10g,
 2260	.xcvr_init		= xcvr_init_10g_bcm8706,
 2261	.link_status		= link_status_10g_hotplug,
 2262};
 2263
 2264static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
 2265	.serdes_init		= serdes_init_niu_10g_fiber,
 2266	.xcvr_init		= xcvr_init_10g_bcm8706,
 2267	.link_status		= link_status_10g_hotplug,
 2268};
 2269
 2270static const struct niu_phy_ops phy_ops_10g_copper = {
 2271	.serdes_init		= serdes_init_10g,
 2272	.link_status		= link_status_10g, /* XXX */
 2273};
 2274
 2275static const struct niu_phy_ops phy_ops_1g_fiber = {
 2276	.serdes_init		= serdes_init_1g,
 2277	.xcvr_init		= xcvr_init_1g,
 2278	.link_status		= link_status_1g,
 2279};
 2280
 2281static const struct niu_phy_ops phy_ops_1g_copper = {
 2282	.xcvr_init		= xcvr_init_1g,
 2283	.link_status		= link_status_1g,
 2284};
 2285
 2286struct niu_phy_template {
 2287	const struct niu_phy_ops	*ops;
 2288	u32				phy_addr_base;
 2289};
 2290
 2291static const struct niu_phy_template phy_template_niu_10g_fiber = {
 2292	.ops		= &phy_ops_10g_fiber_niu,
 2293	.phy_addr_base	= 16,
 2294};
 2295
 2296static const struct niu_phy_template phy_template_niu_10g_serdes = {
 2297	.ops		= &phy_ops_10g_serdes_niu,
 2298	.phy_addr_base	= 0,
 2299};
 2300
 2301static const struct niu_phy_template phy_template_niu_1g_serdes = {
 2302	.ops		= &phy_ops_1g_serdes_niu,
 2303	.phy_addr_base	= 0,
 2304};
 2305
 2306static const struct niu_phy_template phy_template_10g_fiber = {
 2307	.ops		= &phy_ops_10g_fiber,
 2308	.phy_addr_base	= 8,
 2309};
 2310
 2311static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
 2312	.ops		= &phy_ops_10g_fiber_hotplug,
 2313	.phy_addr_base	= 8,
 2314};
 2315
 2316static const struct niu_phy_template phy_template_niu_10g_hotplug = {
 2317	.ops		= &phy_ops_niu_10g_hotplug,
 2318	.phy_addr_base	= 8,
 2319};
 2320
 2321static const struct niu_phy_template phy_template_10g_copper = {
 2322	.ops		= &phy_ops_10g_copper,
 2323	.phy_addr_base	= 10,
 2324};
 2325
 2326static const struct niu_phy_template phy_template_1g_fiber = {
 2327	.ops		= &phy_ops_1g_fiber,
 2328	.phy_addr_base	= 0,
 2329};
 2330
 2331static const struct niu_phy_template phy_template_1g_copper = {
 2332	.ops		= &phy_ops_1g_copper,
 2333	.phy_addr_base	= 0,
 2334};
 2335
 2336static const struct niu_phy_template phy_template_1g_rgmii = {
 2337	.ops		= &phy_ops_1g_rgmii,
 2338	.phy_addr_base	= 0,
 2339};
 2340
 2341static const struct niu_phy_template phy_template_10g_serdes = {
 2342	.ops		= &phy_ops_10g_serdes,
 2343	.phy_addr_base	= 0,
 2344};
 2345
 2346static int niu_atca_port_num[4] = {
 2347	0, 0,  11, 10
 2348};
 2349
 2350static int serdes_init_10g_serdes(struct niu *np)
 2351{
 2352	struct niu_link_config *lp = &np->link_config;
 2353	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
 2354	u64 ctrl_val, test_cfg_val, sig, mask, val;
 2355
 2356	switch (np->port) {
 2357	case 0:
 2358		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
 2359		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
 2360		pll_cfg = ENET_SERDES_0_PLL_CFG;
 2361		break;
 2362	case 1:
 2363		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
 2364		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
 2365		pll_cfg = ENET_SERDES_1_PLL_CFG;
 2366		break;
 2367
 2368	default:
 2369		return -EINVAL;
 2370	}
 2371	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
 2372		    ENET_SERDES_CTRL_SDET_1 |
 2373		    ENET_SERDES_CTRL_SDET_2 |
 2374		    ENET_SERDES_CTRL_SDET_3 |
 2375		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
 2376		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
 2377		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
 2378		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
 2379		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
 2380		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
 2381		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
 2382		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
 2383	test_cfg_val = 0;
 2384
 2385	if (lp->loopback_mode == LOOPBACK_PHY) {
 2386		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
 2387				  ENET_SERDES_TEST_MD_0_SHIFT) |
 2388				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2389				  ENET_SERDES_TEST_MD_1_SHIFT) |
 2390				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2391				  ENET_SERDES_TEST_MD_2_SHIFT) |
 2392				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2393				  ENET_SERDES_TEST_MD_3_SHIFT));
 2394	}
 2395
 2396	esr_reset(np);
 2397	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
 2398	nw64(ctrl_reg, ctrl_val);
 2399	nw64(test_cfg_reg, test_cfg_val);
 2400
 2401	/* Initialize all 4 lanes of the SERDES.  */
 2402	for (i = 0; i < 4; i++) {
 2403		u32 rxtx_ctrl, glue0;
 2404		int err;
 2405
 2406		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
 2407		if (err)
 2408			return err;
 2409		err = esr_read_glue0(np, i, &glue0);
 2410		if (err)
 2411			return err;
 2412
 2413		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
 2414		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
 2415			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
 2416
 2417		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
 2418			   ESR_GLUE_CTRL0_THCNT |
 2419			   ESR_GLUE_CTRL0_BLTIME);
 2420		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
 2421			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
 2422			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
 2423			  (BLTIME_300_CYCLES <<
 2424			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
 2425
 2426		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
 2427		if (err)
 2428			return err;
 2429		err = esr_write_glue0(np, i, glue0);
 2430		if (err)
 2431			return err;
 2432	}
 2433
 2434
 2435	sig = nr64(ESR_INT_SIGNALS);
 2436	switch (np->port) {
 2437	case 0:
 2438		mask = ESR_INT_SIGNALS_P0_BITS;
 2439		val = (ESR_INT_SRDY0_P0 |
 2440		       ESR_INT_DET0_P0 |
 2441		       ESR_INT_XSRDY_P0 |
 2442		       ESR_INT_XDP_P0_CH3 |
 2443		       ESR_INT_XDP_P0_CH2 |
 2444		       ESR_INT_XDP_P0_CH1 |
 2445		       ESR_INT_XDP_P0_CH0);
 2446		break;
 2447
 2448	case 1:
 2449		mask = ESR_INT_SIGNALS_P1_BITS;
 2450		val = (ESR_INT_SRDY0_P1 |
 2451		       ESR_INT_DET0_P1 |
 2452		       ESR_INT_XSRDY_P1 |
 2453		       ESR_INT_XDP_P1_CH3 |
 2454		       ESR_INT_XDP_P1_CH2 |
 2455		       ESR_INT_XDP_P1_CH1 |
 2456		       ESR_INT_XDP_P1_CH0);
 2457		break;
 2458
 2459	default:
 2460		return -EINVAL;
 2461	}
 2462
 2463	if ((sig & mask) != val) {
 2464		int err;
 2465		err = serdes_init_1g_serdes(np);
 2466		if (!err) {
 2467			np->flags &= ~NIU_FLAGS_10G;
 2468			np->mac_xcvr = MAC_XCVR_PCS;
 2469		}  else {
 2470			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
 2471				   np->port);
 2472			return -ENODEV;
 2473		}
 2474	}
 2475
 2476	return 0;
 2477}
 2478
 2479static int niu_determine_phy_disposition(struct niu *np)
 2480{
 2481	struct niu_parent *parent = np->parent;
 2482	u8 plat_type = parent->plat_type;
 2483	const struct niu_phy_template *tp;
 2484	u32 phy_addr_off = 0;
 2485
 2486	if (plat_type == PLAT_TYPE_NIU) {
 2487		switch (np->flags &
 2488			(NIU_FLAGS_10G |
 2489			 NIU_FLAGS_FIBER |
 2490			 NIU_FLAGS_XCVR_SERDES)) {
 2491		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 2492			/* 10G Serdes */
 2493			tp = &phy_template_niu_10g_serdes;
 2494			break;
 2495		case NIU_FLAGS_XCVR_SERDES:
 2496			/* 1G Serdes */
 2497			tp = &phy_template_niu_1g_serdes;
 2498			break;
 2499		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 2500			/* 10G Fiber */
 2501		default:
 2502			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
 2503				tp = &phy_template_niu_10g_hotplug;
 2504				if (np->port == 0)
 2505					phy_addr_off = 8;
 2506				if (np->port == 1)
 2507					phy_addr_off = 12;
 2508			} else {
 2509				tp = &phy_template_niu_10g_fiber;
 2510				phy_addr_off += np->port;
 2511			}
 2512			break;
 2513		}
 2514	} else {
 2515		switch (np->flags &
 2516			(NIU_FLAGS_10G |
 2517			 NIU_FLAGS_FIBER |
 2518			 NIU_FLAGS_XCVR_SERDES)) {
 2519		case 0:
 2520			/* 1G copper */
 2521			tp = &phy_template_1g_copper;
 2522			if (plat_type == PLAT_TYPE_VF_P0)
 2523				phy_addr_off = 10;
 2524			else if (plat_type == PLAT_TYPE_VF_P1)
 2525				phy_addr_off = 26;
 2526
 2527			phy_addr_off += (np->port ^ 0x3);
 2528			break;
 2529
 2530		case NIU_FLAGS_10G:
 2531			/* 10G copper */
 2532			tp = &phy_template_10g_copper;
 2533			break;
 2534
 2535		case NIU_FLAGS_FIBER:
 2536			/* 1G fiber */
 2537			tp = &phy_template_1g_fiber;
 2538			break;
 2539
 2540		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 2541			/* 10G fiber */
 2542			tp = &phy_template_10g_fiber;
 2543			if (plat_type == PLAT_TYPE_VF_P0 ||
 2544			    plat_type == PLAT_TYPE_VF_P1)
 2545				phy_addr_off = 8;
 2546			phy_addr_off += np->port;
 2547			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
 2548				tp = &phy_template_10g_fiber_hotplug;
 2549				if (np->port == 0)
 2550					phy_addr_off = 8;
 2551				if (np->port == 1)
 2552					phy_addr_off = 12;
 2553			}
 2554			break;
 2555
 2556		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 2557		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
 2558		case NIU_FLAGS_XCVR_SERDES:
 2559			switch(np->port) {
 2560			case 0:
 2561			case 1:
 2562				tp = &phy_template_10g_serdes;
 2563				break;
 2564			case 2:
 2565			case 3:
 2566				tp = &phy_template_1g_rgmii;
 2567				break;
 2568			default:
 2569				return -EINVAL;
 2570			}
 2571			phy_addr_off = niu_atca_port_num[np->port];
 2572			break;
 2573
 2574		default:
 2575			return -EINVAL;
 2576		}
 2577	}
 2578
 2579	np->phy_ops = tp->ops;
 2580	np->phy_addr = tp->phy_addr_base + phy_addr_off;
 2581
 2582	return 0;
 2583}
 2584
 2585static int niu_init_link(struct niu *np)
 2586{
 2587	struct niu_parent *parent = np->parent;
 2588	int err, ignore;
 2589
 2590	if (parent->plat_type == PLAT_TYPE_NIU) {
 2591		err = niu_xcvr_init(np);
 2592		if (err)
 2593			return err;
 2594		msleep(200);
 2595	}
 2596	err = niu_serdes_init(np);
 2597	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
 2598		return err;
 2599	msleep(200);
 2600	err = niu_xcvr_init(np);
 2601	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
 2602		niu_link_status(np, &ignore);
 2603	return 0;
 2604}
 2605
 2606static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
 2607{
 2608	u16 reg0 = addr[4] << 8 | addr[5];
 2609	u16 reg1 = addr[2] << 8 | addr[3];
 2610	u16 reg2 = addr[0] << 8 | addr[1];
 2611
 2612	if (np->flags & NIU_FLAGS_XMAC) {
 2613		nw64_mac(XMAC_ADDR0, reg0);
 2614		nw64_mac(XMAC_ADDR1, reg1);
 2615		nw64_mac(XMAC_ADDR2, reg2);
 2616	} else {
 2617		nw64_mac(BMAC_ADDR0, reg0);
 2618		nw64_mac(BMAC_ADDR1, reg1);
 2619		nw64_mac(BMAC_ADDR2, reg2);
 2620	}
 2621}
 2622
 2623static int niu_num_alt_addr(struct niu *np)
 2624{
 2625	if (np->flags & NIU_FLAGS_XMAC)
 2626		return XMAC_NUM_ALT_ADDR;
 2627	else
 2628		return BMAC_NUM_ALT_ADDR;
 2629}
 2630
 2631static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
 2632{
 2633	u16 reg0 = addr[4] << 8 | addr[5];
 2634	u16 reg1 = addr[2] << 8 | addr[3];
 2635	u16 reg2 = addr[0] << 8 | addr[1];
 2636
 2637	if (index >= niu_num_alt_addr(np))
 2638		return -EINVAL;
 2639
 2640	if (np->flags & NIU_FLAGS_XMAC) {
 2641		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
 2642		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
 2643		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
 2644	} else {
 2645		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
 2646		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
 2647		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
 2648	}
 2649
 2650	return 0;
 2651}
 2652
 2653static int niu_enable_alt_mac(struct niu *np, int index, int on)
 2654{
 2655	unsigned long reg;
 2656	u64 val, mask;
 2657
 2658	if (index >= niu_num_alt_addr(np))
 2659		return -EINVAL;
 2660
 2661	if (np->flags & NIU_FLAGS_XMAC) {
 2662		reg = XMAC_ADDR_CMPEN;
 2663		mask = 1 << index;
 2664	} else {
 2665		reg = BMAC_ADDR_CMPEN;
 2666		mask = 1 << (index + 1);
 2667	}
 2668
 2669	val = nr64_mac(reg);
 2670	if (on)
 2671		val |= mask;
 2672	else
 2673		val &= ~mask;
 2674	nw64_mac(reg, val);
 2675
 2676	return 0;
 2677}
 2678
 2679static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
 2680				   int num, int mac_pref)
 2681{
 2682	u64 val = nr64_mac(reg);
 2683	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
 2684	val |= num;
 2685	if (mac_pref)
 2686		val |= HOST_INFO_MPR;
 2687	nw64_mac(reg, val);
 2688}
 2689
 2690static int __set_rdc_table_num(struct niu *np,
 2691			       int xmac_index, int bmac_index,
 2692			       int rdc_table_num, int mac_pref)
 2693{
 2694	unsigned long reg;
 2695
 2696	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
 2697		return -EINVAL;
 2698	if (np->flags & NIU_FLAGS_XMAC)
 2699		reg = XMAC_HOST_INFO(xmac_index);
 2700	else
 2701		reg = BMAC_HOST_INFO(bmac_index);
 2702	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
 2703	return 0;
 2704}
 2705
 2706static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
 2707					 int mac_pref)
 2708{
 2709	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
 2710}
 2711
 2712static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
 2713					   int mac_pref)
 2714{
 2715	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
 2716}
 2717
 2718static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
 2719				     int table_num, int mac_pref)
 2720{
 2721	if (idx >= niu_num_alt_addr(np))
 2722		return -EINVAL;
 2723	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
 2724}
 2725
 2726static u64 vlan_entry_set_parity(u64 reg_val)
 2727{
 2728	u64 port01_mask;
 2729	u64 port23_mask;
 2730
 2731	port01_mask = 0x00ff;
 2732	port23_mask = 0xff00;
 2733
 2734	if (hweight64(reg_val & port01_mask) & 1)
 2735		reg_val |= ENET_VLAN_TBL_PARITY0;
 2736	else
 2737		reg_val &= ~ENET_VLAN_TBL_PARITY0;
 2738
 2739	if (hweight64(reg_val & port23_mask) & 1)
 2740		reg_val |= ENET_VLAN_TBL_PARITY1;
 2741	else
 2742		reg_val &= ~ENET_VLAN_TBL_PARITY1;
 2743
 2744	return reg_val;
 2745}
 2746
 2747static void vlan_tbl_write(struct niu *np, unsigned long index,
 2748			   int port, int vpr, int rdc_table)
 2749{
 2750	u64 reg_val = nr64(ENET_VLAN_TBL(index));
 2751
 2752	reg_val &= ~((ENET_VLAN_TBL_VPR |
 2753		      ENET_VLAN_TBL_VLANRDCTBLN) <<
 2754		     ENET_VLAN_TBL_SHIFT(port));
 2755	if (vpr)
 2756		reg_val |= (ENET_VLAN_TBL_VPR <<
 2757			    ENET_VLAN_TBL_SHIFT(port));
 2758	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
 2759
 2760	reg_val = vlan_entry_set_parity(reg_val);
 2761
 2762	nw64(ENET_VLAN_TBL(index), reg_val);
 2763}
 2764
 2765static void vlan_tbl_clear(struct niu *np)
 2766{
 2767	int i;
 2768
 2769	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
 2770		nw64(ENET_VLAN_TBL(i), 0);
 2771}
 2772
 2773static int tcam_wait_bit(struct niu *np, u64 bit)
 2774{
 2775	int limit = 1000;
 2776
 2777	while (--limit > 0) {
 2778		if (nr64(TCAM_CTL) & bit)
 2779			break;
 2780		udelay(1);
 2781	}
 2782	if (limit <= 0)
 2783		return -ENODEV;
 2784
 2785	return 0;
 2786}
 2787
 2788static int tcam_flush(struct niu *np, int index)
 2789{
 2790	nw64(TCAM_KEY_0, 0x00);
 2791	nw64(TCAM_KEY_MASK_0, 0xff);
 2792	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
 2793
 2794	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2795}
 2796
 2797#if 0
 2798static int tcam_read(struct niu *np, int index,
 2799		     u64 *key, u64 *mask)
 2800{
 2801	int err;
 2802
 2803	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
 2804	err = tcam_wait_bit(np, TCAM_CTL_STAT);
 2805	if (!err) {
 2806		key[0] = nr64(TCAM_KEY_0);
 2807		key[1] = nr64(TCAM_KEY_1);
 2808		key[2] = nr64(TCAM_KEY_2);
 2809		key[3] = nr64(TCAM_KEY_3);
 2810		mask[0] = nr64(TCAM_KEY_MASK_0);
 2811		mask[1] = nr64(TCAM_KEY_MASK_1);
 2812		mask[2] = nr64(TCAM_KEY_MASK_2);
 2813		mask[3] = nr64(TCAM_KEY_MASK_3);
 2814	}
 2815	return err;
 2816}
 2817#endif
 2818
 2819static int tcam_write(struct niu *np, int index,
 2820		      u64 *key, u64 *mask)
 2821{
 2822	nw64(TCAM_KEY_0, key[0]);
 2823	nw64(TCAM_KEY_1, key[1]);
 2824	nw64(TCAM_KEY_2, key[2]);
 2825	nw64(TCAM_KEY_3, key[3]);
 2826	nw64(TCAM_KEY_MASK_0, mask[0]);
 2827	nw64(TCAM_KEY_MASK_1, mask[1]);
 2828	nw64(TCAM_KEY_MASK_2, mask[2]);
 2829	nw64(TCAM_KEY_MASK_3, mask[3]);
 2830	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
 2831
 2832	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2833}
 2834
 2835#if 0
 2836static int tcam_assoc_read(struct niu *np, int index, u64 *data)
 2837{
 2838	int err;
 2839
 2840	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
 2841	err = tcam_wait_bit(np, TCAM_CTL_STAT);
 2842	if (!err)
 2843		*data = nr64(TCAM_KEY_1);
 2844
 2845	return err;
 2846}
 2847#endif
 2848
 2849static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
 2850{
 2851	nw64(TCAM_KEY_1, assoc_data);
 2852	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
 2853
 2854	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2855}
 2856
 2857static void tcam_enable(struct niu *np, int on)
 2858{
 2859	u64 val = nr64(FFLP_CFG_1);
 2860
 2861	if (on)
 2862		val &= ~FFLP_CFG_1_TCAM_DIS;
 2863	else
 2864		val |= FFLP_CFG_1_TCAM_DIS;
 2865	nw64(FFLP_CFG_1, val);
 2866}
 2867
 2868static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
 2869{
 2870	u64 val = nr64(FFLP_CFG_1);
 2871
 2872	val &= ~(FFLP_CFG_1_FFLPINITDONE |
 2873		 FFLP_CFG_1_CAMLAT |
 2874		 FFLP_CFG_1_CAMRATIO);
 2875	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
 2876	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
 2877	nw64(FFLP_CFG_1, val);
 2878
 2879	val = nr64(FFLP_CFG_1);
 2880	val |= FFLP_CFG_1_FFLPINITDONE;
 2881	nw64(FFLP_CFG_1, val);
 2882}
 2883
 2884static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
 2885				      int on)
 2886{
 2887	unsigned long reg;
 2888	u64 val;
 2889
 2890	if (class < CLASS_CODE_ETHERTYPE1 ||
 2891	    class > CLASS_CODE_ETHERTYPE2)
 2892		return -EINVAL;
 2893
 2894	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
 2895	val = nr64(reg);
 2896	if (on)
 2897		val |= L2_CLS_VLD;
 2898	else
 2899		val &= ~L2_CLS_VLD;
 2900	nw64(reg, val);
 2901
 2902	return 0;
 2903}
 2904
 2905#if 0
 2906static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
 2907				   u64 ether_type)
 2908{
 2909	unsigned long reg;
 2910	u64 val;
 2911
 2912	if (class < CLASS_CODE_ETHERTYPE1 ||
 2913	    class > CLASS_CODE_ETHERTYPE2 ||
 2914	    (ether_type & ~(u64)0xffff) != 0)
 2915		return -EINVAL;
 2916
 2917	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
 2918	val = nr64(reg);
 2919	val &= ~L2_CLS_ETYPE;
 2920	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
 2921	nw64(reg, val);
 2922
 2923	return 0;
 2924}
 2925#endif
 2926
 2927static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
 2928				     int on)
 2929{
 2930	unsigned long reg;
 2931	u64 val;
 2932
 2933	if (class < CLASS_CODE_USER_PROG1 ||
 2934	    class > CLASS_CODE_USER_PROG4)
 2935		return -EINVAL;
 2936
 2937	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
 2938	val = nr64(reg);
 2939	if (on)
 2940		val |= L3_CLS_VALID;
 2941	else
 2942		val &= ~L3_CLS_VALID;
 2943	nw64(reg, val);
 2944
 2945	return 0;
 2946}
 2947
 2948static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
 2949				  int ipv6, u64 protocol_id,
 2950				  u64 tos_mask, u64 tos_val)
 2951{
 2952	unsigned long reg;
 2953	u64 val;
 2954
 2955	if (class < CLASS_CODE_USER_PROG1 ||
 2956	    class > CLASS_CODE_USER_PROG4 ||
 2957	    (protocol_id & ~(u64)0xff) != 0 ||
 2958	    (tos_mask & ~(u64)0xff) != 0 ||
 2959	    (tos_val & ~(u64)0xff) != 0)
 2960		return -EINVAL;
 2961
 2962	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
 2963	val = nr64(reg);
 2964	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
 2965		 L3_CLS_TOSMASK | L3_CLS_TOS);
 2966	if (ipv6)
 2967		val |= L3_CLS_IPVER;
 2968	val |= (protocol_id << L3_CLS_PID_SHIFT);
 2969	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
 2970	val |= (tos_val << L3_CLS_TOS_SHIFT);
 2971	nw64(reg, val);
 2972
 2973	return 0;
 2974}
 2975
 2976static int tcam_early_init(struct niu *np)
 2977{
 2978	unsigned long i;
 2979	int err;
 2980
 2981	tcam_enable(np, 0);
 2982	tcam_set_lat_and_ratio(np,
 2983			       DEFAULT_TCAM_LATENCY,
 2984			       DEFAULT_TCAM_ACCESS_RATIO);
 2985	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
 2986		err = tcam_user_eth_class_enable(np, i, 0);
 2987		if (err)
 2988			return err;
 2989	}
 2990	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
 2991		err = tcam_user_ip_class_enable(np, i, 0);
 2992		if (err)
 2993			return err;
 2994	}
 2995
 2996	return 0;
 2997}
 2998
 2999static int tcam_flush_all(struct niu *np)
 3000{
 3001	unsigned long i;
 3002
 3003	for (i = 0; i < np->parent->tcam_num_entries; i++) {
 3004		int err = tcam_flush(np, i);
 3005		if (err)
 3006			return err;
 3007	}
 3008	return 0;
 3009}
 3010
 3011static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
 3012{
 3013	return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
 3014}
 3015
 3016#if 0
 3017static int hash_read(struct niu *np, unsigned long partition,
 3018		     unsigned long index, unsigned long num_entries,
 3019		     u64 *data)
 3020{
 3021	u64 val = hash_addr_regval(index, num_entries);
 3022	unsigned long i;
 3023
 3024	if (partition >= FCRAM_NUM_PARTITIONS ||
 3025	    index + num_entries > FCRAM_SIZE)
 3026		return -EINVAL;
 3027
 3028	nw64(HASH_TBL_ADDR(partition), val);
 3029	for (i = 0; i < num_entries; i++)
 3030		data[i] = nr64(HASH_TBL_DATA(partition));
 3031
 3032	return 0;
 3033}
 3034#endif
 3035
 3036static int hash_write(struct niu *np, unsigned long partition,
 3037		      unsigned long index, unsigned long num_entries,
 3038		      u64 *data)
 3039{
 3040	u64 val = hash_addr_regval(index, num_entries);
 3041	unsigned long i;
 3042
 3043	if (partition >= FCRAM_NUM_PARTITIONS ||
 3044	    index + (num_entries * 8) > FCRAM_SIZE)
 3045		return -EINVAL;
 3046
 3047	nw64(HASH_TBL_ADDR(partition), val);
 3048	for (i = 0; i < num_entries; i++)
 3049		nw64(HASH_TBL_DATA(partition), data[i]);
 3050
 3051	return 0;
 3052}
 3053
 3054static void fflp_reset(struct niu *np)
 3055{
 3056	u64 val;
 3057
 3058	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
 3059	udelay(10);
 3060	nw64(FFLP_CFG_1, 0);
 3061
 3062	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
 3063	nw64(FFLP_CFG_1, val);
 3064}
 3065
 3066static void fflp_set_timings(struct niu *np)
 3067{
 3068	u64 val = nr64(FFLP_CFG_1);
 3069
 3070	val &= ~FFLP_CFG_1_FFLPINITDONE;
 3071	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
 3072	nw64(FFLP_CFG_1, val);
 3073
 3074	val = nr64(FFLP_CFG_1);
 3075	val |= FFLP_CFG_1_FFLPINITDONE;
 3076	nw64(FFLP_CFG_1, val);
 3077
 3078	val = nr64(FCRAM_REF_TMR);
 3079	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
 3080	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
 3081	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
 3082	nw64(FCRAM_REF_TMR, val);
 3083}
 3084
 3085static int fflp_set_partition(struct niu *np, u64 partition,
 3086			      u64 mask, u64 base, int enable)
 3087{
 3088	unsigned long reg;
 3089	u64 val;
 3090
 3091	if (partition >= FCRAM_NUM_PARTITIONS ||
 3092	    (mask & ~(u64)0x1f) != 0 ||
 3093	    (base & ~(u64)0x1f) != 0)
 3094		return -EINVAL;
 3095
 3096	reg = FLW_PRT_SEL(partition);
 3097
 3098	val = nr64(reg);
 3099	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
 3100	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
 3101	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
 3102	if (enable)
 3103		val |= FLW_PRT_SEL_EXT;
 3104	nw64(reg, val);
 3105
 3106	return 0;
 3107}
 3108
 3109static int fflp_disable_all_partitions(struct niu *np)
 3110{
 3111	unsigned long i;
 3112
 3113	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
 3114		int err = fflp_set_partition(np, 0, 0, 0, 0);
 3115		if (err)
 3116			return err;
 3117	}
 3118	return 0;
 3119}
 3120
 3121static void fflp_llcsnap_enable(struct niu *np, int on)
 3122{
 3123	u64 val = nr64(FFLP_CFG_1);
 3124
 3125	if (on)
 3126		val |= FFLP_CFG_1_LLCSNAP;
 3127	else
 3128		val &= ~FFLP_CFG_1_LLCSNAP;
 3129	nw64(FFLP_CFG_1, val);
 3130}
 3131
 3132static void fflp_errors_enable(struct niu *np, int on)
 3133{
 3134	u64 val = nr64(FFLP_CFG_1);
 3135
 3136	if (on)
 3137		val &= ~FFLP_CFG_1_ERRORDIS;
 3138	else
 3139		val |= FFLP_CFG_1_ERRORDIS;
 3140	nw64(FFLP_CFG_1, val);
 3141}
 3142
 3143static int fflp_hash_clear(struct niu *np)
 3144{
 3145	struct fcram_hash_ipv4 ent;
 3146	unsigned long i;
 3147
 3148	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
 3149	memset(&ent, 0, sizeof(ent));
 3150	ent.header = HASH_HEADER_EXT;
 3151
 3152	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
 3153		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
 3154		if (err)
 3155			return err;
 3156	}
 3157	return 0;
 3158}
 3159
 3160static int fflp_early_init(struct niu *np)
 3161{
 3162	struct niu_parent *parent;
 3163	unsigned long flags;
 3164	int err;
 3165
 3166	niu_lock_parent(np, flags);
 3167
 3168	parent = np->parent;
 3169	err = 0;
 3170	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
 3171		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 3172			fflp_reset(np);
 3173			fflp_set_timings(np);
 3174			err = fflp_disable_all_partitions(np);
 3175			if (err) {
 3176				netif_printk(np, probe, KERN_DEBUG, np->dev,
 3177					     "fflp_disable_all_partitions failed, err=%d\n",
 3178					     err);
 3179				goto out;
 3180			}
 3181		}
 3182
 3183		err = tcam_early_init(np);
 3184		if (err) {
 3185			netif_printk(np, probe, KERN_DEBUG, np->dev,
 3186				     "tcam_early_init failed, err=%d\n", err);
 3187			goto out;
 3188		}
 3189		fflp_llcsnap_enable(np, 1);
 3190		fflp_errors_enable(np, 0);
 3191		nw64(H1POLY, 0);
 3192		nw64(H2POLY, 0);
 3193
 3194		err = tcam_flush_all(np);
 3195		if (err) {
 3196			netif_printk(np, probe, KERN_DEBUG, np->dev,
 3197				     "tcam_flush_all failed, err=%d\n", err);
 3198			goto out;
 3199		}
 3200		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 3201			err = fflp_hash_clear(np);
 3202			if (err) {
 3203				netif_printk(np, probe, KERN_DEBUG, np->dev,
 3204					     "fflp_hash_clear failed, err=%d\n",
 3205					     err);
 3206				goto out;
 3207			}
 3208		}
 3209
 3210		vlan_tbl_clear(np);
 3211
 3212		parent->flags |= PARENT_FLGS_CLS_HWINIT;
 3213	}
 3214out:
 3215	niu_unlock_parent(np, flags);
 3216	return err;
 3217}
 3218
 3219static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
 3220{
 3221	if (class_code < CLASS_CODE_USER_PROG1 ||
 3222	    class_code > CLASS_CODE_SCTP_IPV6)
 3223		return -EINVAL;
 3224
 3225	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
 3226	return 0;
 3227}
 3228
 3229static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
 3230{
 3231	if (class_code < CLASS_CODE_USER_PROG1 ||
 3232	    class_code > CLASS_CODE_SCTP_IPV6)
 3233		return -EINVAL;
 3234
 3235	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
 3236	return 0;
 3237}
 3238
 3239/* Entries for the ports are interleaved in the TCAM */
 3240static u16 tcam_get_index(struct niu *np, u16 idx)
 3241{
 3242	/* One entry reserved for IP fragment rule */
 3243	if (idx >= (np->clas.tcam_sz - 1))
 3244		idx = 0;
 3245	return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
 3246}
 3247
 3248static u16 tcam_get_size(struct niu *np)
 3249{
 3250	/* One entry reserved for IP fragment rule */
 3251	return np->clas.tcam_sz - 1;
 3252}
 3253
 3254static u16 tcam_get_valid_entry_cnt(struct niu *np)
 3255{
 3256	/* One entry reserved for IP fragment rule */
 3257	return np->clas.tcam_valid_entries - 1;
 3258}
 3259
 3260static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
 3261			      u32 offset, u32 size, u32 truesize)
 3262{
 3263	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
 3264
 3265	skb->len += size;
 3266	skb->data_len += size;
 3267	skb->truesize += truesize;
 3268}
 3269
 3270static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
 3271{
 3272	a >>= PAGE_SHIFT;
 3273	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
 3274
 3275	return a & (MAX_RBR_RING_SIZE - 1);
 3276}
 3277
 3278static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
 3279				    struct page ***link)
 3280{
 3281	unsigned int h = niu_hash_rxaddr(rp, addr);
 3282	struct page *p, **pp;
 3283
 3284	addr &= PAGE_MASK;
 3285	pp = &rp->rxhash[h];
 3286	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
 3287		if (p->index == addr) {
 3288			*link = pp;
 3289			goto found;
 3290		}
 3291	}
 3292	BUG();
 3293
 3294found:
 3295	return p;
 3296}
 3297
 3298static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
 3299{
 3300	unsigned int h = niu_hash_rxaddr(rp, base);
 3301
 3302	page->index = base;
 3303	page->mapping = (struct address_space *) rp->rxhash[h];
 3304	rp->rxhash[h] = page;
 3305}
 3306
 3307static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
 3308			    gfp_t mask, int start_index)
 3309{
 3310	struct page *page;
 3311	u64 addr;
 3312	int i;
 3313
 3314	page = alloc_page(mask);
 3315	if (!page)
 3316		return -ENOMEM;
 3317
 3318	addr = np->ops->map_page(np->device, page, 0,
 3319				 PAGE_SIZE, DMA_FROM_DEVICE);
 3320	if (!addr) {
 3321		__free_page(page);
 3322		return -ENOMEM;
 3323	}
 3324
 3325	niu_hash_page(rp, page, addr);
 3326	if (rp->rbr_blocks_per_page > 1)
 3327		page_ref_add(page, rp->rbr_blocks_per_page - 1);
 3328
 3329	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
 3330		__le32 *rbr = &rp->rbr[start_index + i];
 3331
 3332		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
 3333		addr += rp->rbr_block_size;
 3334	}
 3335
 3336	return 0;
 3337}
 3338
 3339static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
 3340{
 3341	int index = rp->rbr_index;
 3342
 3343	rp->rbr_pending++;
 3344	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
 3345		int err = niu_rbr_add_page(np, rp, mask, index);
 3346
 3347		if (unlikely(err)) {
 3348			rp->rbr_pending--;
 3349			return;
 3350		}
 3351
 3352		rp->rbr_index += rp->rbr_blocks_per_page;
 3353		BUG_ON(rp->rbr_index > rp->rbr_table_size);
 3354		if (rp->rbr_index == rp->rbr_table_size)
 3355			rp->rbr_index = 0;
 3356
 3357		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
 3358			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
 3359			rp->rbr_pending = 0;
 3360		}
 3361	}
 3362}
 3363
 3364static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
 3365{
 3366	unsigned int index = rp->rcr_index;
 3367	int num_rcr = 0;
 3368
 3369	rp->rx_dropped++;
 3370	while (1) {
 3371		struct page *page, **link;
 3372		u64 addr, val;
 3373		u32 rcr_size;
 3374
 3375		num_rcr++;
 3376
 3377		val = le64_to_cpup(&rp->rcr[index]);
 3378		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 3379			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
 3380		page = niu_find_rxpage(rp, addr, &link);
 3381
 3382		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 3383					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 3384		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
 3385			*link = (struct page *) page->mapping;
 3386			np->ops->unmap_page(np->device, page->index,
 3387					    PAGE_SIZE, DMA_FROM_DEVICE);
 3388			page->index = 0;
 3389			page->mapping = NULL;
 3390			__free_page(page);
 3391			rp->rbr_refill_pending++;
 3392		}
 3393
 3394		index = NEXT_RCR(rp, index);
 3395		if (!(val & RCR_ENTRY_MULTI))
 3396			break;
 3397
 3398	}
 3399	rp->rcr_index = index;
 3400
 3401	return num_rcr;
 3402}
 3403
 3404static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
 3405			      struct rx_ring_info *rp)
 3406{
 3407	unsigned int index = rp->rcr_index;
 3408	struct rx_pkt_hdr1 *rh;
 3409	struct sk_buff *skb;
 3410	int len, num_rcr;
 3411
 3412	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
 3413	if (unlikely(!skb))
 3414		return niu_rx_pkt_ignore(np, rp);
 3415
 3416	num_rcr = 0;
 3417	while (1) {
 3418		struct page *page, **link;
 3419		u32 rcr_size, append_size;
 3420		u64 addr, val, off;
 3421
 3422		num_rcr++;
 3423
 3424		val = le64_to_cpup(&rp->rcr[index]);
 3425
 3426		len = (val & RCR_ENTRY_L2_LEN) >>
 3427			RCR_ENTRY_L2_LEN_SHIFT;
 3428		append_size = len + ETH_HLEN + ETH_FCS_LEN;
 3429
 3430		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 3431			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
 3432		page = niu_find_rxpage(rp, addr, &link);
 3433
 3434		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 3435					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 3436
 3437		off = addr & ~PAGE_MASK;
 3438		if (num_rcr == 1) {
 3439			int ptype;
 3440
 3441			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
 3442			if ((ptype == RCR_PKT_TYPE_TCP ||
 3443			     ptype == RCR_PKT_TYPE_UDP) &&
 3444			    !(val & (RCR_ENTRY_NOPORT |
 3445				     RCR_ENTRY_ERROR)))
 3446				skb->ip_summed = CHECKSUM_UNNECESSARY;
 3447			else
 3448				skb_checksum_none_assert(skb);
 3449		} else if (!(val & RCR_ENTRY_MULTI))
 3450			append_size = append_size - skb->len;
 3451
 3452		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
 3453		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
 3454			*link = (struct page *) page->mapping;
 3455			np->ops->unmap_page(np->device, page->index,
 3456					    PAGE_SIZE, DMA_FROM_DEVICE);
 3457			page->index = 0;
 3458			page->mapping = NULL;
 3459			rp->rbr_refill_pending++;
 3460		} else
 3461			get_page(page);
 3462
 3463		index = NEXT_RCR(rp, index);
 3464		if (!(val & RCR_ENTRY_MULTI))
 3465			break;
 3466
 3467	}
 3468	rp->rcr_index = index;
 3469
 3470	len += sizeof(*rh);
 3471	len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
 3472	__pskb_pull_tail(skb, len);
 3473
 3474	rh = (struct rx_pkt_hdr1 *) skb->data;
 3475	if (np->dev->features & NETIF_F_RXHASH)
 3476		skb_set_hash(skb,
 3477			     ((u32)rh->hashval2_0 << 24 |
 3478			      (u32)rh->hashval2_1 << 16 |
 3479			      (u32)rh->hashval1_1 << 8 |
 3480			      (u32)rh->hashval1_2 << 0),
 3481			     PKT_HASH_TYPE_L3);
 3482	skb_pull(skb, sizeof(*rh));
 3483
 3484	rp->rx_packets++;
 3485	rp->rx_bytes += skb->len;
 3486
 3487	skb->protocol = eth_type_trans(skb, np->dev);
 3488	skb_record_rx_queue(skb, rp->rx_channel);
 3489	napi_gro_receive(napi, skb);
 3490
 3491	return num_rcr;
 3492}
 3493
 3494static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
 3495{
 3496	int blocks_per_page = rp->rbr_blocks_per_page;
 3497	int err, index = rp->rbr_index;
 3498
 3499	err = 0;
 3500	while (index < (rp->rbr_table_size - blocks_per_page)) {
 3501		err = niu_rbr_add_page(np, rp, mask, index);
 3502		if (unlikely(err))
 3503			break;
 3504
 3505		index += blocks_per_page;
 3506	}
 3507
 3508	rp->rbr_index = index;
 3509	return err;
 3510}
 3511
 3512static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
 3513{
 3514	int i;
 3515
 3516	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
 3517		struct page *page;
 3518
 3519		page = rp->rxhash[i];
 3520		while (page) {
 3521			struct page *next = (struct page *) page->mapping;
 3522			u64 base = page->index;
 3523
 3524			np->ops->unmap_page(np->device, base, PAGE_SIZE,
 3525					    DMA_FROM_DEVICE);
 3526			page->index = 0;
 3527			page->mapping = NULL;
 3528
 3529			__free_page(page);
 3530
 3531			page = next;
 3532		}
 3533	}
 3534
 3535	for (i = 0; i < rp->rbr_table_size; i++)
 3536		rp->rbr[i] = cpu_to_le32(0);
 3537	rp->rbr_index = 0;
 3538}
 3539
 3540static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 3541{
 3542	struct tx_buff_info *tb = &rp->tx_buffs[idx];
 3543	struct sk_buff *skb = tb->skb;
 3544	struct tx_pkt_hdr *tp;
 3545	u64 tx_flags;
 3546	int i, len;
 3547
 3548	tp = (struct tx_pkt_hdr *) skb->data;
 3549	tx_flags = le64_to_cpup(&tp->flags);
 3550
 3551	rp->tx_packets++;
 3552	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
 3553			 ((tx_flags & TXHDR_PAD) / 2));
 3554
 3555	len = skb_headlen(skb);
 3556	np->ops->unmap_single(np->device, tb->mapping,
 3557			      len, DMA_TO_DEVICE);
 3558
 3559	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
 3560		rp->mark_pending--;
 3561
 3562	tb->skb = NULL;
 3563	do {
 3564		idx = NEXT_TX(rp, idx);
 3565		len -= MAX_TX_DESC_LEN;
 3566	} while (len > 0);
 3567
 3568	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 3569		tb = &rp->tx_buffs[idx];
 3570		BUG_ON(tb->skb != NULL);
 3571		np->ops->unmap_page(np->device, tb->mapping,
 3572				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
 3573				    DMA_TO_DEVICE);
 3574		idx = NEXT_TX(rp, idx);
 3575	}
 3576
 3577	dev_kfree_skb(skb);
 3578
 3579	return idx;
 3580}
 3581
 3582#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
 3583
 3584static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 3585{
 3586	struct netdev_queue *txq;
 3587	u16 pkt_cnt, tmp;
 3588	int cons, index;
 3589	u64 cs;
 3590
 3591	index = (rp - np->tx_rings);
 3592	txq = netdev_get_tx_queue(np->dev, index);
 3593
 3594	cs = rp->tx_cs;
 3595	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
 3596		goto out;
 3597
 3598	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
 3599	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
 3600		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
 3601
 3602	rp->last_pkt_cnt = tmp;
 3603
 3604	cons = rp->cons;
 3605
 3606	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
 3607		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 3608
 3609	while (pkt_cnt--)
 3610		cons = release_tx_packet(np, rp, cons);
 3611
 3612	rp->cons = cons;
 3613	smp_mb();
 3614
 3615out:
 3616	if (unlikely(netif_tx_queue_stopped(txq) &&
 3617		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
 3618		__netif_tx_lock(txq, smp_processor_id());
 3619		if (netif_tx_queue_stopped(txq) &&
 3620		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
 3621			netif_tx_wake_queue(txq);
 3622		__netif_tx_unlock(txq);
 3623	}
 3624}
 3625
 3626static inline void niu_sync_rx_discard_stats(struct niu *np,
 3627					     struct rx_ring_info *rp,
 3628					     const int limit)
 3629{
 3630	/* This elaborate scheme is needed for reading the RX discard
 3631	 * counters, as they are only 16-bit and can overflow quickly,
 3632	 * and because the overflow indication bit is not usable as
 3633	 * the counter value does not wrap, but remains at max value
 3634	 * 0xFFFF.
 3635	 *
 3636	 * In theory and in practice counters can be lost in between
 3637	 * reading nr64() and clearing the counter nw64().  For this
 3638	 * reason, the number of counter clearings nw64() is
 3639	 * limited/reduced though the limit parameter.
 3640	 */
 3641	int rx_channel = rp->rx_channel;
 3642	u32 misc, wred;
 3643
 3644	/* RXMISC (Receive Miscellaneous Discard Count), covers the
 3645	 * following discard events: IPP (Input Port Process),
 3646	 * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive
 3647	 * Block Ring) prefetch buffer is empty.
 3648	 */
 3649	misc = nr64(RXMISC(rx_channel));
 3650	if (unlikely((misc & RXMISC_COUNT) > limit)) {
 3651		nw64(RXMISC(rx_channel), 0);
 3652		rp->rx_errors += misc & RXMISC_COUNT;
 3653
 3654		if (unlikely(misc & RXMISC_OFLOW))
 3655			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
 3656				rx_channel);
 3657
 3658		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
 3659			     "rx-%d: MISC drop=%u over=%u\n",
 3660			     rx_channel, misc, misc-limit);
 3661	}
 3662
 3663	/* WRED (Weighted Random Early Discard) by hardware */
 3664	wred = nr64(RED_DIS_CNT(rx_channel));
 3665	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
 3666		nw64(RED_DIS_CNT(rx_channel), 0);
 3667		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
 3668
 3669		if (unlikely(wred & RED_DIS_CNT_OFLOW))
 3670			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
 3671
 3672		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
 3673			     "rx-%d: WRED drop=%u over=%u\n",
 3674			     rx_channel, wred, wred-limit);
 3675	}
 3676}
 3677
 3678static int niu_rx_work(struct napi_struct *napi, struct niu *np,
 3679		       struct rx_ring_info *rp, int budget)
 3680{
 3681	int qlen, rcr_done = 0, work_done = 0;
 3682	struct rxdma_mailbox *mbox = rp->mbox;
 3683	u64 stat;
 3684
 3685#if 1
 3686	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 3687	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
 3688#else
 3689	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
 3690	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
 3691#endif
 3692	mbox->rx_dma_ctl_stat = 0;
 3693	mbox->rcrstat_a = 0;
 3694
 3695	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
 3696		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
 3697		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
 3698
 3699	rcr_done = work_done = 0;
 3700	qlen = min(qlen, budget);
 3701	while (work_done < qlen) {
 3702		rcr_done += niu_process_rx_pkt(napi, np, rp);
 3703		work_done++;
 3704	}
 3705
 3706	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
 3707		unsigned int i;
 3708
 3709		for (i = 0; i < rp->rbr_refill_pending; i++)
 3710			niu_rbr_refill(np, rp, GFP_ATOMIC);
 3711		rp->rbr_refill_pending = 0;
 3712	}
 3713
 3714	stat = (RX_DMA_CTL_STAT_MEX |
 3715		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
 3716		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
 3717
 3718	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
 3719
 3720	/* Only sync discards stats when qlen indicate potential for drops */
 3721	if (qlen > 10)
 3722		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
 3723
 3724	return work_done;
 3725}
 3726
 3727static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
 3728{
 3729	u64 v0 = lp->v0;
 3730	u32 tx_vec = (v0 >> 32);
 3731	u32 rx_vec = (v0 & 0xffffffff);
 3732	int i, work_done = 0;
 3733
 3734	netif_printk(np, intr, KERN_DEBUG, np->dev,
 3735		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
 3736
 3737	for (i = 0; i < np->num_tx_rings; i++) {
 3738		struct tx_ring_info *rp = &np->tx_rings[i];
 3739		if (tx_vec & (1 << rp->tx_channel))
 3740			niu_tx_work(np, rp);
 3741		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
 3742	}
 3743
 3744	for (i = 0; i < np->num_rx_rings; i++) {
 3745		struct rx_ring_info *rp = &np->rx_rings[i];
 3746
 3747		if (rx_vec & (1 << rp->rx_channel)) {
 3748			int this_work_done;
 3749
 3750			this_work_done = niu_rx_work(&lp->napi, np, rp,
 3751						     budget);
 3752
 3753			budget -= this_work_done;
 3754			work_done += this_work_done;
 3755		}
 3756		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
 3757	}
 3758
 3759	return work_done;
 3760}
 3761
 3762static int niu_poll(struct napi_struct *napi, int budget)
 3763{
 3764	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
 3765	struct niu *np = lp->np;
 3766	int work_done;
 3767
 3768	work_done = niu_poll_core(np, lp, budget);
 3769
 3770	if (work_done < budget) {
 3771		napi_complete_done(napi, work_done);
 3772		niu_ldg_rearm(np, lp, 1);
 3773	}
 3774	return work_done;
 3775}
 3776
 3777static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
 3778				  u64 stat)
 3779{
 3780	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
 3781
 3782	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
 3783		pr_cont("RBR_TMOUT ");
 3784	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
 3785		pr_cont("RSP_CNT ");
 3786	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
 3787		pr_cont("BYTE_EN_BUS ");
 3788	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
 3789		pr_cont("RSP_DAT ");
 3790	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
 3791		pr_cont("RCR_ACK ");
 3792	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
 3793		pr_cont("RCR_SHA_PAR ");
 3794	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
 3795		pr_cont("RBR_PRE_PAR ");
 3796	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
 3797		pr_cont("CONFIG ");
 3798	if (stat & RX_DMA_CTL_STAT_RCRINCON)
 3799		pr_cont("RCRINCON ");
 3800	if (stat & RX_DMA_CTL_STAT_RCRFULL)
 3801		pr_cont("RCRFULL ");
 3802	if (stat & RX_DMA_CTL_STAT_RBRFULL)
 3803		pr_cont("RBRFULL ");
 3804	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
 3805		pr_cont("RBRLOGPAGE ");
 3806	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
 3807		pr_cont("CFIGLOGPAGE ");
 3808	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
 3809		pr_cont("DC_FIDO ");
 3810
 3811	pr_cont(")\n");
 3812}
 3813
 3814static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
 3815{
 3816	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 3817	int err = 0;
 3818
 3819
 3820	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
 3821		    RX_DMA_CTL_STAT_PORT_FATAL))
 3822		err = -EINVAL;
 3823
 3824	if (err) {
 3825		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
 3826			   rp->rx_channel,
 3827			   (unsigned long long) stat);
 3828
 3829		niu_log_rxchan_errors(np, rp, stat);
 3830	}
 3831
 3832	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 3833	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
 3834
 3835	return err;
 3836}
 3837
 3838static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
 3839				  u64 cs)
 3840{
 3841	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
 3842
 3843	if (cs & TX_CS_MBOX_ERR)
 3844		pr_cont("MBOX ");
 3845	if (cs & TX_CS_PKT_SIZE_ERR)
 3846		pr_cont("PKT_SIZE ");
 3847	if (cs & TX_CS_TX_RING_OFLOW)
 3848		pr_cont("TX_RING_OFLOW ");
 3849	if (cs & TX_CS_PREF_BUF_PAR_ERR)
 3850		pr_cont("PREF_BUF_PAR ");
 3851	if (cs & TX_CS_NACK_PREF)
 3852		pr_cont("NACK_PREF ");
 3853	if (cs & TX_CS_NACK_PKT_RD)
 3854		pr_cont("NACK_PKT_RD ");
 3855	if (cs & TX_CS_CONF_PART_ERR)
 3856		pr_cont("CONF_PART ");
 3857	if (cs & TX_CS_PKT_PRT_ERR)
 3858		pr_cont("PKT_PTR ");
 3859
 3860	pr_cont(")\n");
 3861}
 3862
 3863static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
 3864{
 3865	u64 cs, logh, logl;
 3866
 3867	cs = nr64(TX_CS(rp->tx_channel));
 3868	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
 3869	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
 3870
 3871	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
 3872		   rp->tx_channel,
 3873		   (unsigned long long)cs,
 3874		   (unsigned long long)logh,
 3875		   (unsigned long long)logl);
 3876
 3877	niu_log_txchan_errors(np, rp, cs);
 3878
 3879	return -ENODEV;
 3880}
 3881
 3882static int niu_mif_interrupt(struct niu *np)
 3883{
 3884	u64 mif_status = nr64(MIF_STATUS);
 3885	int phy_mdint = 0;
 3886
 3887	if (np->flags & NIU_FLAGS_XMAC) {
 3888		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
 3889
 3890		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
 3891			phy_mdint = 1;
 3892	}
 3893
 3894	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
 3895		   (unsigned long long)mif_status, phy_mdint);
 3896
 3897	return -ENODEV;
 3898}
 3899
 3900static void niu_xmac_interrupt(struct niu *np)
 3901{
 3902	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
 3903	u64 val;
 3904
 3905	val = nr64_mac(XTXMAC_STATUS);
 3906	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
 3907		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
 3908	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
 3909		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
 3910	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
 3911		mp->tx_fifo_errors++;
 3912	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
 3913		mp->tx_overflow_errors++;
 3914	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
 3915		mp->tx_max_pkt_size_errors++;
 3916	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
 3917		mp->tx_underflow_errors++;
 3918
 3919	val = nr64_mac(XRXMAC_STATUS);
 3920	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
 3921		mp->rx_local_faults++;
 3922	if (val & XRXMAC_STATUS_RFLT_DET)
 3923		mp->rx_remote_faults++;
 3924	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
 3925		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
 3926	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
 3927		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
 3928	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
 3929		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
 3930	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
 3931		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
 3932	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
 3933		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
 3934	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
 3935		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
 3936	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
 3937		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
 3938	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
 3939		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
 3940	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
 3941		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
 3942	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
 3943		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
 3944	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
 3945		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
 3946	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
 3947		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
 3948	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
 3949		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
 3950	if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
 3951		mp->rx_octets += RXMAC_BT_CNT_COUNT;
 3952	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
 3953		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
 3954	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
 3955		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
 3956	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
 3957		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
 3958	if (val & XRXMAC_STATUS_RXUFLOW)
 3959		mp->rx_underflows++;
 3960	if (val & XRXMAC_STATUS_RXOFLOW)
 3961		mp->rx_overflows++;
 3962
 3963	val = nr64_mac(XMAC_FC_STAT);
 3964	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
 3965		mp->pause_off_state++;
 3966	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
 3967		mp->pause_on_state++;
 3968	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
 3969		mp->pause_received++;
 3970}
 3971
 3972static void niu_bmac_interrupt(struct niu *np)
 3973{
 3974	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
 3975	u64 val;
 3976
 3977	val = nr64_mac(BTXMAC_STATUS);
 3978	if (val & BTXMAC_STATUS_UNDERRUN)
 3979		mp->tx_underflow_errors++;
 3980	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
 3981		mp->tx_max_pkt_size_errors++;
 3982	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
 3983		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
 3984	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
 3985		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
 3986
 3987	val = nr64_mac(BRXMAC_STATUS);
 3988	if (val & BRXMAC_STATUS_OVERFLOW)
 3989		mp->rx_overflows++;
 3990	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
 3991		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
 3992	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
 3993		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
 3994	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
 3995		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
 3996	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
 3997		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
 3998
 3999	val = nr64_mac(BMAC_CTRL_STATUS);
 4000	if (val & BMAC_CTRL_STATUS_NOPAUSE)
 4001		mp->pause_off_state++;
 4002	if (val & BMAC_CTRL_STATUS_PAUSE)
 4003		mp->pause_on_state++;
 4004	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
 4005		mp->pause_received++;
 4006}
 4007
 4008static int niu_mac_interrupt(struct niu *np)
 4009{
 4010	if (np->flags & NIU_FLAGS_XMAC)
 4011		niu_xmac_interrupt(np);
 4012	else
 4013		niu_bmac_interrupt(np);
 4014
 4015	return 0;
 4016}
 4017
 4018static void niu_log_device_error(struct niu *np, u64 stat)
 4019{
 4020	netdev_err(np->dev, "Core device errors ( ");
 4021
 4022	if (stat & SYS_ERR_MASK_META2)
 4023		pr_cont("META2 ");
 4024	if (stat & SYS_ERR_MASK_META1)
 4025		pr_cont("META1 ");
 4026	if (stat & SYS_ERR_MASK_PEU)
 4027		pr_cont("PEU ");
 4028	if (stat & SYS_ERR_MASK_TXC)
 4029		pr_cont("TXC ");
 4030	if (stat & SYS_ERR_MASK_RDMC)
 4031		pr_cont("RDMC ");
 4032	if (stat & SYS_ERR_MASK_TDMC)
 4033		pr_cont("TDMC ");
 4034	if (stat & SYS_ERR_MASK_ZCP)
 4035		pr_cont("ZCP ");
 4036	if (stat & SYS_ERR_MASK_FFLP)
 4037		pr_cont("FFLP ");
 4038	if (stat & SYS_ERR_MASK_IPP)
 4039		pr_cont("IPP ");
 4040	if (stat & SYS_ERR_MASK_MAC)
 4041		pr_cont("MAC ");
 4042	if (stat & SYS_ERR_MASK_SMX)
 4043		pr_cont("SMX ");
 4044
 4045	pr_cont(")\n");
 4046}
 4047
 4048static int niu_device_error(struct niu *np)
 4049{
 4050	u64 stat = nr64(SYS_ERR_STAT);
 4051
 4052	netdev_err(np->dev, "Core device error, stat[%llx]\n",
 4053		   (unsigned long long)stat);
 4054
 4055	niu_log_device_error(np, stat);
 4056
 4057	return -ENODEV;
 4058}
 4059
 4060static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
 4061			      u64 v0, u64 v1, u64 v2)
 4062{
 4063
 4064	int i, err = 0;
 4065
 4066	lp->v0 = v0;
 4067	lp->v1 = v1;
 4068	lp->v2 = v2;
 4069
 4070	if (v1 & 0x00000000ffffffffULL) {
 4071		u32 rx_vec = (v1 & 0xffffffff);
 4072
 4073		for (i = 0; i < np->num_rx_rings; i++) {
 4074			struct rx_ring_info *rp = &np->rx_rings[i];
 4075
 4076			if (rx_vec & (1 << rp->rx_channel)) {
 4077				int r = niu_rx_error(np, rp);
 4078				if (r) {
 4079					err = r;
 4080				} else {
 4081					if (!v0)
 4082						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 4083						     RX_DMA_CTL_STAT_MEX);
 4084				}
 4085			}
 4086		}
 4087	}
 4088	if (v1 & 0x7fffffff00000000ULL) {
 4089		u32 tx_vec = (v1 >> 32) & 0x7fffffff;
 4090
 4091		for (i = 0; i < np->num_tx_rings; i++) {
 4092			struct tx_ring_info *rp = &np->tx_rings[i];
 4093
 4094			if (tx_vec & (1 << rp->tx_channel)) {
 4095				int r = niu_tx_error(np, rp);
 4096				if (r)
 4097					err = r;
 4098			}
 4099		}
 4100	}
 4101	if ((v0 | v1) & 0x8000000000000000ULL) {
 4102		int r = niu_mif_interrupt(np);
 4103		if (r)
 4104			err = r;
 4105	}
 4106	if (v2) {
 4107		if (v2 & 0x01ef) {
 4108			int r = niu_mac_interrupt(np);
 4109			if (r)
 4110				err = r;
 4111		}
 4112		if (v2 & 0x0210) {
 4113			int r = niu_device_error(np);
 4114			if (r)
 4115				err = r;
 4116		}
 4117	}
 4118
 4119	if (err)
 4120		niu_enable_interrupts(np, 0);
 4121
 4122	return err;
 4123}
 4124
 4125static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
 4126			    int ldn)
 4127{
 4128	struct rxdma_mailbox *mbox = rp->mbox;
 4129	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
 4130
 4131	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
 4132		      RX_DMA_CTL_STAT_RCRTO);
 4133	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
 4134
 4135	netif_printk(np, intr, KERN_DEBUG, np->dev,
 4136		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
 4137}
 4138
 4139static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
 4140			    int ldn)
 4141{
 4142	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
 4143
 4144	netif_printk(np, intr, KERN_DEBUG, np->dev,
 4145		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
 4146}
 4147
 4148static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
 4149{
 4150	struct niu_parent *parent = np->parent;
 4151	u32 rx_vec, tx_vec;
 4152	int i;
 4153
 4154	tx_vec = (v0 >> 32);
 4155	rx_vec = (v0 & 0xffffffff);
 4156
 4157	for (i = 0; i < np->num_rx_rings; i++) {
 4158		struct rx_ring_info *rp = &np->rx_rings[i];
 4159		int ldn = LDN_RXDMA(rp->rx_channel);
 4160
 4161		if (parent->ldg_map[ldn] != ldg)
 4162			continue;
 4163
 4164		nw64(LD_IM0(ldn), LD_IM0_MASK);
 4165		if (rx_vec & (1 << rp->rx_channel))
 4166			niu_rxchan_intr(np, rp, ldn);
 4167	}
 4168
 4169	for (i = 0; i < np->num_tx_rings; i++) {
 4170		struct tx_ring_info *rp = &np->tx_rings[i];
 4171		int ldn = LDN_TXDMA(rp->tx_channel);
 4172
 4173		if (parent->ldg_map[ldn] != ldg)
 4174			continue;
 4175
 4176		nw64(LD_IM0(ldn), LD_IM0_MASK);
 4177		if (tx_vec & (1 << rp->tx_channel))
 4178			niu_txchan_intr(np, rp, ldn);
 4179	}
 4180}
 4181
 4182static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
 4183			      u64 v0, u64 v1, u64 v2)
 4184{
 4185	if (likely(napi_schedule_prep(&lp->napi))) {
 4186		lp->v0 = v0;
 4187		lp->v1 = v1;
 4188		lp->v2 = v2;
 4189		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
 4190		__napi_schedule(&lp->napi);
 4191	}
 4192}
 4193
 4194static irqreturn_t niu_interrupt(int irq, void *dev_id)
 4195{
 4196	struct niu_ldg *lp = dev_id;
 4197	struct niu *np = lp->np;
 4198	int ldg = lp->ldg_num;
 4199	unsigned long flags;
 4200	u64 v0, v1, v2;
 4201
 4202	if (netif_msg_intr(np))
 4203		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
 4204		       __func__, lp, ldg);
 4205
 4206	spin_lock_irqsave(&np->lock, flags);
 4207
 4208	v0 = nr64(LDSV0(ldg));
 4209	v1 = nr64(LDSV1(ldg));
 4210	v2 = nr64(LDSV2(ldg));
 4211
 4212	if (netif_msg_intr(np))
 4213		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
 4214		       (unsigned long long) v0,
 4215		       (unsigned long long) v1,
 4216		       (unsigned long long) v2);
 4217
 4218	if (unlikely(!v0 && !v1 && !v2)) {
 4219		spin_unlock_irqrestore(&np->lock, flags);
 4220		return IRQ_NONE;
 4221	}
 4222
 4223	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
 4224		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
 4225		if (err)
 4226			goto out;
 4227	}
 4228	if (likely(v0 & ~((u64)1 << LDN_MIF)))
 4229		niu_schedule_napi(np, lp, v0, v1, v2);
 4230	else
 4231		niu_ldg_rearm(np, lp, 1);
 4232out:
 4233	spin_unlock_irqrestore(&np->lock, flags);
 4234
 4235	return IRQ_HANDLED;
 4236}
 4237
 4238static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
 4239{
 4240	if (rp->mbox) {
 4241		np->ops->free_coherent(np->device,
 4242				       sizeof(struct rxdma_mailbox),
 4243				       rp->mbox, rp->mbox_dma);
 4244		rp->mbox = NULL;
 4245	}
 4246	if (rp->rcr) {
 4247		np->ops->free_coherent(np->device,
 4248				       MAX_RCR_RING_SIZE * sizeof(__le64),
 4249				       rp->rcr, rp->rcr_dma);
 4250		rp->rcr = NULL;
 4251		rp->rcr_table_size = 0;
 4252		rp->rcr_index = 0;
 4253	}
 4254	if (rp->rbr) {
 4255		niu_rbr_free(np, rp);
 4256
 4257		np->ops->free_coherent(np->device,
 4258				       MAX_RBR_RING_SIZE * sizeof(__le32),
 4259				       rp->rbr, rp->rbr_dma);
 4260		rp->rbr = NULL;
 4261		rp->rbr_table_size = 0;
 4262		rp->rbr_index = 0;
 4263	}
 4264	kfree(rp->rxhash);
 4265	rp->rxhash = NULL;
 4266}
 4267
 4268static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
 4269{
 4270	if (rp->mbox) {
 4271		np->ops->free_coherent(np->device,
 4272				       sizeof(struct txdma_mailbox),
 4273				       rp->mbox, rp->mbox_dma);
 4274		rp->mbox = NULL;
 4275	}
 4276	if (rp->descr) {
 4277		int i;
 4278
 4279		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
 4280			if (rp->tx_buffs[i].skb)
 4281				(void) release_tx_packet(np, rp, i);
 4282		}
 4283
 4284		np->ops->free_coherent(np->device,
 4285				       MAX_TX_RING_SIZE * sizeof(__le64),
 4286				       rp->descr, rp->descr_dma);
 4287		rp->descr = NULL;
 4288		rp->pending = 0;
 4289		rp->prod = 0;
 4290		rp->cons = 0;
 4291		rp->wrap_bit = 0;
 4292	}
 4293}
 4294
 4295static void niu_free_channels(struct niu *np)
 4296{
 4297	int i;
 4298
 4299	if (np->rx_rings) {
 4300		for (i = 0; i < np->num_rx_rings; i++) {
 4301			struct rx_ring_info *rp = &np->rx_rings[i];
 4302
 4303			niu_free_rx_ring_info(np, rp);
 4304		}
 4305		kfree(np->rx_rings);
 4306		np->rx_rings = NULL;
 4307		np->num_rx_rings = 0;
 4308	}
 4309
 4310	if (np->tx_rings) {
 4311		for (i = 0; i < np->num_tx_rings; i++) {
 4312			struct tx_ring_info *rp = &np->tx_rings[i];
 4313
 4314			niu_free_tx_ring_info(np, rp);
 4315		}
 4316		kfree(np->tx_rings);
 4317		np->tx_rings = NULL;
 4318		np->num_tx_rings = 0;
 4319	}
 4320}
 4321
 4322static int niu_alloc_rx_ring_info(struct niu *np,
 4323				  struct rx_ring_info *rp)
 4324{
 4325	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
 4326
 4327	rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
 4328			     GFP_KERNEL);
 4329	if (!rp->rxhash)
 4330		return -ENOMEM;
 4331
 4332	rp->mbox = np->ops->alloc_coherent(np->device,
 4333					   sizeof(struct rxdma_mailbox),
 4334					   &rp->mbox_dma, GFP_KERNEL);
 4335	if (!rp->mbox)
 4336		return -ENOMEM;
 4337	if ((unsigned long)rp->mbox & (64UL - 1)) {
 4338		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
 4339			   rp->mbox);
 4340		return -EINVAL;
 4341	}
 4342
 4343	rp->rcr = np->ops->alloc_coherent(np->device,
 4344					  MAX_RCR_RING_SIZE * sizeof(__le64),
 4345					  &rp->rcr_dma, GFP_KERNEL);
 4346	if (!rp->rcr)
 4347		return -ENOMEM;
 4348	if ((unsigned long)rp->rcr & (64UL - 1)) {
 4349		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
 4350			   rp->rcr);
 4351		return -EINVAL;
 4352	}
 4353	rp->rcr_table_size = MAX_RCR_RING_SIZE;
 4354	rp->rcr_index = 0;
 4355
 4356	rp->rbr = np->ops->alloc_coherent(np->device,
 4357					  MAX_RBR_RING_SIZE * sizeof(__le32),
 4358					  &rp->rbr_dma, GFP_KERNEL);
 4359	if (!rp->rbr)
 4360		return -ENOMEM;
 4361	if ((unsigned long)rp->rbr & (64UL - 1)) {
 4362		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
 4363			   rp->rbr);
 4364		return -EINVAL;
 4365	}
 4366	rp->rbr_table_size = MAX_RBR_RING_SIZE;
 4367	rp->rbr_index = 0;
 4368	rp->rbr_pending = 0;
 4369
 4370	return 0;
 4371}
 4372
 4373static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
 4374{
 4375	int mtu = np->dev->mtu;
 4376
 4377	/* These values are recommended by the HW designers for fair
 4378	 * utilization of DRR amongst the rings.
 4379	 */
 4380	rp->max_burst = mtu + 32;
 4381	if (rp->max_burst > 4096)
 4382		rp->max_burst = 4096;
 4383}
 4384
 4385static int niu_alloc_tx_ring_info(struct niu *np,
 4386				  struct tx_ring_info *rp)
 4387{
 4388	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
 4389
 4390	rp->mbox = np->ops->alloc_coherent(np->device,
 4391					   sizeof(struct txdma_mailbox),
 4392					   &rp->mbox_dma, GFP_KERNEL);
 4393	if (!rp->mbox)
 4394		return -ENOMEM;
 4395	if ((unsigned long)rp->mbox & (64UL - 1)) {
 4396		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
 4397			   rp->mbox);
 4398		return -EINVAL;
 4399	}
 4400
 4401	rp->descr = np->ops->alloc_coherent(np->device,
 4402					    MAX_TX_RING_SIZE * sizeof(__le64),
 4403					    &rp->descr_dma, GFP_KERNEL);
 4404	if (!rp->descr)
 4405		return -ENOMEM;
 4406	if ((unsigned long)rp->descr & (64UL - 1)) {
 4407		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
 4408			   rp->descr);
 4409		return -EINVAL;
 4410	}
 4411
 4412	rp->pending = MAX_TX_RING_SIZE;
 4413	rp->prod = 0;
 4414	rp->cons = 0;
 4415	rp->wrap_bit = 0;
 4416
 4417	/* XXX make these configurable... XXX */
 4418	rp->mark_freq = rp->pending / 4;
 4419
 4420	niu_set_max_burst(np, rp);
 4421
 4422	return 0;
 4423}
 4424
 4425static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
 4426{
 4427	u16 bss;
 4428
 4429	bss = min(PAGE_SHIFT, 15);
 4430
 4431	rp->rbr_block_size = 1 << bss;
 4432	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
 4433
 4434	rp->rbr_sizes[0] = 256;
 4435	rp->rbr_sizes[1] = 1024;
 4436	if (np->dev->mtu > ETH_DATA_LEN) {
 4437		switch (PAGE_SIZE) {
 4438		case 4 * 1024:
 4439			rp->rbr_sizes[2] = 4096;
 4440			break;
 4441
 4442		default:
 4443			rp->rbr_sizes[2] = 8192;
 4444			break;
 4445		}
 4446	} else {
 4447		rp->rbr_sizes[2] = 2048;
 4448	}
 4449	rp->rbr_sizes[3] = rp->rbr_block_size;
 4450}
 4451
 4452static int niu_alloc_channels(struct niu *np)
 4453{
 4454	struct niu_parent *parent = np->parent;
 4455	int first_rx_channel, first_tx_channel;
 4456	int num_rx_rings, num_tx_rings;
 4457	struct rx_ring_info *rx_rings;
 4458	struct tx_ring_info *tx_rings;
 4459	int i, port, err;
 4460
 4461	port = np->port;
 4462	first_rx_channel = first_tx_channel = 0;
 4463	for (i = 0; i < port; i++) {
 4464		first_rx_channel += parent->rxchan_per_port[i];
 4465		first_tx_channel += parent->txchan_per_port[i];
 4466	}
 4467
 4468	num_rx_rings = parent->rxchan_per_port[port];
 4469	num_tx_rings = parent->txchan_per_port[port];
 4470
 4471	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
 4472			   GFP_KERNEL);
 4473	err = -ENOMEM;
 4474	if (!rx_rings)
 4475		goto out_err;
 4476
 4477	np->num_rx_rings = num_rx_rings;
 4478	smp_wmb();
 4479	np->rx_rings = rx_rings;
 4480
 4481	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
 4482
 4483	for (i = 0; i < np->num_rx_rings; i++) {
 4484		struct rx_ring_info *rp = &np->rx_rings[i];
 4485
 4486		rp->np = np;
 4487		rp->rx_channel = first_rx_channel + i;
 4488
 4489		err = niu_alloc_rx_ring_info(np, rp);
 4490		if (err)
 4491			goto out_err;
 4492
 4493		niu_size_rbr(np, rp);
 4494
 4495		/* XXX better defaults, configurable, etc... XXX */
 4496		rp->nonsyn_window = 64;
 4497		rp->nonsyn_threshold = rp->rcr_table_size - 64;
 4498		rp->syn_window = 64;
 4499		rp->syn_threshold = rp->rcr_table_size - 64;
 4500		rp->rcr_pkt_threshold = 16;
 4501		rp->rcr_timeout = 8;
 4502		rp->rbr_kick_thresh = RBR_REFILL_MIN;
 4503		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
 4504			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
 4505
 4506		err = niu_rbr_fill(np, rp, GFP_KERNEL);
 4507		if (err)
 4508			return err;
 4509	}
 4510
 4511	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
 4512			   GFP_KERNEL);
 4513	err = -ENOMEM;
 4514	if (!tx_rings)
 4515		goto out_err;
 4516
 4517	np->num_tx_rings = num_tx_rings;
 4518	smp_wmb();
 4519	np->tx_rings = tx_rings;
 4520
 4521	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
 4522
 4523	for (i = 0; i < np->num_tx_rings; i++) {
 4524		struct tx_ring_info *rp = &np->tx_rings[i];
 4525
 4526		rp->np = np;
 4527		rp->tx_channel = first_tx_channel + i;
 4528
 4529		err = niu_alloc_tx_ring_info(np, rp);
 4530		if (err)
 4531			goto out_err;
 4532	}
 4533
 4534	return 0;
 4535
 4536out_err:
 4537	niu_free_channels(np);
 4538	return err;
 4539}
 4540
 4541static int niu_tx_cs_sng_poll(struct niu *np, int channel)
 4542{
 4543	int limit = 1000;
 4544
 4545	while (--limit > 0) {
 4546		u64 val = nr64(TX_CS(channel));
 4547		if (val & TX_CS_SNG_STATE)
 4548			return 0;
 4549	}
 4550	return -ENODEV;
 4551}
 4552
 4553static int niu_tx_channel_stop(struct niu *np, int channel)
 4554{
 4555	u64 val = nr64(TX_CS(channel));
 4556
 4557	val |= TX_CS_STOP_N_GO;
 4558	nw64(TX_CS(channel), val);
 4559
 4560	return niu_tx_cs_sng_poll(np, channel);
 4561}
 4562
 4563static int niu_tx_cs_reset_poll(struct niu *np, int channel)
 4564{
 4565	int limit = 1000;
 4566
 4567	while (--limit > 0) {
 4568		u64 val = nr64(TX_CS(channel));
 4569		if (!(val & TX_CS_RST))
 4570			return 0;
 4571	}
 4572	return -ENODEV;
 4573}
 4574
 4575static int niu_tx_channel_reset(struct niu *np, int channel)
 4576{
 4577	u64 val = nr64(TX_CS(channel));
 4578	int err;
 4579
 4580	val |= TX_CS_RST;
 4581	nw64(TX_CS(channel), val);
 4582
 4583	err = niu_tx_cs_reset_poll(np, channel);
 4584	if (!err)
 4585		nw64(TX_RING_KICK(channel), 0);
 4586
 4587	return err;
 4588}
 4589
 4590static int niu_tx_channel_lpage_init(struct niu *np, int channel)
 4591{
 4592	u64 val;
 4593
 4594	nw64(TX_LOG_MASK1(channel), 0);
 4595	nw64(TX_LOG_VAL1(channel), 0);
 4596	nw64(TX_LOG_MASK2(channel), 0);
 4597	nw64(TX_LOG_VAL2(channel), 0);
 4598	nw64(TX_LOG_PAGE_RELO1(channel), 0);
 4599	nw64(TX_LOG_PAGE_RELO2(channel), 0);
 4600	nw64(TX_LOG_PAGE_HDL(channel), 0);
 4601
 4602	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
 4603	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
 4604	nw64(TX_LOG_PAGE_VLD(channel), val);
 4605
 4606	/* XXX TXDMA 32bit mode? XXX */
 4607
 4608	return 0;
 4609}
 4610
 4611static void niu_txc_enable_port(struct niu *np, int on)
 4612{
 4613	unsigned long flags;
 4614	u64 val, mask;
 4615
 4616	niu_lock_parent(np, flags);
 4617	val = nr64(TXC_CONTROL);
 4618	mask = (u64)1 << np->port;
 4619	if (on) {
 4620		val |= TXC_CONTROL_ENABLE | mask;
 4621	} else {
 4622		val &= ~mask;
 4623		if ((val & ~TXC_CONTROL_ENABLE) == 0)
 4624			val &= ~TXC_CONTROL_ENABLE;
 4625	}
 4626	nw64(TXC_CONTROL, val);
 4627	niu_unlock_parent(np, flags);
 4628}
 4629
 4630static void niu_txc_set_imask(struct niu *np, u64 imask)
 4631{
 4632	unsigned long flags;
 4633	u64 val;
 4634
 4635	niu_lock_parent(np, flags);
 4636	val = nr64(TXC_INT_MASK);
 4637	val &= ~TXC_INT_MASK_VAL(np->port);
 4638	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
 4639	niu_unlock_parent(np, flags);
 4640}
 4641
 4642static void niu_txc_port_dma_enable(struct niu *np, int on)
 4643{
 4644	u64 val = 0;
 4645
 4646	if (on) {
 4647		int i;
 4648
 4649		for (i = 0; i < np->num_tx_rings; i++)
 4650			val |= (1 << np->tx_rings[i].tx_channel);
 4651	}
 4652	nw64(TXC_PORT_DMA(np->port), val);
 4653}
 4654
 4655static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 4656{
 4657	int err, channel = rp->tx_channel;
 4658	u64 val, ring_len;
 4659
 4660	err = niu_tx_channel_stop(np, channel);
 4661	if (err)
 4662		return err;
 4663
 4664	err = niu_tx_channel_reset(np, channel);
 4665	if (err)
 4666		return err;
 4667
 4668	err = niu_tx_channel_lpage_init(np, channel);
 4669	if (err)
 4670		return err;
 4671
 4672	nw64(TXC_DMA_MAX(channel), rp->max_burst);
 4673	nw64(TX_ENT_MSK(channel), 0);
 4674
 4675	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
 4676			      TX_RNG_CFIG_STADDR)) {
 4677		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
 4678			   channel, (unsigned long long)rp->descr_dma);
 4679		return -EINVAL;
 4680	}
 4681
 4682	/* The length field in TX_RNG_CFIG is measured in 64-byte
 4683	 * blocks.  rp->pending is the number of TX descriptors in
 4684	 * our ring, 8 bytes each, thus we divide by 8 bytes more
 4685	 * to get the proper value the chip wants.
 4686	 */
 4687	ring_len = (rp->pending / 8);
 4688
 4689	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
 4690	       rp->descr_dma);
 4691	nw64(TX_RNG_CFIG(channel), val);
 4692
 4693	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
 4694	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
 4695		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
 4696			    channel, (unsigned long long)rp->mbox_dma);
 4697		return -EINVAL;
 4698	}
 4699	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
 4700	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
 4701
 4702	nw64(TX_CS(channel), 0);
 4703
 4704	rp->last_pkt_cnt = 0;
 4705
 4706	return 0;
 4707}
 4708
 4709static void niu_init_rdc_groups(struct niu *np)
 4710{
 4711	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
 4712	int i, first_table_num = tp->first_table_num;
 4713
 4714	for (i = 0; i < tp->num_tables; i++) {
 4715		struct rdc_table *tbl = &tp->tables[i];
 4716		int this_table = first_table_num + i;
 4717		int slot;
 4718
 4719		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
 4720			nw64(RDC_TBL(this_table, slot),
 4721			     tbl->rxdma_channel[slot]);
 4722	}
 4723
 4724	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
 4725}
 4726
 4727static void niu_init_drr_weight(struct niu *np)
 4728{
 4729	int type = phy_decode(np->parent->port_phy, np->port);
 4730	u64 val;
 4731
 4732	switch (type) {
 4733	case PORT_TYPE_10G:
 4734		val = PT_DRR_WEIGHT_DEFAULT_10G;
 4735		break;
 4736
 4737	case PORT_TYPE_1G:
 4738	default:
 4739		val = PT_DRR_WEIGHT_DEFAULT_1G;
 4740		break;
 4741	}
 4742	nw64(PT_DRR_WT(np->port), val);
 4743}
 4744
 4745static int niu_init_hostinfo(struct niu *np)
 4746{
 4747	struct niu_parent *parent = np->parent;
 4748	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 4749	int i, err, num_alt = niu_num_alt_addr(np);
 4750	int first_rdc_table = tp->first_table_num;
 4751
 4752	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 4753	if (err)
 4754		return err;
 4755
 4756	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 4757	if (err)
 4758		return err;
 4759
 4760	for (i = 0; i < num_alt; i++) {
 4761		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
 4762		if (err)
 4763			return err;
 4764	}
 4765
 4766	return 0;
 4767}
 4768
 4769static int niu_rx_channel_reset(struct niu *np, int channel)
 4770{
 4771	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
 4772				      RXDMA_CFIG1_RST, 1000, 10,
 4773				      "RXDMA_CFIG1");
 4774}
 4775
 4776static int niu_rx_channel_lpage_init(struct niu *np, int channel)
 4777{
 4778	u64 val;
 4779
 4780	nw64(RX_LOG_MASK1(channel), 0);
 4781	nw64(RX_LOG_VAL1(channel), 0);
 4782	nw64(RX_LOG_MASK2(channel), 0);
 4783	nw64(RX_LOG_VAL2(channel), 0);
 4784	nw64(RX_LOG_PAGE_RELO1(channel), 0);
 4785	nw64(RX_LOG_PAGE_RELO2(channel), 0);
 4786	nw64(RX_LOG_PAGE_HDL(channel), 0);
 4787
 4788	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
 4789	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
 4790	nw64(RX_LOG_PAGE_VLD(channel), val);
 4791
 4792	return 0;
 4793}
 4794
 4795static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
 4796{
 4797	u64 val;
 4798
 4799	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
 4800	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
 4801	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
 4802	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
 4803	nw64(RDC_RED_PARA(rp->rx_channel), val);
 4804}
 4805
 4806static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
 4807{
 4808	u64 val = 0;
 4809
 4810	*ret = 0;
 4811	switch (rp->rbr_block_size) {
 4812	case 4 * 1024:
 4813		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4814		break;
 4815	case 8 * 1024:
 4816		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4817		break;
 4818	case 16 * 1024:
 4819		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4820		break;
 4821	case 32 * 1024:
 4822		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4823		break;
 4824	default:
 4825		return -EINVAL;
 4826	}
 4827	val |= RBR_CFIG_B_VLD2;
 4828	switch (rp->rbr_sizes[2]) {
 4829	case 2 * 1024:
 4830		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4831		break;
 4832	case 4 * 1024:
 4833		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4834		break;
 4835	case 8 * 1024:
 4836		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4837		break;
 4838	case 16 * 1024:
 4839		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4840		break;
 4841
 4842	default:
 4843		return -EINVAL;
 4844	}
 4845	val |= RBR_CFIG_B_VLD1;
 4846	switch (rp->rbr_sizes[1]) {
 4847	case 1 * 1024:
 4848		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4849		break;
 4850	case 2 * 1024:
 4851		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4852		break;
 4853	case 4 * 1024:
 4854		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4855		break;
 4856	case 8 * 1024:
 4857		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4858		break;
 4859
 4860	default:
 4861		return -EINVAL;
 4862	}
 4863	val |= RBR_CFIG_B_VLD0;
 4864	switch (rp->rbr_sizes[0]) {
 4865	case 256:
 4866		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
 4867		break;
 4868	case 512:
 4869		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
 4870		break;
 4871	case 1 * 1024:
 4872		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
 4873		break;
 4874	case 2 * 1024:
 4875		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
 4876		break;
 4877
 4878	default:
 4879		return -EINVAL;
 4880	}
 4881
 4882	*ret = val;
 4883	return 0;
 4884}
 4885
 4886static int niu_enable_rx_channel(struct niu *np, int channel, int on)
 4887{
 4888	u64 val = nr64(RXDMA_CFIG1(channel));
 4889	int limit;
 4890
 4891	if (on)
 4892		val |= RXDMA_CFIG1_EN;
 4893	else
 4894		val &= ~RXDMA_CFIG1_EN;
 4895	nw64(RXDMA_CFIG1(channel), val);
 4896
 4897	limit = 1000;
 4898	while (--limit > 0) {
 4899		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
 4900			break;
 4901		udelay(10);
 4902	}
 4903	if (limit <= 0)
 4904		return -ENODEV;
 4905	return 0;
 4906}
 4907
 4908static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 4909{
 4910	int err, channel = rp->rx_channel;
 4911	u64 val;
 4912
 4913	err = niu_rx_channel_reset(np, channel);
 4914	if (err)
 4915		return err;
 4916
 4917	err = niu_rx_channel_lpage_init(np, channel);
 4918	if (err)
 4919		return err;
 4920
 4921	niu_rx_channel_wred_init(np, rp);
 4922
 4923	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
 4924	nw64(RX_DMA_CTL_STAT(channel),
 4925	     (RX_DMA_CTL_STAT_MEX |
 4926	      RX_DMA_CTL_STAT_RCRTHRES |
 4927	      RX_DMA_CTL_STAT_RCRTO |
 4928	      RX_DMA_CTL_STAT_RBR_EMPTY));
 4929	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
 4930	nw64(RXDMA_CFIG2(channel),
 4931	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
 4932	      RXDMA_CFIG2_FULL_HDR));
 4933	nw64(RBR_CFIG_A(channel),
 4934	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
 4935	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
 4936	err = niu_compute_rbr_cfig_b(rp, &val);
 4937	if (err)
 4938		return err;
 4939	nw64(RBR_CFIG_B(channel), val);
 4940	nw64(RCRCFIG_A(channel),
 4941	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
 4942	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
 4943	nw64(RCRCFIG_B(channel),
 4944	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
 4945	     RCRCFIG_B_ENTOUT |
 4946	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
 4947
 4948	err = niu_enable_rx_channel(np, channel, 1);
 4949	if (err)
 4950		return err;
 4951
 4952	nw64(RBR_KICK(channel), rp->rbr_index);
 4953
 4954	val = nr64(RX_DMA_CTL_STAT(channel));
 4955	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
 4956	nw64(RX_DMA_CTL_STAT(channel), val);
 4957
 4958	return 0;
 4959}
 4960
 4961static int niu_init_rx_channels(struct niu *np)
 4962{
 4963	unsigned long flags;
 4964	u64 seed = jiffies_64;
 4965	int err, i;
 4966
 4967	niu_lock_parent(np, flags);
 4968	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
 4969	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
 4970	niu_unlock_parent(np, flags);
 4971
 4972	/* XXX RXDMA 32bit mode? XXX */
 4973
 4974	niu_init_rdc_groups(np);
 4975	niu_init_drr_weight(np);
 4976
 4977	err = niu_init_hostinfo(np);
 4978	if (err)
 4979		return err;
 4980
 4981	for (i = 0; i < np->num_rx_rings; i++) {
 4982		struct rx_ring_info *rp = &np->rx_rings[i];
 4983
 4984		err = niu_init_one_rx_channel(np, rp);
 4985		if (err)
 4986			return err;
 4987	}
 4988
 4989	return 0;
 4990}
 4991
 4992static int niu_set_ip_frag_rule(struct niu *np)
 4993{
 4994	struct niu_parent *parent = np->parent;
 4995	struct niu_classifier *cp = &np->clas;
 4996	struct niu_tcam_entry *tp;
 4997	int index, err;
 4998
 4999	index = cp->tcam_top;
 5000	tp = &parent->tcam[index];
 5001
 5002	/* Note that the noport bit is the same in both ipv4 and
 5003	 * ipv6 format TCAM entries.
 5004	 */
 5005	memset(tp, 0, sizeof(*tp));
 5006	tp->key[1] = TCAM_V4KEY1_NOPORT;
 5007	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
 5008	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
 5009			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
 5010	err = tcam_write(np, index, tp->key, tp->key_mask);
 5011	if (err)
 5012		return err;
 5013	err = tcam_assoc_write(np, index, tp->assoc_data);
 5014	if (err)
 5015		return err;
 5016	tp->valid = 1;
 5017	cp->tcam_valid_entries++;
 5018
 5019	return 0;
 5020}
 5021
 5022static int niu_init_classifier_hw(struct niu *np)
 5023{
 5024	struct niu_parent *parent = np->parent;
 5025	struct niu_classifier *cp = &np->clas;
 5026	int i, err;
 5027
 5028	nw64(H1POLY, cp->h1_init);
 5029	nw64(H2POLY, cp->h2_init);
 5030
 5031	err = niu_init_hostinfo(np);
 5032	if (err)
 5033		return err;
 5034
 5035	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
 5036		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
 5037
 5038		vlan_tbl_write(np, i, np->port,
 5039			       vp->vlan_pref, vp->rdc_num);
 5040	}
 5041
 5042	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
 5043		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
 5044
 5045		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
 5046						ap->rdc_num, ap->mac_pref);
 5047		if (err)
 5048			return err;
 5049	}
 5050
 5051	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
 5052		int index = i - CLASS_CODE_USER_PROG1;
 5053
 5054		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
 5055		if (err)
 5056			return err;
 5057		err = niu_set_flow_key(np, i, parent->flow_key[index]);
 5058		if (err)
 5059			return err;
 5060	}
 5061
 5062	err = niu_set_ip_frag_rule(np);
 5063	if (err)
 5064		return err;
 5065
 5066	tcam_enable(np, 1);
 5067
 5068	return 0;
 5069}
 5070
 5071static int niu_zcp_write(struct niu *np, int index, u64 *data)
 5072{
 5073	nw64(ZCP_RAM_DATA0, data[0]);
 5074	nw64(ZCP_RAM_DATA1, data[1]);
 5075	nw64(ZCP_RAM_DATA2, data[2]);
 5076	nw64(ZCP_RAM_DATA3, data[3]);
 5077	nw64(ZCP_RAM_DATA4, data[4]);
 5078	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
 5079	nw64(ZCP_RAM_ACC,
 5080	     (ZCP_RAM_ACC_WRITE |
 5081	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
 5082	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
 5083
 5084	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5085				   1000, 100);
 5086}
 5087
 5088static int niu_zcp_read(struct niu *np, int index, u64 *data)
 5089{
 5090	int err;
 5091
 5092	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5093				  1000, 100);
 5094	if (err) {
 5095		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
 5096			   (unsigned long long)nr64(ZCP_RAM_ACC));
 5097		return err;
 5098	}
 5099
 5100	nw64(ZCP_RAM_ACC,
 5101	     (ZCP_RAM_ACC_READ |
 5102	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
 5103	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
 5104
 5105	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5106				  1000, 100);
 5107	if (err) {
 5108		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
 5109			   (unsigned long long)nr64(ZCP_RAM_ACC));
 5110		return err;
 5111	}
 5112
 5113	data[0] = nr64(ZCP_RAM_DATA0);
 5114	data[1] = nr64(ZCP_RAM_DATA1);
 5115	data[2] = nr64(ZCP_RAM_DATA2);
 5116	data[3] = nr64(ZCP_RAM_DATA3);
 5117	data[4] = nr64(ZCP_RAM_DATA4);
 5118
 5119	return 0;
 5120}
 5121
 5122static void niu_zcp_cfifo_reset(struct niu *np)
 5123{
 5124	u64 val = nr64(RESET_CFIFO);
 5125
 5126	val |= RESET_CFIFO_RST(np->port);
 5127	nw64(RESET_CFIFO, val);
 5128	udelay(10);
 5129
 5130	val &= ~RESET_CFIFO_RST(np->port);
 5131	nw64(RESET_CFIFO, val);
 5132}
 5133
 5134static int niu_init_zcp(struct niu *np)
 5135{
 5136	u64 data[5], rbuf[5];
 5137	int i, max, err;
 5138
 5139	if (np->parent->plat_type != PLAT_TYPE_NIU) {
 5140		if (np->port == 0 || np->port == 1)
 5141			max = ATLAS_P0_P1_CFIFO_ENTRIES;
 5142		else
 5143			max = ATLAS_P2_P3_CFIFO_ENTRIES;
 5144	} else
 5145		max = NIU_CFIFO_ENTRIES;
 5146
 5147	data[0] = 0;
 5148	data[1] = 0;
 5149	data[2] = 0;
 5150	data[3] = 0;
 5151	data[4] = 0;
 5152
 5153	for (i = 0; i < max; i++) {
 5154		err = niu_zcp_write(np, i, data);
 5155		if (err)
 5156			return err;
 5157		err = niu_zcp_read(np, i, rbuf);
 5158		if (err)
 5159			return err;
 5160	}
 5161
 5162	niu_zcp_cfifo_reset(np);
 5163	nw64(CFIFO_ECC(np->port), 0);
 5164	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
 5165	(void) nr64(ZCP_INT_STAT);
 5166	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
 5167
 5168	return 0;
 5169}
 5170
 5171static void niu_ipp_write(struct niu *np, int index, u64 *data)
 5172{
 5173	u64 val = nr64_ipp(IPP_CFIG);
 5174
 5175	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
 5176	nw64_ipp(IPP_DFIFO_WR_PTR, index);
 5177	nw64_ipp(IPP_DFIFO_WR0, data[0]);
 5178	nw64_ipp(IPP_DFIFO_WR1, data[1]);
 5179	nw64_ipp(IPP_DFIFO_WR2, data[2]);
 5180	nw64_ipp(IPP_DFIFO_WR3, data[3]);
 5181	nw64_ipp(IPP_DFIFO_WR4, data[4]);
 5182	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
 5183}
 5184
 5185static void niu_ipp_read(struct niu *np, int index, u64 *data)
 5186{
 5187	nw64_ipp(IPP_DFIFO_RD_PTR, index);
 5188	data[0] = nr64_ipp(IPP_DFIFO_RD0);
 5189	data[1] = nr64_ipp(IPP_DFIFO_RD1);
 5190	data[2] = nr64_ipp(IPP_DFIFO_RD2);
 5191	data[3] = nr64_ipp(IPP_DFIFO_RD3);
 5192	data[4] = nr64_ipp(IPP_DFIFO_RD4);
 5193}
 5194
 5195static int niu_ipp_reset(struct niu *np)
 5196{
 5197	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
 5198					  1000, 100, "IPP_CFIG");
 5199}
 5200
 5201static int niu_init_ipp(struct niu *np)
 5202{
 5203	u64 data[5], rbuf[5], val;
 5204	int i, max, err;
 5205
 5206	if (np->parent->plat_type != PLAT_TYPE_NIU) {
 5207		if (np->port == 0 || np->port == 1)
 5208			max = ATLAS_P0_P1_DFIFO_ENTRIES;
 5209		else
 5210			max = ATLAS_P2_P3_DFIFO_ENTRIES;
 5211	} else
 5212		max = NIU_DFIFO_ENTRIES;
 5213
 5214	data[0] = 0;
 5215	data[1] = 0;
 5216	data[2] = 0;
 5217	data[3] = 0;
 5218	data[4] = 0;
 5219
 5220	for (i = 0; i < max; i++) {
 5221		niu_ipp_write(np, i, data);
 5222		niu_ipp_read(np, i, rbuf);
 5223	}
 5224
 5225	(void) nr64_ipp(IPP_INT_STAT);
 5226	(void) nr64_ipp(IPP_INT_STAT);
 5227
 5228	err = niu_ipp_reset(np);
 5229	if (err)
 5230		return err;
 5231
 5232	(void) nr64_ipp(IPP_PKT_DIS);
 5233	(void) nr64_ipp(IPP_BAD_CS_CNT);
 5234	(void) nr64_ipp(IPP_ECC);
 5235
 5236	(void) nr64_ipp(IPP_INT_STAT);
 5237
 5238	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
 5239
 5240	val = nr64_ipp(IPP_CFIG);
 5241	val &= ~IPP_CFIG_IP_MAX_PKT;
 5242	val |= (IPP_CFIG_IPP_ENABLE |
 5243		IPP_CFIG_DFIFO_ECC_EN |
 5244		IPP_CFIG_DROP_BAD_CRC |
 5245		IPP_CFIG_CKSUM_EN |
 5246		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
 5247	nw64_ipp(IPP_CFIG, val);
 5248
 5249	return 0;
 5250}
 5251
 5252static void niu_handle_led(struct niu *np, int status)
 5253{
 5254	u64 val;
 5255	val = nr64_mac(XMAC_CONFIG);
 5256
 5257	if ((np->flags & NIU_FLAGS_10G) != 0 &&
 5258	    (np->flags & NIU_FLAGS_FIBER) != 0) {
 5259		if (status) {
 5260			val |= XMAC_CONFIG_LED_POLARITY;
 5261			val &= ~XMAC_CONFIG_FORCE_LED_ON;
 5262		} else {
 5263			val |= XMAC_CONFIG_FORCE_LED_ON;
 5264			val &= ~XMAC_CONFIG_LED_POLARITY;
 5265		}
 5266	}
 5267
 5268	nw64_mac(XMAC_CONFIG, val);
 5269}
 5270
 5271static void niu_init_xif_xmac(struct niu *np)
 5272{
 5273	struct niu_link_config *lp = &np->link_config;
 5274	u64 val;
 5275
 5276	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
 5277		val = nr64(MIF_CONFIG);
 5278		val |= MIF_CONFIG_ATCA_GE;
 5279		nw64(MIF_CONFIG, val);
 5280	}
 5281
 5282	val = nr64_mac(XMAC_CONFIG);
 5283	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
 5284
 5285	val |= XMAC_CONFIG_TX_OUTPUT_EN;
 5286
 5287	if (lp->loopback_mode == LOOPBACK_MAC) {
 5288		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
 5289		val |= XMAC_CONFIG_LOOPBACK;
 5290	} else {
 5291		val &= ~XMAC_CONFIG_LOOPBACK;
 5292	}
 5293
 5294	if (np->flags & NIU_FLAGS_10G) {
 5295		val &= ~XMAC_CONFIG_LFS_DISABLE;
 5296	} else {
 5297		val |= XMAC_CONFIG_LFS_DISABLE;
 5298		if (!(np->flags & NIU_FLAGS_FIBER) &&
 5299		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
 5300			val |= XMAC_CONFIG_1G_PCS_BYPASS;
 5301		else
 5302			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
 5303	}
 5304
 5305	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
 5306
 5307	if (lp->active_speed == SPEED_100)
 5308		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
 5309	else
 5310		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
 5311
 5312	nw64_mac(XMAC_CONFIG, val);
 5313
 5314	val = nr64_mac(XMAC_CONFIG);
 5315	val &= ~XMAC_CONFIG_MODE_MASK;
 5316	if (np->flags & NIU_FLAGS_10G) {
 5317		val |= XMAC_CONFIG_MODE_XGMII;
 5318	} else {
 5319		if (lp->active_speed == SPEED_1000)
 5320			val |= XMAC_CONFIG_MODE_GMII;
 5321		else
 5322			val |= XMAC_CONFIG_MODE_MII;
 5323	}
 5324
 5325	nw64_mac(XMAC_CONFIG, val);
 5326}
 5327
 5328static void niu_init_xif_bmac(struct niu *np)
 5329{
 5330	struct niu_link_config *lp = &np->link_config;
 5331	u64 val;
 5332
 5333	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
 5334
 5335	if (lp->loopback_mode == LOOPBACK_MAC)
 5336		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
 5337	else
 5338		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
 5339
 5340	if (lp->active_speed == SPEED_1000)
 5341		val |= BMAC_XIF_CONFIG_GMII_MODE;
 5342	else
 5343		val &= ~BMAC_XIF_CONFIG_GMII_MODE;
 5344
 5345	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
 5346		 BMAC_XIF_CONFIG_LED_POLARITY);
 5347
 5348	if (!(np->flags & NIU_FLAGS_10G) &&
 5349	    !(np->flags & NIU_FLAGS_FIBER) &&
 5350	    lp->active_speed == SPEED_100)
 5351		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
 5352	else
 5353		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
 5354
 5355	nw64_mac(BMAC_XIF_CONFIG, val);
 5356}
 5357
 5358static void niu_init_xif(struct niu *np)
 5359{
 5360	if (np->flags & NIU_FLAGS_XMAC)
 5361		niu_init_xif_xmac(np);
 5362	else
 5363		niu_init_xif_bmac(np);
 5364}
 5365
 5366static void niu_pcs_mii_reset(struct niu *np)
 5367{
 5368	int limit = 1000;
 5369	u64 val = nr64_pcs(PCS_MII_CTL);
 5370	val |= PCS_MII_CTL_RST;
 5371	nw64_pcs(PCS_MII_CTL, val);
 5372	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
 5373		udelay(100);
 5374		val = nr64_pcs(PCS_MII_CTL);
 5375	}
 5376}
 5377
 5378static void niu_xpcs_reset(struct niu *np)
 5379{
 5380	int limit = 1000;
 5381	u64 val = nr64_xpcs(XPCS_CONTROL1);
 5382	val |= XPCS_CONTROL1_RESET;
 5383	nw64_xpcs(XPCS_CONTROL1, val);
 5384	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
 5385		udelay(100);
 5386		val = nr64_xpcs(XPCS_CONTROL1);
 5387	}
 5388}
 5389
 5390static int niu_init_pcs(struct niu *np)
 5391{
 5392	struct niu_link_config *lp = &np->link_config;
 5393	u64 val;
 5394
 5395	switch (np->flags & (NIU_FLAGS_10G |
 5396			     NIU_FLAGS_FIBER |
 5397			     NIU_FLAGS_XCVR_SERDES)) {
 5398	case NIU_FLAGS_FIBER:
 5399		/* 1G fiber */
 5400		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
 5401		nw64_pcs(PCS_DPATH_MODE, 0);
 5402		niu_pcs_mii_reset(np);
 5403		break;
 5404
 5405	case NIU_FLAGS_10G:
 5406	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 5407	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 5408		/* 10G SERDES */
 5409		if (!(np->flags & NIU_FLAGS_XMAC))
 5410			return -EINVAL;
 5411
 5412		/* 10G copper or fiber */
 5413		val = nr64_mac(XMAC_CONFIG);
 5414		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
 5415		nw64_mac(XMAC_CONFIG, val);
 5416
 5417		niu_xpcs_reset(np);
 5418
 5419		val = nr64_xpcs(XPCS_CONTROL1);
 5420		if (lp->loopback_mode == LOOPBACK_PHY)
 5421			val |= XPCS_CONTROL1_LOOPBACK;
 5422		else
 5423			val &= ~XPCS_CONTROL1_LOOPBACK;
 5424		nw64_xpcs(XPCS_CONTROL1, val);
 5425
 5426		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
 5427		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
 5428		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
 5429		break;
 5430
 5431
 5432	case NIU_FLAGS_XCVR_SERDES:
 5433		/* 1G SERDES */
 5434		niu_pcs_mii_reset(np);
 5435		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
 5436		nw64_pcs(PCS_DPATH_MODE, 0);
 5437		break;
 5438
 5439	case 0:
 5440		/* 1G copper */
 5441	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
 5442		/* 1G RGMII FIBER */
 5443		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
 5444		niu_pcs_mii_reset(np);
 5445		break;
 5446
 5447	default:
 5448		return -EINVAL;
 5449	}
 5450
 5451	return 0;
 5452}
 5453
 5454static int niu_reset_tx_xmac(struct niu *np)
 5455{
 5456	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
 5457					  (XTXMAC_SW_RST_REG_RS |
 5458					   XTXMAC_SW_RST_SOFT_RST),
 5459					  1000, 100, "XTXMAC_SW_RST");
 5460}
 5461
 5462static int niu_reset_tx_bmac(struct niu *np)
 5463{
 5464	int limit;
 5465
 5466	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
 5467	limit = 1000;
 5468	while (--limit >= 0) {
 5469		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
 5470			break;
 5471		udelay(100);
 5472	}
 5473	if (limit < 0) {
 5474		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
 5475			np->port,
 5476			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
 5477		return -ENODEV;
 5478	}
 5479
 5480	return 0;
 5481}
 5482
 5483static int niu_reset_tx_mac(struct niu *np)
 5484{
 5485	if (np->flags & NIU_FLAGS_XMAC)
 5486		return niu_reset_tx_xmac(np);
 5487	else
 5488		return niu_reset_tx_bmac(np);
 5489}
 5490
 5491static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
 5492{
 5493	u64 val;
 5494
 5495	val = nr64_mac(XMAC_MIN);
 5496	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
 5497		 XMAC_MIN_RX_MIN_PKT_SIZE);
 5498	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
 5499	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
 5500	nw64_mac(XMAC_MIN, val);
 5501
 5502	nw64_mac(XMAC_MAX, max);
 5503
 5504	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
 5505
 5506	val = nr64_mac(XMAC_IPG);
 5507	if (np->flags & NIU_FLAGS_10G) {
 5508		val &= ~XMAC_IPG_IPG_XGMII;
 5509		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
 5510	} else {
 5511		val &= ~XMAC_IPG_IPG_MII_GMII;
 5512		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
 5513	}
 5514	nw64_mac(XMAC_IPG, val);
 5515
 5516	val = nr64_mac(XMAC_CONFIG);
 5517	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
 5518		 XMAC_CONFIG_STRETCH_MODE |
 5519		 XMAC_CONFIG_VAR_MIN_IPG_EN |
 5520		 XMAC_CONFIG_TX_ENABLE);
 5521	nw64_mac(XMAC_CONFIG, val);
 5522
 5523	nw64_mac(TXMAC_FRM_CNT, 0);
 5524	nw64_mac(TXMAC_BYTE_CNT, 0);
 5525}
 5526
 5527static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
 5528{
 5529	u64 val;
 5530
 5531	nw64_mac(BMAC_MIN_FRAME, min);
 5532	nw64_mac(BMAC_MAX_FRAME, max);
 5533
 5534	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
 5535	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
 5536	nw64_mac(BMAC_PREAMBLE_SIZE, 7);
 5537
 5538	val = nr64_mac(BTXMAC_CONFIG);
 5539	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
 5540		 BTXMAC_CONFIG_ENABLE);
 5541	nw64_mac(BTXMAC_CONFIG, val);
 5542}
 5543
 5544static void niu_init_tx_mac(struct niu *np)
 5545{
 5546	u64 min, max;
 5547
 5548	min = 64;
 5549	if (np->dev->mtu > ETH_DATA_LEN)
 5550		max = 9216;
 5551	else
 5552		max = 1522;
 5553
 5554	/* The XMAC_MIN register only accepts values for TX min which
 5555	 * have the low 3 bits cleared.
 5556	 */
 5557	BUG_ON(min & 0x7);
 5558
 5559	if (np->flags & NIU_FLAGS_XMAC)
 5560		niu_init_tx_xmac(np, min, max);
 5561	else
 5562		niu_init_tx_bmac(np, min, max);
 5563}
 5564
 5565static int niu_reset_rx_xmac(struct niu *np)
 5566{
 5567	int limit;
 5568
 5569	nw64_mac(XRXMAC_SW_RST,
 5570		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
 5571	limit = 1000;
 5572	while (--limit >= 0) {
 5573		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
 5574						 XRXMAC_SW_RST_SOFT_RST)))
 5575			break;
 5576		udelay(100);
 5577	}
 5578	if (limit < 0) {
 5579		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
 5580			np->port,
 5581			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
 5582		return -ENODEV;
 5583	}
 5584
 5585	return 0;
 5586}
 5587
 5588static int niu_reset_rx_bmac(struct niu *np)
 5589{
 5590	int limit;
 5591
 5592	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
 5593	limit = 1000;
 5594	while (--limit >= 0) {
 5595		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
 5596			break;
 5597		udelay(100);
 5598	}
 5599	if (limit < 0) {
 5600		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
 5601			np->port,
 5602			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
 5603		return -ENODEV;
 5604	}
 5605
 5606	return 0;
 5607}
 5608
 5609static int niu_reset_rx_mac(struct niu *np)
 5610{
 5611	if (np->flags & NIU_FLAGS_XMAC)
 5612		return niu_reset_rx_xmac(np);
 5613	else
 5614		return niu_reset_rx_bmac(np);
 5615}
 5616
 5617static void niu_init_rx_xmac(struct niu *np)
 5618{
 5619	struct niu_parent *parent = np->parent;
 5620	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 5621	int first_rdc_table = tp->first_table_num;
 5622	unsigned long i;
 5623	u64 val;
 5624
 5625	nw64_mac(XMAC_ADD_FILT0, 0);
 5626	nw64_mac(XMAC_ADD_FILT1, 0);
 5627	nw64_mac(XMAC_ADD_FILT2, 0);
 5628	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
 5629	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
 5630	for (i = 0; i < MAC_NUM_HASH; i++)
 5631		nw64_mac(XMAC_HASH_TBL(i), 0);
 5632	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
 5633	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 5634	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 5635
 5636	val = nr64_mac(XMAC_CONFIG);
 5637	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
 5638		 XMAC_CONFIG_PROMISCUOUS |
 5639		 XMAC_CONFIG_PROMISC_GROUP |
 5640		 XMAC_CONFIG_ERR_CHK_DIS |
 5641		 XMAC_CONFIG_RX_CRC_CHK_DIS |
 5642		 XMAC_CONFIG_RESERVED_MULTICAST |
 5643		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
 5644		 XMAC_CONFIG_ADDR_FILTER_EN |
 5645		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
 5646		 XMAC_CONFIG_STRIP_CRC |
 5647		 XMAC_CONFIG_PASS_FLOW_CTRL |
 5648		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
 5649	val |= (XMAC_CONFIG_HASH_FILTER_EN);
 5650	nw64_mac(XMAC_CONFIG, val);
 5651
 5652	nw64_mac(RXMAC_BT_CNT, 0);
 5653	nw64_mac(RXMAC_BC_FRM_CNT, 0);
 5654	nw64_mac(RXMAC_MC_FRM_CNT, 0);
 5655	nw64_mac(RXMAC_FRAG_CNT, 0);
 5656	nw64_mac(RXMAC_HIST_CNT1, 0);
 5657	nw64_mac(RXMAC_HIST_CNT2, 0);
 5658	nw64_mac(RXMAC_HIST_CNT3, 0);
 5659	nw64_mac(RXMAC_HIST_CNT4, 0);
 5660	nw64_mac(RXMAC_HIST_CNT5, 0);
 5661	nw64_mac(RXMAC_HIST_CNT6, 0);
 5662	nw64_mac(RXMAC_HIST_CNT7, 0);
 5663	nw64_mac(RXMAC_MPSZER_CNT, 0);
 5664	nw64_mac(RXMAC_CRC_ER_CNT, 0);
 5665	nw64_mac(RXMAC_CD_VIO_CNT, 0);
 5666	nw64_mac(LINK_FAULT_CNT, 0);
 5667}
 5668
 5669static void niu_init_rx_bmac(struct niu *np)
 5670{
 5671	struct niu_parent *parent = np->parent;
 5672	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 5673	int first_rdc_table = tp->first_table_num;
 5674	unsigned long i;
 5675	u64 val;
 5676
 5677	nw64_mac(BMAC_ADD_FILT0, 0);
 5678	nw64_mac(BMAC_ADD_FILT1, 0);
 5679	nw64_mac(BMAC_ADD_FILT2, 0);
 5680	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
 5681	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
 5682	for (i = 0; i < MAC_NUM_HASH; i++)
 5683		nw64_mac(BMAC_HASH_TBL(i), 0);
 5684	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 5685	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 5686	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
 5687
 5688	val = nr64_mac(BRXMAC_CONFIG);
 5689	val &= ~(BRXMAC_CONFIG_ENABLE |
 5690		 BRXMAC_CONFIG_STRIP_PAD |
 5691		 BRXMAC_CONFIG_STRIP_FCS |
 5692		 BRXMAC_CONFIG_PROMISC |
 5693		 BRXMAC_CONFIG_PROMISC_GRP |
 5694		 BRXMAC_CONFIG_ADDR_FILT_EN |
 5695		 BRXMAC_CONFIG_DISCARD_DIS);
 5696	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
 5697	nw64_mac(BRXMAC_CONFIG, val);
 5698
 5699	val = nr64_mac(BMAC_ADDR_CMPEN);
 5700	val |= BMAC_ADDR_CMPEN_EN0;
 5701	nw64_mac(BMAC_ADDR_CMPEN, val);
 5702}
 5703
 5704static void niu_init_rx_mac(struct niu *np)
 5705{
 5706	niu_set_primary_mac(np, np->dev->dev_addr);
 5707
 5708	if (np->flags & NIU_FLAGS_XMAC)
 5709		niu_init_rx_xmac(np);
 5710	else
 5711		niu_init_rx_bmac(np);
 5712}
 5713
 5714static void niu_enable_tx_xmac(struct niu *np, int on)
 5715{
 5716	u64 val = nr64_mac(XMAC_CONFIG);
 5717
 5718	if (on)
 5719		val |= XMAC_CONFIG_TX_ENABLE;
 5720	else
 5721		val &= ~XMAC_CONFIG_TX_ENABLE;
 5722	nw64_mac(XMAC_CONFIG, val);
 5723}
 5724
 5725static void niu_enable_tx_bmac(struct niu *np, int on)
 5726{
 5727	u64 val = nr64_mac(BTXMAC_CONFIG);
 5728
 5729	if (on)
 5730		val |= BTXMAC_CONFIG_ENABLE;
 5731	else
 5732		val &= ~BTXMAC_CONFIG_ENABLE;
 5733	nw64_mac(BTXMAC_CONFIG, val);
 5734}
 5735
 5736static void niu_enable_tx_mac(struct niu *np, int on)
 5737{
 5738	if (np->flags & NIU_FLAGS_XMAC)
 5739		niu_enable_tx_xmac(np, on);
 5740	else
 5741		niu_enable_tx_bmac(np, on);
 5742}
 5743
 5744static void niu_enable_rx_xmac(struct niu *np, int on)
 5745{
 5746	u64 val = nr64_mac(XMAC_CONFIG);
 5747
 5748	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
 5749		 XMAC_CONFIG_PROMISCUOUS);
 5750
 5751	if (np->flags & NIU_FLAGS_MCAST)
 5752		val |= XMAC_CONFIG_HASH_FILTER_EN;
 5753	if (np->flags & NIU_FLAGS_PROMISC)
 5754		val |= XMAC_CONFIG_PROMISCUOUS;
 5755
 5756	if (on)
 5757		val |= XMAC_CONFIG_RX_MAC_ENABLE;
 5758	else
 5759		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
 5760	nw64_mac(XMAC_CONFIG, val);
 5761}
 5762
 5763static void niu_enable_rx_bmac(struct niu *np, int on)
 5764{
 5765	u64 val = nr64_mac(BRXMAC_CONFIG);
 5766
 5767	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
 5768		 BRXMAC_CONFIG_PROMISC);
 5769
 5770	if (np->flags & NIU_FLAGS_MCAST)
 5771		val |= BRXMAC_CONFIG_HASH_FILT_EN;
 5772	if (np->flags & NIU_FLAGS_PROMISC)
 5773		val |= BRXMAC_CONFIG_PROMISC;
 5774
 5775	if (on)
 5776		val |= BRXMAC_CONFIG_ENABLE;
 5777	else
 5778		val &= ~BRXMAC_CONFIG_ENABLE;
 5779	nw64_mac(BRXMAC_CONFIG, val);
 5780}
 5781
 5782static void niu_enable_rx_mac(struct niu *np, int on)
 5783{
 5784	if (np->flags & NIU_FLAGS_XMAC)
 5785		niu_enable_rx_xmac(np, on);
 5786	else
 5787		niu_enable_rx_bmac(np, on);
 5788}
 5789
 5790static int niu_init_mac(struct niu *np)
 5791{
 5792	int err;
 5793
 5794	niu_init_xif(np);
 5795	err = niu_init_pcs(np);
 5796	if (err)
 5797		return err;
 5798
 5799	err = niu_reset_tx_mac(np);
 5800	if (err)
 5801		return err;
 5802	niu_init_tx_mac(np);
 5803	err = niu_reset_rx_mac(np);
 5804	if (err)
 5805		return err;
 5806	niu_init_rx_mac(np);
 5807
 5808	/* This looks hookey but the RX MAC reset we just did will
 5809	 * undo some of the state we setup in niu_init_tx_mac() so we
 5810	 * have to call it again.  In particular, the RX MAC reset will
 5811	 * set the XMAC_MAX register back to it's default value.
 5812	 */
 5813	niu_init_tx_mac(np);
 5814	niu_enable_tx_mac(np, 1);
 5815
 5816	niu_enable_rx_mac(np, 1);
 5817
 5818	return 0;
 5819}
 5820
 5821static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 5822{
 5823	(void) niu_tx_channel_stop(np, rp->tx_channel);
 5824}
 5825
 5826static void niu_stop_tx_channels(struct niu *np)
 5827{
 5828	int i;
 5829
 5830	for (i = 0; i < np->num_tx_rings; i++) {
 5831		struct tx_ring_info *rp = &np->tx_rings[i];
 5832
 5833		niu_stop_one_tx_channel(np, rp);
 5834	}
 5835}
 5836
 5837static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 5838{
 5839	(void) niu_tx_channel_reset(np, rp->tx_channel);
 5840}
 5841
 5842static void niu_reset_tx_channels(struct niu *np)
 5843{
 5844	int i;
 5845
 5846	for (i = 0; i < np->num_tx_rings; i++) {
 5847		struct tx_ring_info *rp = &np->tx_rings[i];
 5848
 5849		niu_reset_one_tx_channel(np, rp);
 5850	}
 5851}
 5852
 5853static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 5854{
 5855	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
 5856}
 5857
 5858static void niu_stop_rx_channels(struct niu *np)
 5859{
 5860	int i;
 5861
 5862	for (i = 0; i < np->num_rx_rings; i++) {
 5863		struct rx_ring_info *rp = &np->rx_rings[i];
 5864
 5865		niu_stop_one_rx_channel(np, rp);
 5866	}
 5867}
 5868
 5869static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 5870{
 5871	int channel = rp->rx_channel;
 5872
 5873	(void) niu_rx_channel_reset(np, channel);
 5874	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
 5875	nw64(RX_DMA_CTL_STAT(channel), 0);
 5876	(void) niu_enable_rx_channel(np, channel, 0);
 5877}
 5878
 5879static void niu_reset_rx_channels(struct niu *np)
 5880{
 5881	int i;
 5882
 5883	for (i = 0; i < np->num_rx_rings; i++) {
 5884		struct rx_ring_info *rp = &np->rx_rings[i];
 5885
 5886		niu_reset_one_rx_channel(np, rp);
 5887	}
 5888}
 5889
 5890static void niu_disable_ipp(struct niu *np)
 5891{
 5892	u64 rd, wr, val;
 5893	int limit;
 5894
 5895	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
 5896	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
 5897	limit = 100;
 5898	while (--limit >= 0 && (rd != wr)) {
 5899		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
 5900		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
 5901	}
 5902	if (limit < 0 &&
 5903	    (rd != 0 && wr != 1)) {
 5904		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
 5905			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
 5906			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
 5907	}
 5908
 5909	val = nr64_ipp(IPP_CFIG);
 5910	val &= ~(IPP_CFIG_IPP_ENABLE |
 5911		 IPP_CFIG_DFIFO_ECC_EN |
 5912		 IPP_CFIG_DROP_BAD_CRC |
 5913		 IPP_CFIG_CKSUM_EN);
 5914	nw64_ipp(IPP_CFIG, val);
 5915
 5916	(void) niu_ipp_reset(np);
 5917}
 5918
 5919static int niu_init_hw(struct niu *np)
 5920{
 5921	int i, err;
 5922
 5923	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
 5924	niu_txc_enable_port(np, 1);
 5925	niu_txc_port_dma_enable(np, 1);
 5926	niu_txc_set_imask(np, 0);
 5927
 5928	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
 5929	for (i = 0; i < np->num_tx_rings; i++) {
 5930		struct tx_ring_info *rp = &np->tx_rings[i];
 5931
 5932		err = niu_init_one_tx_channel(np, rp);
 5933		if (err)
 5934			return err;
 5935	}
 5936
 5937	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
 5938	err = niu_init_rx_channels(np);
 5939	if (err)
 5940		goto out_uninit_tx_channels;
 5941
 5942	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
 5943	err = niu_init_classifier_hw(np);
 5944	if (err)
 5945		goto out_uninit_rx_channels;
 5946
 5947	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
 5948	err = niu_init_zcp(np);
 5949	if (err)
 5950		goto out_uninit_rx_channels;
 5951
 5952	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
 5953	err = niu_init_ipp(np);
 5954	if (err)
 5955		goto out_uninit_rx_channels;
 5956
 5957	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
 5958	err = niu_init_mac(np);
 5959	if (err)
 5960		goto out_uninit_ipp;
 5961
 5962	return 0;
 5963
 5964out_uninit_ipp:
 5965	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
 5966	niu_disable_ipp(np);
 5967
 5968out_uninit_rx_channels:
 5969	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
 5970	niu_stop_rx_channels(np);
 5971	niu_reset_rx_channels(np);
 5972
 5973out_uninit_tx_channels:
 5974	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
 5975	niu_stop_tx_channels(np);
 5976	niu_reset_tx_channels(np);
 5977
 5978	return err;
 5979}
 5980
 5981static void niu_stop_hw(struct niu *np)
 5982{
 5983	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
 5984	niu_enable_interrupts(np, 0);
 5985
 5986	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
 5987	niu_enable_rx_mac(np, 0);
 5988
 5989	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
 5990	niu_disable_ipp(np);
 5991
 5992	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
 5993	niu_stop_tx_channels(np);
 5994
 5995	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
 5996	niu_stop_rx_channels(np);
 5997
 5998	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
 5999	niu_reset_tx_channels(np);
 6000
 6001	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
 6002	niu_reset_rx_channels(np);
 6003}
 6004
 6005static void niu_set_irq_name(struct niu *np)
 6006{
 6007	int port = np->port;
 6008	int i, j = 1;
 6009
 6010	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
 6011
 6012	if (port == 0) {
 6013		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
 6014		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
 6015		j = 3;
 6016	}
 6017
 6018	for (i = 0; i < np->num_ldg - j; i++) {
 6019		if (i < np->num_rx_rings)
 6020			sprintf(np->irq_name[i+j], "%s-rx-%d",
 6021				np->dev->name, i);
 6022		else if (i < np->num_tx_rings + np->num_rx_rings)
 6023			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
 6024				i - np->num_rx_rings);
 6025	}
 6026}
 6027
 6028static int niu_request_irq(struct niu *np)
 6029{
 6030	int i, j, err;
 6031
 6032	niu_set_irq_name(np);
 6033
 6034	err = 0;
 6035	for (i = 0; i < np->num_ldg; i++) {
 6036		struct niu_ldg *lp = &np->ldg[i];
 6037
 6038		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
 6039				  np->irq_name[i], lp);
 6040		if (err)
 6041			goto out_free_irqs;
 6042
 6043	}
 6044
 6045	return 0;
 6046
 6047out_free_irqs:
 6048	for (j = 0; j < i; j++) {
 6049		struct niu_ldg *lp = &np->ldg[j];
 6050
 6051		free_irq(lp->irq, lp);
 6052	}
 6053	return err;
 6054}
 6055
 6056static void niu_free_irq(struct niu *np)
 6057{
 6058	int i;
 6059
 6060	for (i = 0; i < np->num_ldg; i++) {
 6061		struct niu_ldg *lp = &np->ldg[i];
 6062
 6063		free_irq(lp->irq, lp);
 6064	}
 6065}
 6066
 6067static void niu_enable_napi(struct niu *np)
 6068{
 6069	int i;
 6070
 6071	for (i = 0; i < np->num_ldg; i++)
 6072		napi_enable(&np->ldg[i].napi);
 6073}
 6074
 6075static void niu_disable_napi(struct niu *np)
 6076{
 6077	int i;
 6078
 6079	for (i = 0; i < np->num_ldg; i++)
 6080		napi_disable(&np->ldg[i].napi);
 6081}
 6082
 6083static int niu_open(struct net_device *dev)
 6084{
 6085	struct niu *np = netdev_priv(dev);
 6086	int err;
 6087
 6088	netif_carrier_off(dev);
 6089
 6090	err = niu_alloc_channels(np);
 6091	if (err)
 6092		goto out_err;
 6093
 6094	err = niu_enable_interrupts(np, 0);
 6095	if (err)
 6096		goto out_free_channels;
 6097
 6098	err = niu_request_irq(np);
 6099	if (err)
 6100		goto out_free_channels;
 6101
 6102	niu_enable_napi(np);
 6103
 6104	spin_lock_irq(&np->lock);
 6105
 6106	err = niu_init_hw(np);
 6107	if (!err) {
 6108		timer_setup(&np->timer, niu_timer, 0);
 6109		np->timer.expires = jiffies + HZ;
 6110
 6111		err = niu_enable_interrupts(np, 1);
 6112		if (err)
 6113			niu_stop_hw(np);
 6114	}
 6115
 6116	spin_unlock_irq(&np->lock);
 6117
 6118	if (err) {
 6119		niu_disable_napi(np);
 6120		goto out_free_irq;
 6121	}
 6122
 6123	netif_tx_start_all_queues(dev);
 6124
 6125	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 6126		netif_carrier_on(dev);
 6127
 6128	add_timer(&np->timer);
 6129
 6130	return 0;
 6131
 6132out_free_irq:
 6133	niu_free_irq(np);
 6134
 6135out_free_channels:
 6136	niu_free_channels(np);
 6137
 6138out_err:
 6139	return err;
 6140}
 6141
 6142static void niu_full_shutdown(struct niu *np, struct net_device *dev)
 6143{
 6144	cancel_work_sync(&np->reset_task);
 6145
 6146	niu_disable_napi(np);
 6147	netif_tx_stop_all_queues(dev);
 6148
 6149	del_timer_sync(&np->timer);
 6150
 6151	spin_lock_irq(&np->lock);
 6152
 6153	niu_stop_hw(np);
 6154
 6155	spin_unlock_irq(&np->lock);
 6156}
 6157
 6158static int niu_close(struct net_device *dev)
 6159{
 6160	struct niu *np = netdev_priv(dev);
 6161
 6162	niu_full_shutdown(np, dev);
 6163
 6164	niu_free_irq(np);
 6165
 6166	niu_free_channels(np);
 6167
 6168	niu_handle_led(np, 0);
 6169
 6170	return 0;
 6171}
 6172
 6173static void niu_sync_xmac_stats(struct niu *np)
 6174{
 6175	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
 6176
 6177	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
 6178	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
 6179
 6180	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
 6181	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
 6182	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
 6183	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
 6184	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
 6185	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
 6186	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
 6187	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
 6188	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
 6189	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
 6190	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
 6191	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
 6192	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
 6193	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
 6194	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
 6195	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
 6196}
 6197
 6198static void niu_sync_bmac_stats(struct niu *np)
 6199{
 6200	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
 6201
 6202	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
 6203	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
 6204
 6205	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
 6206	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
 6207	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
 6208	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
 6209}
 6210
 6211static void niu_sync_mac_stats(struct niu *np)
 6212{
 6213	if (np->flags & NIU_FLAGS_XMAC)
 6214		niu_sync_xmac_stats(np);
 6215	else
 6216		niu_sync_bmac_stats(np);
 6217}
 6218
 6219static void niu_get_rx_stats(struct niu *np,
 6220			     struct rtnl_link_stats64 *stats)
 6221{
 6222	u64 pkts, dropped, errors, bytes;
 6223	struct rx_ring_info *rx_rings;
 6224	int i;
 6225
 6226	pkts = dropped = errors = bytes = 0;
 6227
 6228	rx_rings = READ_ONCE(np->rx_rings);
 6229	if (!rx_rings)
 6230		goto no_rings;
 6231
 6232	for (i = 0; i < np->num_rx_rings; i++) {
 6233		struct rx_ring_info *rp = &rx_rings[i];
 6234
 6235		niu_sync_rx_discard_stats(np, rp, 0);
 6236
 6237		pkts += rp->rx_packets;
 6238		bytes += rp->rx_bytes;
 6239		dropped += rp->rx_dropped;
 6240		errors += rp->rx_errors;
 6241	}
 6242
 6243no_rings:
 6244	stats->rx_packets = pkts;
 6245	stats->rx_bytes = bytes;
 6246	stats->rx_dropped = dropped;
 6247	stats->rx_errors = errors;
 6248}
 6249
 6250static void niu_get_tx_stats(struct niu *np,
 6251			     struct rtnl_link_stats64 *stats)
 6252{
 6253	u64 pkts, errors, bytes;
 6254	struct tx_ring_info *tx_rings;
 6255	int i;
 6256
 6257	pkts = errors = bytes = 0;
 6258
 6259	tx_rings = READ_ONCE(np->tx_rings);
 6260	if (!tx_rings)
 6261		goto no_rings;
 6262
 6263	for (i = 0; i < np->num_tx_rings; i++) {
 6264		struct tx_ring_info *rp = &tx_rings[i];
 6265
 6266		pkts += rp->tx_packets;
 6267		bytes += rp->tx_bytes;
 6268		errors += rp->tx_errors;
 6269	}
 6270
 6271no_rings:
 6272	stats->tx_packets = pkts;
 6273	stats->tx_bytes = bytes;
 6274	stats->tx_errors = errors;
 6275}
 6276
 6277static void niu_get_stats(struct net_device *dev,
 6278			  struct rtnl_link_stats64 *stats)
 6279{
 6280	struct niu *np = netdev_priv(dev);
 6281
 6282	if (netif_running(dev)) {
 6283		niu_get_rx_stats(np, stats);
 6284		niu_get_tx_stats(np, stats);
 6285	}
 6286}
 6287
 6288static void niu_load_hash_xmac(struct niu *np, u16 *hash)
 6289{
 6290	int i;
 6291
 6292	for (i = 0; i < 16; i++)
 6293		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
 6294}
 6295
 6296static void niu_load_hash_bmac(struct niu *np, u16 *hash)
 6297{
 6298	int i;
 6299
 6300	for (i = 0; i < 16; i++)
 6301		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
 6302}
 6303
 6304static void niu_load_hash(struct niu *np, u16 *hash)
 6305{
 6306	if (np->flags & NIU_FLAGS_XMAC)
 6307		niu_load_hash_xmac(np, hash);
 6308	else
 6309		niu_load_hash_bmac(np, hash);
 6310}
 6311
 6312static void niu_set_rx_mode(struct net_device *dev)
 6313{
 6314	struct niu *np = netdev_priv(dev);
 6315	int i, alt_cnt, err;
 6316	struct netdev_hw_addr *ha;
 6317	unsigned long flags;
 6318	u16 hash[16] = { 0, };
 6319
 6320	spin_lock_irqsave(&np->lock, flags);
 6321	niu_enable_rx_mac(np, 0);
 6322
 6323	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
 6324	if (dev->flags & IFF_PROMISC)
 6325		np->flags |= NIU_FLAGS_PROMISC;
 6326	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
 6327		np->flags |= NIU_FLAGS_MCAST;
 6328
 6329	alt_cnt = netdev_uc_count(dev);
 6330	if (alt_cnt > niu_num_alt_addr(np)) {
 6331		alt_cnt = 0;
 6332		np->flags |= NIU_FLAGS_PROMISC;
 6333	}
 6334
 6335	if (alt_cnt) {
 6336		int index = 0;
 6337
 6338		netdev_for_each_uc_addr(ha, dev) {
 6339			err = niu_set_alt_mac(np, index, ha->addr);
 6340			if (err)
 6341				netdev_warn(dev, "Error %d adding alt mac %d\n",
 6342					    err, index);
 6343			err = niu_enable_alt_mac(np, index, 1);
 6344			if (err)
 6345				netdev_warn(dev, "Error %d enabling alt mac %d\n",
 6346					    err, index);
 6347
 6348			index++;
 6349		}
 6350	} else {
 6351		int alt_start;
 6352		if (np->flags & NIU_FLAGS_XMAC)
 6353			alt_start = 0;
 6354		else
 6355			alt_start = 1;
 6356		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
 6357			err = niu_enable_alt_mac(np, i, 0);
 6358			if (err)
 6359				netdev_warn(dev, "Error %d disabling alt mac %d\n",
 6360					    err, i);
 6361		}
 6362	}
 6363	if (dev->flags & IFF_ALLMULTI) {
 6364		for (i = 0; i < 16; i++)
 6365			hash[i] = 0xffff;
 6366	} else if (!netdev_mc_empty(dev)) {
 6367		netdev_for_each_mc_addr(ha, dev) {
 6368			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
 6369
 6370			crc >>= 24;
 6371			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
 6372		}
 6373	}
 6374
 6375	if (np->flags & NIU_FLAGS_MCAST)
 6376		niu_load_hash(np, hash);
 6377
 6378	niu_enable_rx_mac(np, 1);
 6379	spin_unlock_irqrestore(&np->lock, flags);
 6380}
 6381
 6382static int niu_set_mac_addr(struct net_device *dev, void *p)
 6383{
 6384	struct niu *np = netdev_priv(dev);
 6385	struct sockaddr *addr = p;
 6386	unsigned long flags;
 6387
 6388	if (!is_valid_ether_addr(addr->sa_data))
 6389		return -EADDRNOTAVAIL;
 6390
 6391	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 6392
 6393	if (!netif_running(dev))
 6394		return 0;
 6395
 6396	spin_lock_irqsave(&np->lock, flags);
 6397	niu_enable_rx_mac(np, 0);
 6398	niu_set_primary_mac(np, dev->dev_addr);
 6399	niu_enable_rx_mac(np, 1);
 6400	spin_unlock_irqrestore(&np->lock, flags);
 6401
 6402	return 0;
 6403}
 6404
 6405static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 6406{
 6407	return -EOPNOTSUPP;
 6408}
 6409
 6410static void niu_netif_stop(struct niu *np)
 6411{
 6412	netif_trans_update(np->dev);	/* prevent tx timeout */
 6413
 6414	niu_disable_napi(np);
 6415
 6416	netif_tx_disable(np->dev);
 6417}
 6418
 6419static void niu_netif_start(struct niu *np)
 6420{
 6421	/* NOTE: unconditional netif_wake_queue is only appropriate
 6422	 * so long as all callers are assured to have free tx slots
 6423	 * (such as after niu_init_hw).
 6424	 */
 6425	netif_tx_wake_all_queues(np->dev);
 6426
 6427	niu_enable_napi(np);
 6428
 6429	niu_enable_interrupts(np, 1);
 6430}
 6431
 6432static void niu_reset_buffers(struct niu *np)
 6433{
 6434	int i, j, k, err;
 6435
 6436	if (np->rx_rings) {
 6437		for (i = 0; i < np->num_rx_rings; i++) {
 6438			struct rx_ring_info *rp = &np->rx_rings[i];
 6439
 6440			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
 6441				struct page *page;
 6442
 6443				page = rp->rxhash[j];
 6444				while (page) {
 6445					struct page *next =
 6446						(struct page *) page->mapping;
 6447					u64 base = page->index;
 6448					base = base >> RBR_DESCR_ADDR_SHIFT;
 6449					rp->rbr[k++] = cpu_to_le32(base);
 6450					page = next;
 6451				}
 6452			}
 6453			for (; k < MAX_RBR_RING_SIZE; k++) {
 6454				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
 6455				if (unlikely(err))
 6456					break;
 6457			}
 6458
 6459			rp->rbr_index = rp->rbr_table_size - 1;
 6460			rp->rcr_index = 0;
 6461			rp->rbr_pending = 0;
 6462			rp->rbr_refill_pending = 0;
 6463		}
 6464	}
 6465	if (np->tx_rings) {
 6466		for (i = 0; i < np->num_tx_rings; i++) {
 6467			struct tx_ring_info *rp = &np->tx_rings[i];
 6468
 6469			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
 6470				if (rp->tx_buffs[j].skb)
 6471					(void) release_tx_packet(np, rp, j);
 6472			}
 6473
 6474			rp->pending = MAX_TX_RING_SIZE;
 6475			rp->prod = 0;
 6476			rp->cons = 0;
 6477			rp->wrap_bit = 0;
 6478		}
 6479	}
 6480}
 6481
 6482static void niu_reset_task(struct work_struct *work)
 6483{
 6484	struct niu *np = container_of(work, struct niu, reset_task);
 6485	unsigned long flags;
 6486	int err;
 6487
 6488	spin_lock_irqsave(&np->lock, flags);
 6489	if (!netif_running(np->dev)) {
 6490		spin_unlock_irqrestore(&np->lock, flags);
 6491		return;
 6492	}
 6493
 6494	spin_unlock_irqrestore(&np->lock, flags);
 6495
 6496	del_timer_sync(&np->timer);
 6497
 6498	niu_netif_stop(np);
 6499
 6500	spin_lock_irqsave(&np->lock, flags);
 6501
 6502	niu_stop_hw(np);
 6503
 6504	spin_unlock_irqrestore(&np->lock, flags);
 6505
 6506	niu_reset_buffers(np);
 6507
 6508	spin_lock_irqsave(&np->lock, flags);
 6509
 6510	err = niu_init_hw(np);
 6511	if (!err) {
 6512		np->timer.expires = jiffies + HZ;
 6513		add_timer(&np->timer);
 6514		niu_netif_start(np);
 6515	}
 6516
 6517	spin_unlock_irqrestore(&np->lock, flags);
 6518}
 6519
 6520static void niu_tx_timeout(struct net_device *dev)
 6521{
 6522	struct niu *np = netdev_priv(dev);
 6523
 6524	dev_err(np->device, "%s: Transmit timed out, resetting\n",
 6525		dev->name);
 6526
 6527	schedule_work(&np->reset_task);
 6528}
 6529
 6530static void niu_set_txd(struct tx_ring_info *rp, int index,
 6531			u64 mapping, u64 len, u64 mark,
 6532			u64 n_frags)
 6533{
 6534	__le64 *desc = &rp->descr[index];
 6535
 6536	*desc = cpu_to_le64(mark |
 6537			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
 6538			    (len << TX_DESC_TR_LEN_SHIFT) |
 6539			    (mapping & TX_DESC_SAD));
 6540}
 6541
 6542static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
 6543				u64 pad_bytes, u64 len)
 6544{
 6545	u16 eth_proto, eth_proto_inner;
 6546	u64 csum_bits, l3off, ihl, ret;
 6547	u8 ip_proto;
 6548	int ipv6;
 6549
 6550	eth_proto = be16_to_cpu(ehdr->h_proto);
 6551	eth_proto_inner = eth_proto;
 6552	if (eth_proto == ETH_P_8021Q) {
 6553		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
 6554		__be16 val = vp->h_vlan_encapsulated_proto;
 6555
 6556		eth_proto_inner = be16_to_cpu(val);
 6557	}
 6558
 6559	ipv6 = ihl = 0;
 6560	switch (skb->protocol) {
 6561	case cpu_to_be16(ETH_P_IP):
 6562		ip_proto = ip_hdr(skb)->protocol;
 6563		ihl = ip_hdr(skb)->ihl;
 6564		break;
 6565	case cpu_to_be16(ETH_P_IPV6):
 6566		ip_proto = ipv6_hdr(skb)->nexthdr;
 6567		ihl = (40 >> 2);
 6568		ipv6 = 1;
 6569		break;
 6570	default:
 6571		ip_proto = ihl = 0;
 6572		break;
 6573	}
 6574
 6575	csum_bits = TXHDR_CSUM_NONE;
 6576	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 6577		u64 start, stuff;
 6578
 6579		csum_bits = (ip_proto == IPPROTO_TCP ?
 6580			     TXHDR_CSUM_TCP :
 6581			     (ip_proto == IPPROTO_UDP ?
 6582			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
 6583
 6584		start = skb_checksum_start_offset(skb) -
 6585			(pad_bytes + sizeof(struct tx_pkt_hdr));
 6586		stuff = start + skb->csum_offset;
 6587
 6588		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
 6589		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
 6590	}
 6591
 6592	l3off = skb_network_offset(skb) -
 6593		(pad_bytes + sizeof(struct tx_pkt_hdr));
 6594
 6595	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
 6596	       (len << TXHDR_LEN_SHIFT) |
 6597	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
 6598	       (ihl << TXHDR_IHL_SHIFT) |
 6599	       ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
 6600	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
 6601	       (ipv6 ? TXHDR_IP_VER : 0) |
 6602	       csum_bits);
 6603
 6604	return ret;
 6605}
 6606
 6607static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 6608				  struct net_device *dev)
 6609{
 6610	struct niu *np = netdev_priv(dev);
 6611	unsigned long align, headroom;
 6612	struct netdev_queue *txq;
 6613	struct tx_ring_info *rp;
 6614	struct tx_pkt_hdr *tp;
 6615	unsigned int len, nfg;
 6616	struct ethhdr *ehdr;
 6617	int prod, i, tlen;
 6618	u64 mapping, mrk;
 6619
 6620	i = skb_get_queue_mapping(skb);
 6621	rp = &np->tx_rings[i];
 6622	txq = netdev_get_tx_queue(dev, i);
 6623
 6624	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 6625		netif_tx_stop_queue(txq);
 6626		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
 6627		rp->tx_errors++;
 6628		return NETDEV_TX_BUSY;
 6629	}
 6630
 6631	if (eth_skb_pad(skb))
 6632		goto out;
 6633
 6634	len = sizeof(struct tx_pkt_hdr) + 15;
 6635	if (skb_headroom(skb) < len) {
 6636		struct sk_buff *skb_new;
 6637
 6638		skb_new = skb_realloc_headroom(skb, len);
 6639		if (!skb_new)
 6640			goto out_drop;
 6641		kfree_skb(skb);
 6642		skb = skb_new;
 6643	} else
 6644		skb_orphan(skb);
 6645
 6646	align = ((unsigned long) skb->data & (16 - 1));
 6647	headroom = align + sizeof(struct tx_pkt_hdr);
 6648
 6649	ehdr = (struct ethhdr *) skb->data;
 6650	tp = skb_push(skb, headroom);
 6651
 6652	len = skb->len - sizeof(struct tx_pkt_hdr);
 6653	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
 6654	tp->resv = 0;
 6655
 6656	len = skb_headlen(skb);
 6657	mapping = np->ops->map_single(np->device, skb->data,
 6658				      len, DMA_TO_DEVICE);
 6659
 6660	prod = rp->prod;
 6661
 6662	rp->tx_buffs[prod].skb = skb;
 6663	rp->tx_buffs[prod].mapping = mapping;
 6664
 6665	mrk = TX_DESC_SOP;
 6666	if (++rp->mark_counter == rp->mark_freq) {
 6667		rp->mark_counter = 0;
 6668		mrk |= TX_DESC_MARK;
 6669		rp->mark_pending++;
 6670	}
 6671
 6672	tlen = len;
 6673	nfg = skb_shinfo(skb)->nr_frags;
 6674	while (tlen > 0) {
 6675		tlen -= MAX_TX_DESC_LEN;
 6676		nfg++;
 6677	}
 6678
 6679	while (len > 0) {
 6680		unsigned int this_len = len;
 6681
 6682		if (this_len > MAX_TX_DESC_LEN)
 6683			this_len = MAX_TX_DESC_LEN;
 6684
 6685		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
 6686		mrk = nfg = 0;
 6687
 6688		prod = NEXT_TX(rp, prod);
 6689		mapping += this_len;
 6690		len -= this_len;
 6691	}
 6692
 6693	for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
 6694		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 6695
 6696		len = skb_frag_size(frag);
 6697		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
 6698					    skb_frag_off(frag), len,
 6699					    DMA_TO_DEVICE);
 6700
 6701		rp->tx_buffs[prod].skb = NULL;
 6702		rp->tx_buffs[prod].mapping = mapping;
 6703
 6704		niu_set_txd(rp, prod, mapping, len, 0, 0);
 6705
 6706		prod = NEXT_TX(rp, prod);
 6707	}
 6708
 6709	if (prod < rp->prod)
 6710		rp->wrap_bit ^= TX_RING_KICK_WRAP;
 6711	rp->prod = prod;
 6712
 6713	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
 6714
 6715	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
 6716		netif_tx_stop_queue(txq);
 6717		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
 6718			netif_tx_wake_queue(txq);
 6719	}
 6720
 6721out:
 6722	return NETDEV_TX_OK;
 6723
 6724out_drop:
 6725	rp->tx_errors++;
 6726	kfree_skb(skb);
 6727	goto out;
 6728}
 6729
 6730static int niu_change_mtu(struct net_device *dev, int new_mtu)
 6731{
 6732	struct niu *np = netdev_priv(dev);
 6733	int err, orig_jumbo, new_jumbo;
 6734
 6735	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
 6736	new_jumbo = (new_mtu > ETH_DATA_LEN);
 6737
 6738	dev->mtu = new_mtu;
 6739
 6740	if (!netif_running(dev) ||
 6741	    (orig_jumbo == new_jumbo))
 6742		return 0;
 6743
 6744	niu_full_shutdown(np, dev);
 6745
 6746	niu_free_channels(np);
 6747
 6748	niu_enable_napi(np);
 6749
 6750	err = niu_alloc_channels(np);
 6751	if (err)
 6752		return err;
 6753
 6754	spin_lock_irq(&np->lock);
 6755
 6756	err = niu_init_hw(np);
 6757	if (!err) {
 6758		timer_setup(&np->timer, niu_timer, 0);
 6759		np->timer.expires = jiffies + HZ;
 6760
 6761		err = niu_enable_interrupts(np, 1);
 6762		if (err)
 6763			niu_stop_hw(np);
 6764	}
 6765
 6766	spin_unlock_irq(&np->lock);
 6767
 6768	if (!err) {
 6769		netif_tx_start_all_queues(dev);
 6770		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 6771			netif_carrier_on(dev);
 6772
 6773		add_timer(&np->timer);
 6774	}
 6775
 6776	return err;
 6777}
 6778
 6779static void niu_get_drvinfo(struct net_device *dev,
 6780			    struct ethtool_drvinfo *info)
 6781{
 6782	struct niu *np = netdev_priv(dev);
 6783	struct niu_vpd *vpd = &np->vpd;
 6784
 6785	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
 6786	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 6787	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
 6788		vpd->fcode_major, vpd->fcode_minor);
 6789	if (np->parent->plat_type != PLAT_TYPE_NIU)
 6790		strlcpy(info->bus_info, pci_name(np->pdev),
 6791			sizeof(info->bus_info));
 6792}
 6793
 6794static int niu_get_link_ksettings(struct net_device *dev,
 6795				  struct ethtool_link_ksettings *cmd)
 6796{
 6797	struct niu *np = netdev_priv(dev);
 6798	struct niu_link_config *lp;
 6799
 6800	lp = &np->link_config;
 6801
 6802	memset(cmd, 0, sizeof(*cmd));
 6803	cmd->base.phy_address = np->phy_addr;
 6804	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
 6805						lp->supported);
 6806	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
 6807						lp->active_advertising);
 6808	cmd->base.autoneg = lp->active_autoneg;
 6809	cmd->base.speed = lp->active_speed;
 6810	cmd->base.duplex = lp->active_duplex;
 6811	cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
 6812
 6813	return 0;
 6814}
 6815
 6816static int niu_set_link_ksettings(struct net_device *dev,
 6817				  const struct ethtool_link_ksettings *cmd)
 6818{
 6819	struct niu *np = netdev_priv(dev);
 6820	struct niu_link_config *lp = &np->link_config;
 6821
 6822	ethtool_convert_link_mode_to_legacy_u32(&lp->advertising,
 6823						cmd->link_modes.advertising);
 6824	lp->speed = cmd->base.speed;
 6825	lp->duplex = cmd->base.duplex;
 6826	lp->autoneg = cmd->base.autoneg;
 6827	return niu_init_link(np);
 6828}
 6829
 6830static u32 niu_get_msglevel(struct net_device *dev)
 6831{
 6832	struct niu *np = netdev_priv(dev);
 6833	return np->msg_enable;
 6834}
 6835
 6836static void niu_set_msglevel(struct net_device *dev, u32 value)
 6837{
 6838	struct niu *np = netdev_priv(dev);
 6839	np->msg_enable = value;
 6840}
 6841
 6842static int niu_nway_reset(struct net_device *dev)
 6843{
 6844	struct niu *np = netdev_priv(dev);
 6845
 6846	if (np->link_config.autoneg)
 6847		return niu_init_link(np);
 6848
 6849	return 0;
 6850}
 6851
 6852static int niu_get_eeprom_len(struct net_device *dev)
 6853{
 6854	struct niu *np = netdev_priv(dev);
 6855
 6856	return np->eeprom_len;
 6857}
 6858
 6859static int niu_get_eeprom(struct net_device *dev,
 6860			  struct ethtool_eeprom *eeprom, u8 *data)
 6861{
 6862	struct niu *np = netdev_priv(dev);
 6863	u32 offset, len, val;
 6864
 6865	offset = eeprom->offset;
 6866	len = eeprom->len;
 6867
 6868	if (offset + len < offset)
 6869		return -EINVAL;
 6870	if (offset >= np->eeprom_len)
 6871		return -EINVAL;
 6872	if (offset + len > np->eeprom_len)
 6873		len = eeprom->len = np->eeprom_len - offset;
 6874
 6875	if (offset & 3) {
 6876		u32 b_offset, b_count;
 6877
 6878		b_offset = offset & 3;
 6879		b_count = 4 - b_offset;
 6880		if (b_count > len)
 6881			b_count = len;
 6882
 6883		val = nr64(ESPC_NCR((offset - b_offset) / 4));
 6884		memcpy(data, ((char *)&val) + b_offset, b_count);
 6885		data += b_count;
 6886		len -= b_count;
 6887		offset += b_count;
 6888	}
 6889	while (len >= 4) {
 6890		val = nr64(ESPC_NCR(offset / 4));
 6891		memcpy(data, &val, 4);
 6892		data += 4;
 6893		len -= 4;
 6894		offset += 4;
 6895	}
 6896	if (len) {
 6897		val = nr64(ESPC_NCR(offset / 4));
 6898		memcpy(data, &val, len);
 6899	}
 6900	return 0;
 6901}
 6902
 6903static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
 6904{
 6905	switch (flow_type) {
 6906	case TCP_V4_FLOW:
 6907	case TCP_V6_FLOW:
 6908		*pid = IPPROTO_TCP;
 6909		break;
 6910	case UDP_V4_FLOW:
 6911	case UDP_V6_FLOW:
 6912		*pid = IPPROTO_UDP;
 6913		break;
 6914	case SCTP_V4_FLOW:
 6915	case SCTP_V6_FLOW:
 6916		*pid = IPPROTO_SCTP;
 6917		break;
 6918	case AH_V4_FLOW:
 6919	case AH_V6_FLOW:
 6920		*pid = IPPROTO_AH;
 6921		break;
 6922	case ESP_V4_FLOW:
 6923	case ESP_V6_FLOW:
 6924		*pid = IPPROTO_ESP;
 6925		break;
 6926	default:
 6927		*pid = 0;
 6928		break;
 6929	}
 6930}
 6931
 6932static int niu_class_to_ethflow(u64 class, int *flow_type)
 6933{
 6934	switch (class) {
 6935	case CLASS_CODE_TCP_IPV4:
 6936		*flow_type = TCP_V4_FLOW;
 6937		break;
 6938	case CLASS_CODE_UDP_IPV4:
 6939		*flow_type = UDP_V4_FLOW;
 6940		break;
 6941	case CLASS_CODE_AH_ESP_IPV4:
 6942		*flow_type = AH_V4_FLOW;
 6943		break;
 6944	case CLASS_CODE_SCTP_IPV4:
 6945		*flow_type = SCTP_V4_FLOW;
 6946		break;
 6947	case CLASS_CODE_TCP_IPV6:
 6948		*flow_type = TCP_V6_FLOW;
 6949		break;
 6950	case CLASS_CODE_UDP_IPV6:
 6951		*flow_type = UDP_V6_FLOW;
 6952		break;
 6953	case CLASS_CODE_AH_ESP_IPV6:
 6954		*flow_type = AH_V6_FLOW;
 6955		break;
 6956	case CLASS_CODE_SCTP_IPV6:
 6957		*flow_type = SCTP_V6_FLOW;
 6958		break;
 6959	case CLASS_CODE_USER_PROG1:
 6960	case CLASS_CODE_USER_PROG2:
 6961	case CLASS_CODE_USER_PROG3:
 6962	case CLASS_CODE_USER_PROG4:
 6963		*flow_type = IP_USER_FLOW;
 6964		break;
 6965	default:
 6966		return -EINVAL;
 6967	}
 6968
 6969	return 0;
 6970}
 6971
 6972static int niu_ethflow_to_class(int flow_type, u64 *class)
 6973{
 6974	switch (flow_type) {
 6975	case TCP_V4_FLOW:
 6976		*class = CLASS_CODE_TCP_IPV4;
 6977		break;
 6978	case UDP_V4_FLOW:
 6979		*class = CLASS_CODE_UDP_IPV4;
 6980		break;
 6981	case AH_ESP_V4_FLOW:
 6982	case AH_V4_FLOW:
 6983	case ESP_V4_FLOW:
 6984		*class = CLASS_CODE_AH_ESP_IPV4;
 6985		break;
 6986	case SCTP_V4_FLOW:
 6987		*class = CLASS_CODE_SCTP_IPV4;
 6988		break;
 6989	case TCP_V6_FLOW:
 6990		*class = CLASS_CODE_TCP_IPV6;
 6991		break;
 6992	case UDP_V6_FLOW:
 6993		*class = CLASS_CODE_UDP_IPV6;
 6994		break;
 6995	case AH_ESP_V6_FLOW:
 6996	case AH_V6_FLOW:
 6997	case ESP_V6_FLOW:
 6998		*class = CLASS_CODE_AH_ESP_IPV6;
 6999		break;
 7000	case SCTP_V6_FLOW:
 7001		*class = CLASS_CODE_SCTP_IPV6;
 7002		break;
 7003	default:
 7004		return 0;
 7005	}
 7006
 7007	return 1;
 7008}
 7009
 7010static u64 niu_flowkey_to_ethflow(u64 flow_key)
 7011{
 7012	u64 ethflow = 0;
 7013
 7014	if (flow_key & FLOW_KEY_L2DA)
 7015		ethflow |= RXH_L2DA;
 7016	if (flow_key & FLOW_KEY_VLAN)
 7017		ethflow |= RXH_VLAN;
 7018	if (flow_key & FLOW_KEY_IPSA)
 7019		ethflow |= RXH_IP_SRC;
 7020	if (flow_key & FLOW_KEY_IPDA)
 7021		ethflow |= RXH_IP_DST;
 7022	if (flow_key & FLOW_KEY_PROTO)
 7023		ethflow |= RXH_L3_PROTO;
 7024	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
 7025		ethflow |= RXH_L4_B_0_1;
 7026	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
 7027		ethflow |= RXH_L4_B_2_3;
 7028
 7029	return ethflow;
 7030
 7031}
 7032
 7033static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
 7034{
 7035	u64 key = 0;
 7036
 7037	if (ethflow & RXH_L2DA)
 7038		key |= FLOW_KEY_L2DA;
 7039	if (ethflow & RXH_VLAN)
 7040		key |= FLOW_KEY_VLAN;
 7041	if (ethflow & RXH_IP_SRC)
 7042		key |= FLOW_KEY_IPSA;
 7043	if (ethflow & RXH_IP_DST)
 7044		key |= FLOW_KEY_IPDA;
 7045	if (ethflow & RXH_L3_PROTO)
 7046		key |= FLOW_KEY_PROTO;
 7047	if (ethflow & RXH_L4_B_0_1)
 7048		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
 7049	if (ethflow & RXH_L4_B_2_3)
 7050		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
 7051
 7052	*flow_key = key;
 7053
 7054	return 1;
 7055
 7056}
 7057
 7058static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 7059{
 7060	u64 class;
 7061
 7062	nfc->data = 0;
 7063
 7064	if (!niu_ethflow_to_class(nfc->flow_type, &class))
 7065		return -EINVAL;
 7066
 7067	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
 7068	    TCAM_KEY_DISC)
 7069		nfc->data = RXH_DISCARD;
 7070	else
 7071		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
 7072						      CLASS_CODE_USER_PROG1]);
 7073	return 0;
 7074}
 7075
 7076static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
 7077					struct ethtool_rx_flow_spec *fsp)
 7078{
 7079	u32 tmp;
 7080	u16 prt;
 7081
 7082	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
 7083	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
 7084
 7085	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
 7086	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 7087
 7088	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
 7089	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
 7090
 7091	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
 7092	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 7093
 7094	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
 7095		TCAM_V4KEY2_TOS_SHIFT;
 7096	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
 7097		TCAM_V4KEY2_TOS_SHIFT;
 7098
 7099	switch (fsp->flow_type) {
 7100	case TCP_V4_FLOW:
 7101	case UDP_V4_FLOW:
 7102	case SCTP_V4_FLOW:
 7103		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7104			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
 7105		fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
 7106
 7107		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7108			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
 7109		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 7110
 7111		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7112			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
 7113		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
 7114
 7115		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7116			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
 7117		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 7118		break;
 7119	case AH_V4_FLOW:
 7120	case ESP_V4_FLOW:
 7121		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7122			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7123		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 7124
 7125		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7126			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7127		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 7128		break;
 7129	case IP_USER_FLOW:
 7130		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7131			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7132		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 7133
 7134		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7135			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7136		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 7137
 7138		fsp->h_u.usr_ip4_spec.proto =
 7139			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
 7140			TCAM_V4KEY2_PROTO_SHIFT;
 7141		fsp->m_u.usr_ip4_spec.proto =
 7142			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
 7143			TCAM_V4KEY2_PROTO_SHIFT;
 7144
 7145		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
 7146		break;
 7147	default:
 7148		break;
 7149	}
 7150}
 7151
 7152static int niu_get_ethtool_tcam_entry(struct niu *np,
 7153				      struct ethtool_rxnfc *nfc)
 7154{
 7155	struct niu_parent *parent = np->parent;
 7156	struct niu_tcam_entry *tp;
 7157	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
 7158	u16 idx;
 7159	u64 class;
 7160	int ret = 0;
 7161
 7162	idx = tcam_get_index(np, (u16)nfc->fs.location);
 7163
 7164	tp = &parent->tcam[idx];
 7165	if (!tp->valid) {
 7166		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
 7167			    parent->index, (u16)nfc->fs.location, idx);
 7168		return -EINVAL;
 7169	}
 7170
 7171	/* fill the flow spec entry */
 7172	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 7173		TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7174	ret = niu_class_to_ethflow(class, &fsp->flow_type);
 7175	if (ret < 0) {
 7176		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
 7177			    parent->index);
 7178		goto out;
 7179	}
 7180
 7181	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
 7182		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
 7183			TCAM_V4KEY2_PROTO_SHIFT;
 7184		if (proto == IPPROTO_ESP) {
 7185			if (fsp->flow_type == AH_V4_FLOW)
 7186				fsp->flow_type = ESP_V4_FLOW;
 7187			else
 7188				fsp->flow_type = ESP_V6_FLOW;
 7189		}
 7190	}
 7191
 7192	switch (fsp->flow_type) {
 7193	case TCP_V4_FLOW:
 7194	case UDP_V4_FLOW:
 7195	case SCTP_V4_FLOW:
 7196	case AH_V4_FLOW:
 7197	case ESP_V4_FLOW:
 7198		niu_get_ip4fs_from_tcam_key(tp, fsp);
 7199		break;
 7200	case TCP_V6_FLOW:
 7201	case UDP_V6_FLOW:
 7202	case SCTP_V6_FLOW:
 7203	case AH_V6_FLOW:
 7204	case ESP_V6_FLOW:
 7205		/* Not yet implemented */
 7206		ret = -EINVAL;
 7207		break;
 7208	case IP_USER_FLOW:
 7209		niu_get_ip4fs_from_tcam_key(tp, fsp);
 7210		break;
 7211	default:
 7212		ret = -EINVAL;
 7213		break;
 7214	}
 7215
 7216	if (ret < 0)
 7217		goto out;
 7218
 7219	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
 7220		fsp->ring_cookie = RX_CLS_FLOW_DISC;
 7221	else
 7222		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
 7223			TCAM_ASSOCDATA_OFFSET_SHIFT;
 7224
 7225	/* put the tcam size here */
 7226	nfc->data = tcam_get_size(np);
 7227out:
 7228	return ret;
 7229}
 7230
 7231static int niu_get_ethtool_tcam_all(struct niu *np,
 7232				    struct ethtool_rxnfc *nfc,
 7233				    u32 *rule_locs)
 7234{
 7235	struct niu_parent *parent = np->parent;
 7236	struct niu_tcam_entry *tp;
 7237	int i, idx, cnt;
 7238	unsigned long flags;
 7239	int ret = 0;
 7240
 7241	/* put the tcam size here */
 7242	nfc->data = tcam_get_size(np);
 7243
 7244	niu_lock_parent(np, flags);
 7245	for (cnt = 0, i = 0; i < nfc->data; i++) {
 7246		idx = tcam_get_index(np, i);
 7247		tp = &parent->tcam[idx];
 7248		if (!tp->valid)
 7249			continue;
 7250		if (cnt == nfc->rule_cnt) {
 7251			ret = -EMSGSIZE;
 7252			break;
 7253		}
 7254		rule_locs[cnt] = i;
 7255		cnt++;
 7256	}
 7257	niu_unlock_parent(np, flags);
 7258
 7259	nfc->rule_cnt = cnt;
 7260
 7261	return ret;
 7262}
 7263
 7264static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 7265		       u32 *rule_locs)
 7266{
 7267	struct niu *np = netdev_priv(dev);
 7268	int ret = 0;
 7269
 7270	switch (cmd->cmd) {
 7271	case ETHTOOL_GRXFH:
 7272		ret = niu_get_hash_opts(np, cmd);
 7273		break;
 7274	case ETHTOOL_GRXRINGS:
 7275		cmd->data = np->num_rx_rings;
 7276		break;
 7277	case ETHTOOL_GRXCLSRLCNT:
 7278		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
 7279		break;
 7280	case ETHTOOL_GRXCLSRULE:
 7281		ret = niu_get_ethtool_tcam_entry(np, cmd);
 7282		break;
 7283	case ETHTOOL_GRXCLSRLALL:
 7284		ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
 7285		break;
 7286	default:
 7287		ret = -EINVAL;
 7288		break;
 7289	}
 7290
 7291	return ret;
 7292}
 7293
 7294static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 7295{
 7296	u64 class;
 7297	u64 flow_key = 0;
 7298	unsigned long flags;
 7299
 7300	if (!niu_ethflow_to_class(nfc->flow_type, &class))
 7301		return -EINVAL;
 7302
 7303	if (class < CLASS_CODE_USER_PROG1 ||
 7304	    class > CLASS_CODE_SCTP_IPV6)
 7305		return -EINVAL;
 7306
 7307	if (nfc->data & RXH_DISCARD) {
 7308		niu_lock_parent(np, flags);
 7309		flow_key = np->parent->tcam_key[class -
 7310					       CLASS_CODE_USER_PROG1];
 7311		flow_key |= TCAM_KEY_DISC;
 7312		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
 7313		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
 7314		niu_unlock_parent(np, flags);
 7315		return 0;
 7316	} else {
 7317		/* Discard was set before, but is not set now */
 7318		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
 7319		    TCAM_KEY_DISC) {
 7320			niu_lock_parent(np, flags);
 7321			flow_key = np->parent->tcam_key[class -
 7322					       CLASS_CODE_USER_PROG1];
 7323			flow_key &= ~TCAM_KEY_DISC;
 7324			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
 7325			     flow_key);
 7326			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
 7327				flow_key;
 7328			niu_unlock_parent(np, flags);
 7329		}
 7330	}
 7331
 7332	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
 7333		return -EINVAL;
 7334
 7335	niu_lock_parent(np, flags);
 7336	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
 7337	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
 7338	niu_unlock_parent(np, flags);
 7339
 7340	return 0;
 7341}
 7342
 7343static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
 7344				       struct niu_tcam_entry *tp,
 7345				       int l2_rdc_tab, u64 class)
 7346{
 7347	u8 pid = 0;
 7348	u32 sip, dip, sipm, dipm, spi, spim;
 7349	u16 sport, dport, spm, dpm;
 7350
 7351	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
 7352	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
 7353	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
 7354	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
 7355
 7356	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7357	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
 7358	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
 7359	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
 7360
 7361	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
 7362	tp->key[3] |= dip;
 7363
 7364	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
 7365	tp->key_mask[3] |= dipm;
 7366
 7367	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
 7368		       TCAM_V4KEY2_TOS_SHIFT);
 7369	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
 7370			    TCAM_V4KEY2_TOS_SHIFT);
 7371	switch (fsp->flow_type) {
 7372	case TCP_V4_FLOW:
 7373	case UDP_V4_FLOW:
 7374	case SCTP_V4_FLOW:
 7375		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
 7376		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
 7377		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
 7378		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
 7379
 7380		tp->key[2] |= (((u64)sport << 16) | dport);
 7381		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
 7382		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
 7383		break;
 7384	case AH_V4_FLOW:
 7385	case ESP_V4_FLOW:
 7386		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
 7387		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
 7388
 7389		tp->key[2] |= spi;
 7390		tp->key_mask[2] |= spim;
 7391		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
 7392		break;
 7393	case IP_USER_FLOW:
 7394		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
 7395		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
 7396
 7397		tp->key[2] |= spi;
 7398		tp->key_mask[2] |= spim;
 7399		pid = fsp->h_u.usr_ip4_spec.proto;
 7400		break;
 7401	default:
 7402		break;
 7403	}
 7404
 7405	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
 7406	if (pid) {
 7407		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
 7408	}
 7409}
 7410
 7411static int niu_add_ethtool_tcam_entry(struct niu *np,
 7412				      struct ethtool_rxnfc *nfc)
 7413{
 7414	struct niu_parent *parent = np->parent;
 7415	struct niu_tcam_entry *tp;
 7416	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
 7417	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
 7418	int l2_rdc_table = rdc_table->first_table_num;
 7419	u16 idx;
 7420	u64 class;
 7421	unsigned long flags;
 7422	int err, ret;
 7423
 7424	ret = 0;
 7425
 7426	idx = nfc->fs.location;
 7427	if (idx >= tcam_get_size(np))
 7428		return -EINVAL;
 7429
 7430	if (fsp->flow_type == IP_USER_FLOW) {
 7431		int i;
 7432		int add_usr_cls = 0;
 7433		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
 7434		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
 7435
 7436		if (uspec->ip_ver != ETH_RX_NFC_IP4)
 7437			return -EINVAL;
 7438
 7439		niu_lock_parent(np, flags);
 7440
 7441		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
 7442			if (parent->l3_cls[i]) {
 7443				if (uspec->proto == parent->l3_cls_pid[i]) {
 7444					class = parent->l3_cls[i];
 7445					parent->l3_cls_refcnt[i]++;
 7446					add_usr_cls = 1;
 7447					break;
 7448				}
 7449			} else {
 7450				/* Program new user IP class */
 7451				switch (i) {
 7452				case 0:
 7453					class = CLASS_CODE_USER_PROG1;
 7454					break;
 7455				case 1:
 7456					class = CLASS_CODE_USER_PROG2;
 7457					break;
 7458				case 2:
 7459					class = CLASS_CODE_USER_PROG3;
 7460					break;
 7461				case 3:
 7462					class = CLASS_CODE_USER_PROG4;
 7463					break;
 7464				default:
 7465					class = CLASS_CODE_UNRECOG;
 7466					break;
 7467				}
 7468				ret = tcam_user_ip_class_set(np, class, 0,
 7469							     uspec->proto,
 7470							     uspec->tos,
 7471							     umask->tos);
 7472				if (ret)
 7473					goto out;
 7474
 7475				ret = tcam_user_ip_class_enable(np, class, 1);
 7476				if (ret)
 7477					goto out;
 7478				parent->l3_cls[i] = class;
 7479				parent->l3_cls_pid[i] = uspec->proto;
 7480				parent->l3_cls_refcnt[i]++;
 7481				add_usr_cls = 1;
 7482				break;
 7483			}
 7484		}
 7485		if (!add_usr_cls) {
 7486			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
 7487				    parent->index, __func__, uspec->proto);
 7488			ret = -EINVAL;
 7489			goto out;
 7490		}
 7491		niu_unlock_parent(np, flags);
 7492	} else {
 7493		if (!niu_ethflow_to_class(fsp->flow_type, &class)) {
 7494			return -EINVAL;
 7495		}
 7496	}
 7497
 7498	niu_lock_parent(np, flags);
 7499
 7500	idx = tcam_get_index(np, idx);
 7501	tp = &parent->tcam[idx];
 7502
 7503	memset(tp, 0, sizeof(*tp));
 7504
 7505	/* fill in the tcam key and mask */
 7506	switch (fsp->flow_type) {
 7507	case TCP_V4_FLOW:
 7508	case UDP_V4_FLOW:
 7509	case SCTP_V4_FLOW:
 7510	case AH_V4_FLOW:
 7511	case ESP_V4_FLOW:
 7512		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
 7513		break;
 7514	case TCP_V6_FLOW:
 7515	case UDP_V6_FLOW:
 7516	case SCTP_V6_FLOW:
 7517	case AH_V6_FLOW:
 7518	case ESP_V6_FLOW:
 7519		/* Not yet implemented */
 7520		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
 7521			    parent->index, __func__, fsp->flow_type);
 7522		ret = -EINVAL;
 7523		goto out;
 7524	case IP_USER_FLOW:
 7525		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
 7526		break;
 7527	default:
 7528		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
 7529			    parent->index, __func__, fsp->flow_type);
 7530		ret = -EINVAL;
 7531		goto out;
 7532	}
 7533
 7534	/* fill in the assoc data */
 7535	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
 7536		tp->assoc_data = TCAM_ASSOCDATA_DISC;
 7537	} else {
 7538		if (fsp->ring_cookie >= np->num_rx_rings) {
 7539			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
 7540				    parent->index, __func__,
 7541				    (long long)fsp->ring_cookie);
 7542			ret = -EINVAL;
 7543			goto out;
 7544		}
 7545		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
 7546				  (fsp->ring_cookie <<
 7547				   TCAM_ASSOCDATA_OFFSET_SHIFT));
 7548	}
 7549
 7550	err = tcam_write(np, idx, tp->key, tp->key_mask);
 7551	if (err) {
 7552		ret = -EINVAL;
 7553		goto out;
 7554	}
 7555	err = tcam_assoc_write(np, idx, tp->assoc_data);
 7556	if (err) {
 7557		ret = -EINVAL;
 7558		goto out;
 7559	}
 7560
 7561	/* validate the entry */
 7562	tp->valid = 1;
 7563	np->clas.tcam_valid_entries++;
 7564out:
 7565	niu_unlock_parent(np, flags);
 7566
 7567	return ret;
 7568}
 7569
 7570static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
 7571{
 7572	struct niu_parent *parent = np->parent;
 7573	struct niu_tcam_entry *tp;
 7574	u16 idx;
 7575	unsigned long flags;
 7576	u64 class;
 7577	int ret = 0;
 7578
 7579	if (loc >= tcam_get_size(np))
 7580		return -EINVAL;
 7581
 7582	niu_lock_parent(np, flags);
 7583
 7584	idx = tcam_get_index(np, loc);
 7585	tp = &parent->tcam[idx];
 7586
 7587	/* if the entry is of a user defined class, then update*/
 7588	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 7589		TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7590
 7591	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
 7592		int i;
 7593		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
 7594			if (parent->l3_cls[i] == class) {
 7595				parent->l3_cls_refcnt[i]--;
 7596				if (!parent->l3_cls_refcnt[i]) {
 7597					/* disable class */
 7598					ret = tcam_user_ip_class_enable(np,
 7599									class,
 7600									0);
 7601					if (ret)
 7602						goto out;
 7603					parent->l3_cls[i] = 0;
 7604					parent->l3_cls_pid[i] = 0;
 7605				}
 7606				break;
 7607			}
 7608		}
 7609		if (i == NIU_L3_PROG_CLS) {
 7610			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
 7611				    parent->index, __func__,
 7612				    (unsigned long long)class);
 7613			ret = -EINVAL;
 7614			goto out;
 7615		}
 7616	}
 7617
 7618	ret = tcam_flush(np, idx);
 7619	if (ret)
 7620		goto out;
 7621
 7622	/* invalidate the entry */
 7623	tp->valid = 0;
 7624	np->clas.tcam_valid_entries--;
 7625out:
 7626	niu_unlock_parent(np, flags);
 7627
 7628	return ret;
 7629}
 7630
 7631static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 7632{
 7633	struct niu *np = netdev_priv(dev);
 7634	int ret = 0;
 7635
 7636	switch (cmd->cmd) {
 7637	case ETHTOOL_SRXFH:
 7638		ret = niu_set_hash_opts(np, cmd);
 7639		break;
 7640	case ETHTOOL_SRXCLSRLINS:
 7641		ret = niu_add_ethtool_tcam_entry(np, cmd);
 7642		break;
 7643	case ETHTOOL_SRXCLSRLDEL:
 7644		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
 7645		break;
 7646	default:
 7647		ret = -EINVAL;
 7648		break;
 7649	}
 7650
 7651	return ret;
 7652}
 7653
 7654static const struct {
 7655	const char string[ETH_GSTRING_LEN];
 7656} niu_xmac_stat_keys[] = {
 7657	{ "tx_frames" },
 7658	{ "tx_bytes" },
 7659	{ "tx_fifo_errors" },
 7660	{ "tx_overflow_errors" },
 7661	{ "tx_max_pkt_size_errors" },
 7662	{ "tx_underflow_errors" },
 7663	{ "rx_local_faults" },
 7664	{ "rx_remote_faults" },
 7665	{ "rx_link_faults" },
 7666	{ "rx_align_errors" },
 7667	{ "rx_frags" },
 7668	{ "rx_mcasts" },
 7669	{ "rx_bcasts" },
 7670	{ "rx_hist_cnt1" },
 7671	{ "rx_hist_cnt2" },
 7672	{ "rx_hist_cnt3" },
 7673	{ "rx_hist_cnt4" },
 7674	{ "rx_hist_cnt5" },
 7675	{ "rx_hist_cnt6" },
 7676	{ "rx_hist_cnt7" },
 7677	{ "rx_octets" },
 7678	{ "rx_code_violations" },
 7679	{ "rx_len_errors" },
 7680	{ "rx_crc_errors" },
 7681	{ "rx_underflows" },
 7682	{ "rx_overflows" },
 7683	{ "pause_off_state" },
 7684	{ "pause_on_state" },
 7685	{ "pause_received" },
 7686};
 7687
 7688#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
 7689
 7690static const struct {
 7691	const char string[ETH_GSTRING_LEN];
 7692} niu_bmac_stat_keys[] = {
 7693	{ "tx_underflow_errors" },
 7694	{ "tx_max_pkt_size_errors" },
 7695	{ "tx_bytes" },
 7696	{ "tx_frames" },
 7697	{ "rx_overflows" },
 7698	{ "rx_frames" },
 7699	{ "rx_align_errors" },
 7700	{ "rx_crc_errors" },
 7701	{ "rx_len_errors" },
 7702	{ "pause_off_state" },
 7703	{ "pause_on_state" },
 7704	{ "pause_received" },
 7705};
 7706
 7707#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
 7708
 7709static const struct {
 7710	const char string[ETH_GSTRING_LEN];
 7711} niu_rxchan_stat_keys[] = {
 7712	{ "rx_channel" },
 7713	{ "rx_packets" },
 7714	{ "rx_bytes" },
 7715	{ "rx_dropped" },
 7716	{ "rx_errors" },
 7717};
 7718
 7719#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
 7720
 7721static const struct {
 7722	const char string[ETH_GSTRING_LEN];
 7723} niu_txchan_stat_keys[] = {
 7724	{ "tx_channel" },
 7725	{ "tx_packets" },
 7726	{ "tx_bytes" },
 7727	{ "tx_errors" },
 7728};
 7729
 7730#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
 7731
 7732static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 7733{
 7734	struct niu *np = netdev_priv(dev);
 7735	int i;
 7736
 7737	if (stringset != ETH_SS_STATS)
 7738		return;
 7739
 7740	if (np->flags & NIU_FLAGS_XMAC) {
 7741		memcpy(data, niu_xmac_stat_keys,
 7742		       sizeof(niu_xmac_stat_keys));
 7743		data += sizeof(niu_xmac_stat_keys);
 7744	} else {
 7745		memcpy(data, niu_bmac_stat_keys,
 7746		       sizeof(niu_bmac_stat_keys));
 7747		data += sizeof(niu_bmac_stat_keys);
 7748	}
 7749	for (i = 0; i < np->num_rx_rings; i++) {
 7750		memcpy(data, niu_rxchan_stat_keys,
 7751		       sizeof(niu_rxchan_stat_keys));
 7752		data += sizeof(niu_rxchan_stat_keys);
 7753	}
 7754	for (i = 0; i < np->num_tx_rings; i++) {
 7755		memcpy(data, niu_txchan_stat_keys,
 7756		       sizeof(niu_txchan_stat_keys));
 7757		data += sizeof(niu_txchan_stat_keys);
 7758	}
 7759}
 7760
 7761static int niu_get_sset_count(struct net_device *dev, int stringset)
 7762{
 7763	struct niu *np = netdev_priv(dev);
 7764
 7765	if (stringset != ETH_SS_STATS)
 7766		return -EINVAL;
 7767
 7768	return (np->flags & NIU_FLAGS_XMAC ?
 7769		 NUM_XMAC_STAT_KEYS :
 7770		 NUM_BMAC_STAT_KEYS) +
 7771		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
 7772		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
 7773}
 7774
 7775static void niu_get_ethtool_stats(struct net_device *dev,
 7776				  struct ethtool_stats *stats, u64 *data)
 7777{
 7778	struct niu *np = netdev_priv(dev);
 7779	int i;
 7780
 7781	niu_sync_mac_stats(np);
 7782	if (np->flags & NIU_FLAGS_XMAC) {
 7783		memcpy(data, &np->mac_stats.xmac,
 7784		       sizeof(struct niu_xmac_stats));
 7785		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
 7786	} else {
 7787		memcpy(data, &np->mac_stats.bmac,
 7788		       sizeof(struct niu_bmac_stats));
 7789		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
 7790	}
 7791	for (i = 0; i < np->num_rx_rings; i++) {
 7792		struct rx_ring_info *rp = &np->rx_rings[i];
 7793
 7794		niu_sync_rx_discard_stats(np, rp, 0);
 7795
 7796		data[0] = rp->rx_channel;
 7797		data[1] = rp->rx_packets;
 7798		data[2] = rp->rx_bytes;
 7799		data[3] = rp->rx_dropped;
 7800		data[4] = rp->rx_errors;
 7801		data += 5;
 7802	}
 7803	for (i = 0; i < np->num_tx_rings; i++) {
 7804		struct tx_ring_info *rp = &np->tx_rings[i];
 7805
 7806		data[0] = rp->tx_channel;
 7807		data[1] = rp->tx_packets;
 7808		data[2] = rp->tx_bytes;
 7809		data[3] = rp->tx_errors;
 7810		data += 4;
 7811	}
 7812}
 7813
 7814static u64 niu_led_state_save(struct niu *np)
 7815{
 7816	if (np->flags & NIU_FLAGS_XMAC)
 7817		return nr64_mac(XMAC_CONFIG);
 7818	else
 7819		return nr64_mac(BMAC_XIF_CONFIG);
 7820}
 7821
 7822static void niu_led_state_restore(struct niu *np, u64 val)
 7823{
 7824	if (np->flags & NIU_FLAGS_XMAC)
 7825		nw64_mac(XMAC_CONFIG, val);
 7826	else
 7827		nw64_mac(BMAC_XIF_CONFIG, val);
 7828}
 7829
 7830static void niu_force_led(struct niu *np, int on)
 7831{
 7832	u64 val, reg, bit;
 7833
 7834	if (np->flags & NIU_FLAGS_XMAC) {
 7835		reg = XMAC_CONFIG;
 7836		bit = XMAC_CONFIG_FORCE_LED_ON;
 7837	} else {
 7838		reg = BMAC_XIF_CONFIG;
 7839		bit = BMAC_XIF_CONFIG_LINK_LED;
 7840	}
 7841
 7842	val = nr64_mac(reg);
 7843	if (on)
 7844		val |= bit;
 7845	else
 7846		val &= ~bit;
 7847	nw64_mac(reg, val);
 7848}
 7849
 7850static int niu_set_phys_id(struct net_device *dev,
 7851			   enum ethtool_phys_id_state state)
 7852
 7853{
 7854	struct niu *np = netdev_priv(dev);
 7855
 7856	if (!netif_running(dev))
 7857		return -EAGAIN;
 7858
 7859	switch (state) {
 7860	case ETHTOOL_ID_ACTIVE:
 7861		np->orig_led_state = niu_led_state_save(np);
 7862		return 1;	/* cycle on/off once per second */
 7863
 7864	case ETHTOOL_ID_ON:
 7865		niu_force_led(np, 1);
 7866		break;
 7867
 7868	case ETHTOOL_ID_OFF:
 7869		niu_force_led(np, 0);
 7870		break;
 7871
 7872	case ETHTOOL_ID_INACTIVE:
 7873		niu_led_state_restore(np, np->orig_led_state);
 7874	}
 7875
 7876	return 0;
 7877}
 7878
 7879static const struct ethtool_ops niu_ethtool_ops = {
 7880	.get_drvinfo		= niu_get_drvinfo,
 7881	.get_link		= ethtool_op_get_link,
 7882	.get_msglevel		= niu_get_msglevel,
 7883	.set_msglevel		= niu_set_msglevel,
 7884	.nway_reset		= niu_nway_reset,
 7885	.get_eeprom_len		= niu_get_eeprom_len,
 7886	.get_eeprom		= niu_get_eeprom,
 7887	.get_strings		= niu_get_strings,
 7888	.get_sset_count		= niu_get_sset_count,
 7889	.get_ethtool_stats	= niu_get_ethtool_stats,
 7890	.set_phys_id		= niu_set_phys_id,
 7891	.get_rxnfc		= niu_get_nfc,
 7892	.set_rxnfc		= niu_set_nfc,
 7893	.get_link_ksettings	= niu_get_link_ksettings,
 7894	.set_link_ksettings	= niu_set_link_ksettings,
 7895};
 7896
 7897static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
 7898			      int ldg, int ldn)
 7899{
 7900	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
 7901		return -EINVAL;
 7902	if (ldn < 0 || ldn > LDN_MAX)
 7903		return -EINVAL;
 7904
 7905	parent->ldg_map[ldn] = ldg;
 7906
 7907	if (np->parent->plat_type == PLAT_TYPE_NIU) {
 7908		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
 7909		 * the firmware, and we're not supposed to change them.
 7910		 * Validate the mapping, because if it's wrong we probably
 7911		 * won't get any interrupts and that's painful to debug.
 7912		 */
 7913		if (nr64(LDG_NUM(ldn)) != ldg) {
 7914			dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
 7915				np->port, ldn, ldg,
 7916				(unsigned long long) nr64(LDG_NUM(ldn)));
 7917			return -EINVAL;
 7918		}
 7919	} else
 7920		nw64(LDG_NUM(ldn), ldg);
 7921
 7922	return 0;
 7923}
 7924
 7925static int niu_set_ldg_timer_res(struct niu *np, int res)
 7926{
 7927	if (res < 0 || res > LDG_TIMER_RES_VAL)
 7928		return -EINVAL;
 7929
 7930
 7931	nw64(LDG_TIMER_RES, res);
 7932
 7933	return 0;
 7934}
 7935
 7936static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
 7937{
 7938	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
 7939	    (func < 0 || func > 3) ||
 7940	    (vector < 0 || vector > 0x1f))
 7941		return -EINVAL;
 7942
 7943	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
 7944
 7945	return 0;
 7946}
 7947
 7948static int niu_pci_eeprom_read(struct niu *np, u32 addr)
 7949{
 7950	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
 7951				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
 7952	int limit;
 7953
 7954	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
 7955		return -EINVAL;
 7956
 7957	frame = frame_base;
 7958	nw64(ESPC_PIO_STAT, frame);
 7959	limit = 64;
 7960	do {
 7961		udelay(5);
 7962		frame = nr64(ESPC_PIO_STAT);
 7963		if (frame & ESPC_PIO_STAT_READ_END)
 7964			break;
 7965	} while (limit--);
 7966	if (!(frame & ESPC_PIO_STAT_READ_END)) {
 7967		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
 7968			(unsigned long long) frame);
 7969		return -ENODEV;
 7970	}
 7971
 7972	frame = frame_base;
 7973	nw64(ESPC_PIO_STAT, frame);
 7974	limit = 64;
 7975	do {
 7976		udelay(5);
 7977		frame = nr64(ESPC_PIO_STAT);
 7978		if (frame & ESPC_PIO_STAT_READ_END)
 7979			break;
 7980	} while (limit--);
 7981	if (!(frame & ESPC_PIO_STAT_READ_END)) {
 7982		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
 7983			(unsigned long long) frame);
 7984		return -ENODEV;
 7985	}
 7986
 7987	frame = nr64(ESPC_PIO_STAT);
 7988	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
 7989}
 7990
 7991static int niu_pci_eeprom_read16(struct niu *np, u32 off)
 7992{
 7993	int err = niu_pci_eeprom_read(np, off);
 7994	u16 val;
 7995
 7996	if (err < 0)
 7997		return err;
 7998	val = (err << 8);
 7999	err = niu_pci_eeprom_read(np, off + 1);
 8000	if (err < 0)
 8001		return err;
 8002	val |= (err & 0xff);
 8003
 8004	return val;
 8005}
 8006
 8007static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
 8008{
 8009	int err = niu_pci_eeprom_read(np, off);
 8010	u16 val;
 8011
 8012	if (err < 0)
 8013		return err;
 8014
 8015	val = (err & 0xff);
 8016	err = niu_pci_eeprom_read(np, off + 1);
 8017	if (err < 0)
 8018		return err;
 8019
 8020	val |= (err & 0xff) << 8;
 8021
 8022	return val;
 8023}
 8024
 8025static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
 8026				    int namebuf_len)
 8027{
 8028	int i;
 8029
 8030	for (i = 0; i < namebuf_len; i++) {
 8031		int err = niu_pci_eeprom_read(np, off + i);
 8032		if (err < 0)
 8033			return err;
 8034		*namebuf++ = err;
 8035		if (!err)
 8036			break;
 8037	}
 8038	if (i >= namebuf_len)
 8039		return -EINVAL;
 8040
 8041	return i + 1;
 8042}
 8043
 8044static void niu_vpd_parse_version(struct niu *np)
 8045{
 8046	struct niu_vpd *vpd = &np->vpd;
 8047	int len = strlen(vpd->version) + 1;
 8048	const char *s = vpd->version;
 8049	int i;
 8050
 8051	for (i = 0; i < len - 5; i++) {
 8052		if (!strncmp(s + i, "FCode ", 6))
 8053			break;
 8054	}
 8055	if (i >= len - 5)
 8056		return;
 8057
 8058	s += i + 5;
 8059	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
 8060
 8061	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8062		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
 8063		     vpd->fcode_major, vpd->fcode_minor);
 8064	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
 8065	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
 8066	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
 8067		np->flags |= NIU_FLAGS_VPD_VALID;
 8068}
 8069
 8070/* ESPC_PIO_EN_ENABLE must be set */
 8071static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
 8072{
 8073	unsigned int found_mask = 0;
 8074#define FOUND_MASK_MODEL	0x00000001
 8075#define FOUND_MASK_BMODEL	0x00000002
 8076#define FOUND_MASK_VERS		0x00000004
 8077#define FOUND_MASK_MAC		0x00000008
 8078#define FOUND_MASK_NMAC		0x00000010
 8079#define FOUND_MASK_PHY		0x00000020
 8080#define FOUND_MASK_ALL		0x0000003f
 8081
 8082	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8083		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
 8084	while (start < end) {
 8085		int len, err, prop_len;
 8086		char namebuf[64];
 8087		u8 *prop_buf;
 8088		int max_len;
 8089
 8090		if (found_mask == FOUND_MASK_ALL) {
 8091			niu_vpd_parse_version(np);
 8092			return 1;
 8093		}
 8094
 8095		err = niu_pci_eeprom_read(np, start + 2);
 8096		if (err < 0)
 8097			return err;
 8098		len = err;
 8099		start += 3;
 8100
 8101		prop_len = niu_pci_eeprom_read(np, start + 4);
 8102		if (prop_len < 0)
 8103			return prop_len;
 8104		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
 8105		if (err < 0)
 8106			return err;
 8107
 8108		prop_buf = NULL;
 8109		max_len = 0;
 8110		if (!strcmp(namebuf, "model")) {
 8111			prop_buf = np->vpd.model;
 8112			max_len = NIU_VPD_MODEL_MAX;
 8113			found_mask |= FOUND_MASK_MODEL;
 8114		} else if (!strcmp(namebuf, "board-model")) {
 8115			prop_buf = np->vpd.board_model;
 8116			max_len = NIU_VPD_BD_MODEL_MAX;
 8117			found_mask |= FOUND_MASK_BMODEL;
 8118		} else if (!strcmp(namebuf, "version")) {
 8119			prop_buf = np->vpd.version;
 8120			max_len = NIU_VPD_VERSION_MAX;
 8121			found_mask |= FOUND_MASK_VERS;
 8122		} else if (!strcmp(namebuf, "local-mac-address")) {
 8123			prop_buf = np->vpd.local_mac;
 8124			max_len = ETH_ALEN;
 8125			found_mask |= FOUND_MASK_MAC;
 8126		} else if (!strcmp(namebuf, "num-mac-addresses")) {
 8127			prop_buf = &np->vpd.mac_num;
 8128			max_len = 1;
 8129			found_mask |= FOUND_MASK_NMAC;
 8130		} else if (!strcmp(namebuf, "phy-type")) {
 8131			prop_buf = np->vpd.phy_type;
 8132			max_len = NIU_VPD_PHY_TYPE_MAX;
 8133			found_mask |= FOUND_MASK_PHY;
 8134		}
 8135
 8136		if (max_len && prop_len > max_len) {
 8137			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
 8138			return -EINVAL;
 8139		}
 8140
 8141		if (prop_buf) {
 8142			u32 off = start + 5 + err;
 8143			int i;
 8144
 8145			netif_printk(np, probe, KERN_DEBUG, np->dev,
 8146				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
 8147				     namebuf, prop_len);
 8148			for (i = 0; i < prop_len; i++) {
 8149				err = niu_pci_eeprom_read(np, off + i);
 8150				if (err >= 0)
 8151					*prop_buf = err;
 8152				++prop_buf;
 8153			}
 8154		}
 8155
 8156		start += len;
 8157	}
 8158
 8159	return 0;
 8160}
 8161
 8162/* ESPC_PIO_EN_ENABLE must be set */
 8163static void niu_pci_vpd_fetch(struct niu *np, u32 start)
 8164{
 8165	u32 offset;
 8166	int err;
 8167
 8168	err = niu_pci_eeprom_read16_swp(np, start + 1);
 8169	if (err < 0)
 8170		return;
 8171
 8172	offset = err + 3;
 8173
 8174	while (start + offset < ESPC_EEPROM_SIZE) {
 8175		u32 here = start + offset;
 8176		u32 end;
 8177
 8178		err = niu_pci_eeprom_read(np, here);
 8179		if (err != 0x90)
 8180			return;
 8181
 8182		err = niu_pci_eeprom_read16_swp(np, here + 1);
 8183		if (err < 0)
 8184			return;
 8185
 8186		here = start + offset + 3;
 8187		end = start + offset + err;
 8188
 8189		offset += err;
 8190
 8191		err = niu_pci_vpd_scan_props(np, here, end);
 8192		if (err < 0 || err == 1)
 8193			return;
 8194	}
 8195}
 8196
 8197/* ESPC_PIO_EN_ENABLE must be set */
 8198static u32 niu_pci_vpd_offset(struct niu *np)
 8199{
 8200	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
 8201	int err;
 8202
 8203	while (start < end) {
 8204		ret = start;
 8205
 8206		/* ROM header signature?  */
 8207		err = niu_pci_eeprom_read16(np, start +  0);
 8208		if (err != 0x55aa)
 8209			return 0;
 8210
 8211		/* Apply offset to PCI data structure.  */
 8212		err = niu_pci_eeprom_read16(np, start + 23);
 8213		if (err < 0)
 8214			return 0;
 8215		start += err;
 8216
 8217		/* Check for "PCIR" signature.  */
 8218		err = niu_pci_eeprom_read16(np, start +  0);
 8219		if (err != 0x5043)
 8220			return 0;
 8221		err = niu_pci_eeprom_read16(np, start +  2);
 8222		if (err != 0x4952)
 8223			return 0;
 8224
 8225		/* Check for OBP image type.  */
 8226		err = niu_pci_eeprom_read(np, start + 20);
 8227		if (err < 0)
 8228			return 0;
 8229		if (err != 0x01) {
 8230			err = niu_pci_eeprom_read(np, ret + 2);
 8231			if (err < 0)
 8232				return 0;
 8233
 8234			start = ret + (err * 512);
 8235			continue;
 8236		}
 8237
 8238		err = niu_pci_eeprom_read16_swp(np, start + 8);
 8239		if (err < 0)
 8240			return err;
 8241		ret += err;
 8242
 8243		err = niu_pci_eeprom_read(np, ret + 0);
 8244		if (err != 0x82)
 8245			return 0;
 8246
 8247		return ret;
 8248	}
 8249
 8250	return 0;
 8251}
 8252
 8253static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
 8254{
 8255	if (!strcmp(phy_prop, "mif")) {
 8256		/* 1G copper, MII */
 8257		np->flags &= ~(NIU_FLAGS_FIBER |
 8258			       NIU_FLAGS_10G);
 8259		np->mac_xcvr = MAC_XCVR_MII;
 8260	} else if (!strcmp(phy_prop, "xgf")) {
 8261		/* 10G fiber, XPCS */
 8262		np->flags |= (NIU_FLAGS_10G |
 8263			      NIU_FLAGS_FIBER);
 8264		np->mac_xcvr = MAC_XCVR_XPCS;
 8265	} else if (!strcmp(phy_prop, "pcs")) {
 8266		/* 1G fiber, PCS */
 8267		np->flags &= ~NIU_FLAGS_10G;
 8268		np->flags |= NIU_FLAGS_FIBER;
 8269		np->mac_xcvr = MAC_XCVR_PCS;
 8270	} else if (!strcmp(phy_prop, "xgc")) {
 8271		/* 10G copper, XPCS */
 8272		np->flags |= NIU_FLAGS_10G;
 8273		np->flags &= ~NIU_FLAGS_FIBER;
 8274		np->mac_xcvr = MAC_XCVR_XPCS;
 8275	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
 8276		/* 10G Serdes or 1G Serdes, default to 10G */
 8277		np->flags |= NIU_FLAGS_10G;
 8278		np->flags &= ~NIU_FLAGS_FIBER;
 8279		np->flags |= NIU_FLAGS_XCVR_SERDES;
 8280		np->mac_xcvr = MAC_XCVR_XPCS;
 8281	} else {
 8282		return -EINVAL;
 8283	}
 8284	return 0;
 8285}
 8286
 8287static int niu_pci_vpd_get_nports(struct niu *np)
 8288{
 8289	int ports = 0;
 8290
 8291	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
 8292	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
 8293	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
 8294	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
 8295	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
 8296		ports = 4;
 8297	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
 8298		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
 8299		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
 8300		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
 8301		ports = 2;
 8302	}
 8303
 8304	return ports;
 8305}
 8306
 8307static void niu_pci_vpd_validate(struct niu *np)
 8308{
 8309	struct net_device *dev = np->dev;
 8310	struct niu_vpd *vpd = &np->vpd;
 8311	u8 val8;
 8312
 8313	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
 8314		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
 8315
 8316		np->flags &= ~NIU_FLAGS_VPD_VALID;
 8317		return;
 8318	}
 8319
 8320	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
 8321	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 8322		np->flags |= NIU_FLAGS_10G;
 8323		np->flags &= ~NIU_FLAGS_FIBER;
 8324		np->flags |= NIU_FLAGS_XCVR_SERDES;
 8325		np->mac_xcvr = MAC_XCVR_PCS;
 8326		if (np->port > 1) {
 8327			np->flags |= NIU_FLAGS_FIBER;
 8328			np->flags &= ~NIU_FLAGS_10G;
 8329		}
 8330		if (np->flags & NIU_FLAGS_10G)
 8331			np->mac_xcvr = MAC_XCVR_XPCS;
 8332	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 8333		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 8334			      NIU_FLAGS_HOTPLUG_PHY);
 8335	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
 8336		dev_err(np->device, "Illegal phy string [%s]\n",
 8337			np->vpd.phy_type);
 8338		dev_err(np->device, "Falling back to SPROM\n");
 8339		np->flags &= ~NIU_FLAGS_VPD_VALID;
 8340		return;
 8341	}
 8342
 8343	memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
 8344
 8345	val8 = dev->dev_addr[5];
 8346	dev->dev_addr[5] += np->port;
 8347	if (dev->dev_addr[5] < val8)
 8348		dev->dev_addr[4]++;
 8349}
 8350
 8351static int niu_pci_probe_sprom(struct niu *np)
 8352{
 8353	struct net_device *dev = np->dev;
 8354	int len, i;
 8355	u64 val, sum;
 8356	u8 val8;
 8357
 8358	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
 8359	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
 8360	len = val / 4;
 8361
 8362	np->eeprom_len = len;
 8363
 8364	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8365		     "SPROM: Image size %llu\n", (unsigned long long)val);
 8366
 8367	sum = 0;
 8368	for (i = 0; i < len; i++) {
 8369		val = nr64(ESPC_NCR(i));
 8370		sum += (val >>  0) & 0xff;
 8371		sum += (val >>  8) & 0xff;
 8372		sum += (val >> 16) & 0xff;
 8373		sum += (val >> 24) & 0xff;
 8374	}
 8375	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8376		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
 8377	if ((sum & 0xff) != 0xab) {
 8378		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
 8379		return -EINVAL;
 8380	}
 8381
 8382	val = nr64(ESPC_PHY_TYPE);
 8383	switch (np->port) {
 8384	case 0:
 8385		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
 8386			ESPC_PHY_TYPE_PORT0_SHIFT;
 8387		break;
 8388	case 1:
 8389		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
 8390			ESPC_PHY_TYPE_PORT1_SHIFT;
 8391		break;
 8392	case 2:
 8393		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
 8394			ESPC_PHY_TYPE_PORT2_SHIFT;
 8395		break;
 8396	case 3:
 8397		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
 8398			ESPC_PHY_TYPE_PORT3_SHIFT;
 8399		break;
 8400	default:
 8401		dev_err(np->device, "Bogus port number %u\n",
 8402			np->port);
 8403		return -EINVAL;
 8404	}
 8405	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8406		     "SPROM: PHY type %x\n", val8);
 8407
 8408	switch (val8) {
 8409	case ESPC_PHY_TYPE_1G_COPPER:
 8410		/* 1G copper, MII */
 8411		np->flags &= ~(NIU_FLAGS_FIBER |
 8412			       NIU_FLAGS_10G);
 8413		np->mac_xcvr = MAC_XCVR_MII;
 8414		break;
 8415
 8416	case ESPC_PHY_TYPE_1G_FIBER:
 8417		/* 1G fiber, PCS */
 8418		np->flags &= ~NIU_FLAGS_10G;
 8419		np->flags |= NIU_FLAGS_FIBER;
 8420		np->mac_xcvr = MAC_XCVR_PCS;
 8421		break;
 8422
 8423	case ESPC_PHY_TYPE_10G_COPPER:
 8424		/* 10G copper, XPCS */
 8425		np->flags |= NIU_FLAGS_10G;
 8426		np->flags &= ~NIU_FLAGS_FIBER;
 8427		np->mac_xcvr = MAC_XCVR_XPCS;
 8428		break;
 8429
 8430	case ESPC_PHY_TYPE_10G_FIBER:
 8431		/* 10G fiber, XPCS */
 8432		np->flags |= (NIU_FLAGS_10G |
 8433			      NIU_FLAGS_FIBER);
 8434		np->mac_xcvr = MAC_XCVR_XPCS;
 8435		break;
 8436
 8437	default:
 8438		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
 8439		return -EINVAL;
 8440	}
 8441
 8442	val = nr64(ESPC_MAC_ADDR0);
 8443	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8444		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
 8445	dev->dev_addr[0] = (val >>  0) & 0xff;
 8446	dev->dev_addr[1] = (val >>  8) & 0xff;
 8447	dev->dev_addr[2] = (val >> 16) & 0xff;
 8448	dev->dev_addr[3] = (val >> 24) & 0xff;
 8449
 8450	val = nr64(ESPC_MAC_ADDR1);
 8451	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8452		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
 8453	dev->dev_addr[4] = (val >>  0) & 0xff;
 8454	dev->dev_addr[5] = (val >>  8) & 0xff;
 8455
 8456	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
 8457		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
 8458			dev->dev_addr);
 8459		return -EINVAL;
 8460	}
 8461
 8462	val8 = dev->dev_addr[5];
 8463	dev->dev_addr[5] += np->port;
 8464	if (dev->dev_addr[5] < val8)
 8465		dev->dev_addr[4]++;
 8466
 8467	val = nr64(ESPC_MOD_STR_LEN);
 8468	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8469		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
 8470	if (val >= 8 * 4)
 8471		return -EINVAL;
 8472
 8473	for (i = 0; i < val; i += 4) {
 8474		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
 8475
 8476		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
 8477		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
 8478		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
 8479		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
 8480	}
 8481	np->vpd.model[val] = '\0';
 8482
 8483	val = nr64(ESPC_BD_MOD_STR_LEN);
 8484	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8485		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
 8486	if (val >= 4 * 4)
 8487		return -EINVAL;
 8488
 8489	for (i = 0; i < val; i += 4) {
 8490		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
 8491
 8492		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
 8493		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
 8494		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
 8495		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
 8496	}
 8497	np->vpd.board_model[val] = '\0';
 8498
 8499	np->vpd.mac_num =
 8500		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
 8501	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8502		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
 8503
 8504	return 0;
 8505}
 8506
 8507static int niu_get_and_validate_port(struct niu *np)
 8508{
 8509	struct niu_parent *parent = np->parent;
 8510
 8511	if (np->port <= 1)
 8512		np->flags |= NIU_FLAGS_XMAC;
 8513
 8514	if (!parent->num_ports) {
 8515		if (parent->plat_type == PLAT_TYPE_NIU) {
 8516			parent->num_ports = 2;
 8517		} else {
 8518			parent->num_ports = niu_pci_vpd_get_nports(np);
 8519			if (!parent->num_ports) {
 8520				/* Fall back to SPROM as last resort.
 8521				 * This will fail on most cards.
 8522				 */
 8523				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
 8524					ESPC_NUM_PORTS_MACS_VAL;
 8525
 8526				/* All of the current probing methods fail on
 8527				 * Maramba on-board parts.
 8528				 */
 8529				if (!parent->num_ports)
 8530					parent->num_ports = 4;
 8531			}
 8532		}
 8533	}
 8534
 8535	if (np->port >= parent->num_ports)
 8536		return -ENODEV;
 8537
 8538	return 0;
 8539}
 8540
 8541static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
 8542		      int dev_id_1, int dev_id_2, u8 phy_port, int type)
 8543{
 8544	u32 id = (dev_id_1 << 16) | dev_id_2;
 8545	u8 idx;
 8546
 8547	if (dev_id_1 < 0 || dev_id_2 < 0)
 8548		return 0;
 8549	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
 8550		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
 8551		 * test covers the 8706 as well.
 8552		 */
 8553		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
 8554		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
 8555			return 0;
 8556	} else {
 8557		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
 8558			return 0;
 8559	}
 8560
 8561	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
 8562		parent->index, id,
 8563		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
 8564		type == PHY_TYPE_PCS ? "PCS" : "MII",
 8565		phy_port);
 8566
 8567	if (p->cur[type] >= NIU_MAX_PORTS) {
 8568		pr_err("Too many PHY ports\n");
 8569		return -EINVAL;
 8570	}
 8571	idx = p->cur[type];
 8572	p->phy_id[type][idx] = id;
 8573	p->phy_port[type][idx] = phy_port;
 8574	p->cur[type] = idx + 1;
 8575	return 0;
 8576}
 8577
 8578static int port_has_10g(struct phy_probe_info *p, int port)
 8579{
 8580	int i;
 8581
 8582	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
 8583		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
 8584			return 1;
 8585	}
 8586	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
 8587		if (p->phy_port[PHY_TYPE_PCS][i] == port)
 8588			return 1;
 8589	}
 8590
 8591	return 0;
 8592}
 8593
 8594static int count_10g_ports(struct phy_probe_info *p, int *lowest)
 8595{
 8596	int port, cnt;
 8597
 8598	cnt = 0;
 8599	*lowest = 32;
 8600	for (port = 8; port < 32; port++) {
 8601		if (port_has_10g(p, port)) {
 8602			if (!cnt)
 8603				*lowest = port;
 8604			cnt++;
 8605		}
 8606	}
 8607
 8608	return cnt;
 8609}
 8610
 8611static int count_1g_ports(struct phy_probe_info *p, int *lowest)
 8612{
 8613	*lowest = 32;
 8614	if (p->cur[PHY_TYPE_MII])
 8615		*lowest = p->phy_port[PHY_TYPE_MII][0];
 8616
 8617	return p->cur[PHY_TYPE_MII];
 8618}
 8619
 8620static void niu_n2_divide_channels(struct niu_parent *parent)
 8621{
 8622	int num_ports = parent->num_ports;
 8623	int i;
 8624
 8625	for (i = 0; i < num_ports; i++) {
 8626		parent->rxchan_per_port[i] = (16 / num_ports);
 8627		parent->txchan_per_port[i] = (16 / num_ports);
 8628
 8629		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
 8630			parent->index, i,
 8631			parent->rxchan_per_port[i],
 8632			parent->txchan_per_port[i]);
 8633	}
 8634}
 8635
 8636static void niu_divide_channels(struct niu_parent *parent,
 8637				int num_10g, int num_1g)
 8638{
 8639	int num_ports = parent->num_ports;
 8640	int rx_chans_per_10g, rx_chans_per_1g;
 8641	int tx_chans_per_10g, tx_chans_per_1g;
 8642	int i, tot_rx, tot_tx;
 8643
 8644	if (!num_10g || !num_1g) {
 8645		rx_chans_per_10g = rx_chans_per_1g =
 8646			(NIU_NUM_RXCHAN / num_ports);
 8647		tx_chans_per_10g = tx_chans_per_1g =
 8648			(NIU_NUM_TXCHAN / num_ports);
 8649	} else {
 8650		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
 8651		rx_chans_per_10g = (NIU_NUM_RXCHAN -
 8652				    (rx_chans_per_1g * num_1g)) /
 8653			num_10g;
 8654
 8655		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
 8656		tx_chans_per_10g = (NIU_NUM_TXCHAN -
 8657				    (tx_chans_per_1g * num_1g)) /
 8658			num_10g;
 8659	}
 8660
 8661	tot_rx = tot_tx = 0;
 8662	for (i = 0; i < num_ports; i++) {
 8663		int type = phy_decode(parent->port_phy, i);
 8664
 8665		if (type == PORT_TYPE_10G) {
 8666			parent->rxchan_per_port[i] = rx_chans_per_10g;
 8667			parent->txchan_per_port[i] = tx_chans_per_10g;
 8668		} else {
 8669			parent->rxchan_per_port[i] = rx_chans_per_1g;
 8670			parent->txchan_per_port[i] = tx_chans_per_1g;
 8671		}
 8672		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
 8673			parent->index, i,
 8674			parent->rxchan_per_port[i],
 8675			parent->txchan_per_port[i]);
 8676		tot_rx += parent->rxchan_per_port[i];
 8677		tot_tx += parent->txchan_per_port[i];
 8678	}
 8679
 8680	if (tot_rx > NIU_NUM_RXCHAN) {
 8681		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
 8682		       parent->index, tot_rx);
 8683		for (i = 0; i < num_ports; i++)
 8684			parent->rxchan_per_port[i] = 1;
 8685	}
 8686	if (tot_tx > NIU_NUM_TXCHAN) {
 8687		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
 8688		       parent->index, tot_tx);
 8689		for (i = 0; i < num_ports; i++)
 8690			parent->txchan_per_port[i] = 1;
 8691	}
 8692	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
 8693		pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
 8694			parent->index, tot_rx, tot_tx);
 8695	}
 8696}
 8697
 8698static void niu_divide_rdc_groups(struct niu_parent *parent,
 8699				  int num_10g, int num_1g)
 8700{
 8701	int i, num_ports = parent->num_ports;
 8702	int rdc_group, rdc_groups_per_port;
 8703	int rdc_channel_base;
 8704
 8705	rdc_group = 0;
 8706	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
 8707
 8708	rdc_channel_base = 0;
 8709
 8710	for (i = 0; i < num_ports; i++) {
 8711		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
 8712		int grp, num_channels = parent->rxchan_per_port[i];
 8713		int this_channel_offset;
 8714
 8715		tp->first_table_num = rdc_group;
 8716		tp->num_tables = rdc_groups_per_port;
 8717		this_channel_offset = 0;
 8718		for (grp = 0; grp < tp->num_tables; grp++) {
 8719			struct rdc_table *rt = &tp->tables[grp];
 8720			int slot;
 8721
 8722			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
 8723				parent->index, i, tp->first_table_num + grp);
 8724			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
 8725				rt->rxdma_channel[slot] =
 8726					rdc_channel_base + this_channel_offset;
 8727
 8728				pr_cont("%d ", rt->rxdma_channel[slot]);
 8729
 8730				if (++this_channel_offset == num_channels)
 8731					this_channel_offset = 0;
 8732			}
 8733			pr_cont("]\n");
 8734		}
 8735
 8736		parent->rdc_default[i] = rdc_channel_base;
 8737
 8738		rdc_channel_base += num_channels;
 8739		rdc_group += rdc_groups_per_port;
 8740	}
 8741}
 8742
 8743static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
 8744			       struct phy_probe_info *info)
 8745{
 8746	unsigned long flags;
 8747	int port, err;
 8748
 8749	memset(info, 0, sizeof(*info));
 8750
 8751	/* Port 0 to 7 are reserved for onboard Serdes, probe the rest.  */
 8752	niu_lock_parent(np, flags);
 8753	err = 0;
 8754	for (port = 8; port < 32; port++) {
 8755		int dev_id_1, dev_id_2;
 8756
 8757		dev_id_1 = mdio_read(np, port,
 8758				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
 8759		dev_id_2 = mdio_read(np, port,
 8760				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
 8761		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8762				 PHY_TYPE_PMA_PMD);
 8763		if (err)
 8764			break;
 8765		dev_id_1 = mdio_read(np, port,
 8766				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
 8767		dev_id_2 = mdio_read(np, port,
 8768				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
 8769		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8770				 PHY_TYPE_PCS);
 8771		if (err)
 8772			break;
 8773		dev_id_1 = mii_read(np, port, MII_PHYSID1);
 8774		dev_id_2 = mii_read(np, port, MII_PHYSID2);
 8775		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8776				 PHY_TYPE_MII);
 8777		if (err)
 8778			break;
 8779	}
 8780	niu_unlock_parent(np, flags);
 8781
 8782	return err;
 8783}
 8784
 8785static int walk_phys(struct niu *np, struct niu_parent *parent)
 8786{
 8787	struct phy_probe_info *info = &parent->phy_probe_info;
 8788	int lowest_10g, lowest_1g;
 8789	int num_10g, num_1g;
 8790	u32 val;
 8791	int err;
 8792
 8793	num_10g = num_1g = 0;
 8794
 8795	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
 8796	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 8797		num_10g = 0;
 8798		num_1g = 2;
 8799		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
 8800		parent->num_ports = 4;
 8801		val = (phy_encode(PORT_TYPE_1G, 0) |
 8802		       phy_encode(PORT_TYPE_1G, 1) |
 8803		       phy_encode(PORT_TYPE_1G, 2) |
 8804		       phy_encode(PORT_TYPE_1G, 3));
 8805	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 8806		num_10g = 2;
 8807		num_1g = 0;
 8808		parent->num_ports = 2;
 8809		val = (phy_encode(PORT_TYPE_10G, 0) |
 8810		       phy_encode(PORT_TYPE_10G, 1));
 8811	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
 8812		   (parent->plat_type == PLAT_TYPE_NIU)) {
 8813		/* this is the Monza case */
 8814		if (np->flags & NIU_FLAGS_10G) {
 8815			val = (phy_encode(PORT_TYPE_10G, 0) |
 8816			       phy_encode(PORT_TYPE_10G, 1));
 8817		} else {
 8818			val = (phy_encode(PORT_TYPE_1G, 0) |
 8819			       phy_encode(PORT_TYPE_1G, 1));
 8820		}
 8821	} else {
 8822		err = fill_phy_probe_info(np, parent, info);
 8823		if (err)
 8824			return err;
 8825
 8826		num_10g = count_10g_ports(info, &lowest_10g);
 8827		num_1g = count_1g_ports(info, &lowest_1g);
 8828
 8829		switch ((num_10g << 4) | num_1g) {
 8830		case 0x24:
 8831			if (lowest_1g == 10)
 8832				parent->plat_type = PLAT_TYPE_VF_P0;
 8833			else if (lowest_1g == 26)
 8834				parent->plat_type = PLAT_TYPE_VF_P1;
 8835			else
 8836				goto unknown_vg_1g_port;
 8837
 8838			/* fallthru */
 8839		case 0x22:
 8840			val = (phy_encode(PORT_TYPE_10G, 0) |
 8841			       phy_encode(PORT_TYPE_10G, 1) |
 8842			       phy_encode(PORT_TYPE_1G, 2) |
 8843			       phy_encode(PORT_TYPE_1G, 3));
 8844			break;
 8845
 8846		case 0x20:
 8847			val = (phy_encode(PORT_TYPE_10G, 0) |
 8848			       phy_encode(PORT_TYPE_10G, 1));
 8849			break;
 8850
 8851		case 0x10:
 8852			val = phy_encode(PORT_TYPE_10G, np->port);
 8853			break;
 8854
 8855		case 0x14:
 8856			if (lowest_1g == 10)
 8857				parent->plat_type = PLAT_TYPE_VF_P0;
 8858			else if (lowest_1g == 26)
 8859				parent->plat_type = PLAT_TYPE_VF_P1;
 8860			else
 8861				goto unknown_vg_1g_port;
 8862
 8863			/* fallthru */
 8864		case 0x13:
 8865			if ((lowest_10g & 0x7) == 0)
 8866				val = (phy_encode(PORT_TYPE_10G, 0) |
 8867				       phy_encode(PORT_TYPE_1G, 1) |
 8868				       phy_encode(PORT_TYPE_1G, 2) |
 8869				       phy_encode(PORT_TYPE_1G, 3));
 8870			else
 8871				val = (phy_encode(PORT_TYPE_1G, 0) |
 8872				       phy_encode(PORT_TYPE_10G, 1) |
 8873				       phy_encode(PORT_TYPE_1G, 2) |
 8874				       phy_encode(PORT_TYPE_1G, 3));
 8875			break;
 8876
 8877		case 0x04:
 8878			if (lowest_1g == 10)
 8879				parent->plat_type = PLAT_TYPE_VF_P0;
 8880			else if (lowest_1g == 26)
 8881				parent->plat_type = PLAT_TYPE_VF_P1;
 8882			else
 8883				goto unknown_vg_1g_port;
 8884
 8885			val = (phy_encode(PORT_TYPE_1G, 0) |
 8886			       phy_encode(PORT_TYPE_1G, 1) |
 8887			       phy_encode(PORT_TYPE_1G, 2) |
 8888			       phy_encode(PORT_TYPE_1G, 3));
 8889			break;
 8890
 8891		default:
 8892			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
 8893			       num_10g, num_1g);
 8894			return -EINVAL;
 8895		}
 8896	}
 8897
 8898	parent->port_phy = val;
 8899
 8900	if (parent->plat_type == PLAT_TYPE_NIU)
 8901		niu_n2_divide_channels(parent);
 8902	else
 8903		niu_divide_channels(parent, num_10g, num_1g);
 8904
 8905	niu_divide_rdc_groups(parent, num_10g, num_1g);
 8906
 8907	return 0;
 8908
 8909unknown_vg_1g_port:
 8910	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
 8911	return -EINVAL;
 8912}
 8913
 8914static int niu_probe_ports(struct niu *np)
 8915{
 8916	struct niu_parent *parent = np->parent;
 8917	int err, i;
 8918
 8919	if (parent->port_phy == PORT_PHY_UNKNOWN) {
 8920		err = walk_phys(np, parent);
 8921		if (err)
 8922			return err;
 8923
 8924		niu_set_ldg_timer_res(np, 2);
 8925		for (i = 0; i <= LDN_MAX; i++)
 8926			niu_ldn_irq_enable(np, i, 0);
 8927	}
 8928
 8929	if (parent->port_phy == PORT_PHY_INVALID)
 8930		return -EINVAL;
 8931
 8932	return 0;
 8933}
 8934
 8935static int niu_classifier_swstate_init(struct niu *np)
 8936{
 8937	struct niu_classifier *cp = &np->clas;
 8938
 8939	cp->tcam_top = (u16) np->port;
 8940	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
 8941	cp->h1_init = 0xffffffff;
 8942	cp->h2_init = 0xffff;
 8943
 8944	return fflp_early_init(np);
 8945}
 8946
 8947static void niu_link_config_init(struct niu *np)
 8948{
 8949	struct niu_link_config *lp = &np->link_config;
 8950
 8951	lp->advertising = (ADVERTISED_10baseT_Half |
 8952			   ADVERTISED_10baseT_Full |
 8953			   ADVERTISED_100baseT_Half |
 8954			   ADVERTISED_100baseT_Full |
 8955			   ADVERTISED_1000baseT_Half |
 8956			   ADVERTISED_1000baseT_Full |
 8957			   ADVERTISED_10000baseT_Full |
 8958			   ADVERTISED_Autoneg);
 8959	lp->speed = lp->active_speed = SPEED_INVALID;
 8960	lp->duplex = DUPLEX_FULL;
 8961	lp->active_duplex = DUPLEX_INVALID;
 8962	lp->autoneg = 1;
 8963#if 0
 8964	lp->loopback_mode = LOOPBACK_MAC;
 8965	lp->active_speed = SPEED_10000;
 8966	lp->active_duplex = DUPLEX_FULL;
 8967#else
 8968	lp->loopback_mode = LOOPBACK_DISABLED;
 8969#endif
 8970}
 8971
 8972static int niu_init_mac_ipp_pcs_base(struct niu *np)
 8973{
 8974	switch (np->port) {
 8975	case 0:
 8976		np->mac_regs = np->regs + XMAC_PORT0_OFF;
 8977		np->ipp_off  = 0x00000;
 8978		np->pcs_off  = 0x04000;
 8979		np->xpcs_off = 0x02000;
 8980		break;
 8981
 8982	case 1:
 8983		np->mac_regs = np->regs + XMAC_PORT1_OFF;
 8984		np->ipp_off  = 0x08000;
 8985		np->pcs_off  = 0x0a000;
 8986		np->xpcs_off = 0x08000;
 8987		break;
 8988
 8989	case 2:
 8990		np->mac_regs = np->regs + BMAC_PORT2_OFF;
 8991		np->ipp_off  = 0x04000;
 8992		np->pcs_off  = 0x0e000;
 8993		np->xpcs_off = ~0UL;
 8994		break;
 8995
 8996	case 3:
 8997		np->mac_regs = np->regs + BMAC_PORT3_OFF;
 8998		np->ipp_off  = 0x0c000;
 8999		np->pcs_off  = 0x12000;
 9000		np->xpcs_off = ~0UL;
 9001		break;
 9002
 9003	default:
 9004		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
 9005		return -EINVAL;
 9006	}
 9007
 9008	return 0;
 9009}
 9010
 9011static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
 9012{
 9013	struct msix_entry msi_vec[NIU_NUM_LDG];
 9014	struct niu_parent *parent = np->parent;
 9015	struct pci_dev *pdev = np->pdev;
 9016	int i, num_irqs;
 9017	u8 first_ldg;
 9018
 9019	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
 9020	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
 9021		ldg_num_map[i] = first_ldg + i;
 9022
 9023	num_irqs = (parent->rxchan_per_port[np->port] +
 9024		    parent->txchan_per_port[np->port] +
 9025		    (np->port == 0 ? 3 : 1));
 9026	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
 9027
 9028	for (i = 0; i < num_irqs; i++) {
 9029		msi_vec[i].vector = 0;
 9030		msi_vec[i].entry = i;
 9031	}
 9032
 9033	num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
 9034	if (num_irqs < 0) {
 9035		np->flags &= ~NIU_FLAGS_MSIX;
 9036		return;
 9037	}
 9038
 9039	np->flags |= NIU_FLAGS_MSIX;
 9040	for (i = 0; i < num_irqs; i++)
 9041		np->ldg[i].irq = msi_vec[i].vector;
 9042	np->num_ldg = num_irqs;
 9043}
 9044
 9045static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
 9046{
 9047#ifdef CONFIG_SPARC64
 9048	struct platform_device *op = np->op;
 9049	const u32 *int_prop;
 9050	int i;
 9051
 9052	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
 9053	if (!int_prop)
 9054		return -ENODEV;
 9055
 9056	for (i = 0; i < op->archdata.num_irqs; i++) {
 9057		ldg_num_map[i] = int_prop[i];
 9058		np->ldg[i].irq = op->archdata.irqs[i];
 9059	}
 9060
 9061	np->num_ldg = op->archdata.num_irqs;
 9062
 9063	return 0;
 9064#else
 9065	return -EINVAL;
 9066#endif
 9067}
 9068
 9069static int niu_ldg_init(struct niu *np)
 9070{
 9071	struct niu_parent *parent = np->parent;
 9072	u8 ldg_num_map[NIU_NUM_LDG];
 9073	int first_chan, num_chan;
 9074	int i, err, ldg_rotor;
 9075	u8 port;
 9076
 9077	np->num_ldg = 1;
 9078	np->ldg[0].irq = np->dev->irq;
 9079	if (parent->plat_type == PLAT_TYPE_NIU) {
 9080		err = niu_n2_irq_init(np, ldg_num_map);
 9081		if (err)
 9082			return err;
 9083	} else
 9084		niu_try_msix(np, ldg_num_map);
 9085
 9086	port = np->port;
 9087	for (i = 0; i < np->num_ldg; i++) {
 9088		struct niu_ldg *lp = &np->ldg[i];
 9089
 9090		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
 9091
 9092		lp->np = np;
 9093		lp->ldg_num = ldg_num_map[i];
 9094		lp->timer = 2; /* XXX */
 9095
 9096		/* On N2 NIU the firmware has setup the SID mappings so they go
 9097		 * to the correct values that will route the LDG to the proper
 9098		 * interrupt in the NCU interrupt table.
 9099		 */
 9100		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 9101			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
 9102			if (err)
 9103				return err;
 9104		}
 9105	}
 9106
 9107	/* We adopt the LDG assignment ordering used by the N2 NIU
 9108	 * 'interrupt' properties because that simplifies a lot of
 9109	 * things.  This ordering is:
 9110	 *
 9111	 *	MAC
 9112	 *	MIF	(if port zero)
 9113	 *	SYSERR	(if port zero)
 9114	 *	RX channels
 9115	 *	TX channels
 9116	 */
 9117
 9118	ldg_rotor = 0;
 9119
 9120	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
 9121				  LDN_MAC(port));
 9122	if (err)
 9123		return err;
 9124
 9125	ldg_rotor++;
 9126	if (ldg_rotor == np->num_ldg)
 9127		ldg_rotor = 0;
 9128
 9129	if (port == 0) {
 9130		err = niu_ldg_assign_ldn(np, parent,
 9131					 ldg_num_map[ldg_rotor],
 9132					 LDN_MIF);
 9133		if (err)
 9134			return err;
 9135
 9136		ldg_rotor++;
 9137		if (ldg_rotor == np->num_ldg)
 9138			ldg_rotor = 0;
 9139
 9140		err = niu_ldg_assign_ldn(np, parent,
 9141					 ldg_num_map[ldg_rotor],
 9142					 LDN_DEVICE_ERROR);
 9143		if (err)
 9144			return err;
 9145
 9146		ldg_rotor++;
 9147		if (ldg_rotor == np->num_ldg)
 9148			ldg_rotor = 0;
 9149
 9150	}
 9151
 9152	first_chan = 0;
 9153	for (i = 0; i < port; i++)
 9154		first_chan += parent->rxchan_per_port[i];
 9155	num_chan = parent->rxchan_per_port[port];
 9156
 9157	for (i = first_chan; i < (first_chan + num_chan); i++) {
 9158		err = niu_ldg_assign_ldn(np, parent,
 9159					 ldg_num_map[ldg_rotor],
 9160					 LDN_RXDMA(i));
 9161		if (err)
 9162			return err;
 9163		ldg_rotor++;
 9164		if (ldg_rotor == np->num_ldg)
 9165			ldg_rotor = 0;
 9166	}
 9167
 9168	first_chan = 0;
 9169	for (i = 0; i < port; i++)
 9170		first_chan += parent->txchan_per_port[i];
 9171	num_chan = parent->txchan_per_port[port];
 9172	for (i = first_chan; i < (first_chan + num_chan); i++) {
 9173		err = niu_ldg_assign_ldn(np, parent,
 9174					 ldg_num_map[ldg_rotor],
 9175					 LDN_TXDMA(i));
 9176		if (err)
 9177			return err;
 9178		ldg_rotor++;
 9179		if (ldg_rotor == np->num_ldg)
 9180			ldg_rotor = 0;
 9181	}
 9182
 9183	return 0;
 9184}
 9185
 9186static void niu_ldg_free(struct niu *np)
 9187{
 9188	if (np->flags & NIU_FLAGS_MSIX)
 9189		pci_disable_msix(np->pdev);
 9190}
 9191
 9192static int niu_get_of_props(struct niu *np)
 9193{
 9194#ifdef CONFIG_SPARC64
 9195	struct net_device *dev = np->dev;
 9196	struct device_node *dp;
 9197	const char *phy_type;
 9198	const u8 *mac_addr;
 9199	const char *model;
 9200	int prop_len;
 9201
 9202	if (np->parent->plat_type == PLAT_TYPE_NIU)
 9203		dp = np->op->dev.of_node;
 9204	else
 9205		dp = pci_device_to_OF_node(np->pdev);
 9206
 9207	phy_type = of_get_property(dp, "phy-type", &prop_len);
 9208	if (!phy_type) {
 9209		netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp);
 9210		return -EINVAL;
 9211	}
 9212
 9213	if (!strcmp(phy_type, "none"))
 9214		return -ENODEV;
 9215
 9216	strcpy(np->vpd.phy_type, phy_type);
 9217
 9218	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
 9219		netdev_err(dev, "%pOF: Illegal phy string [%s]\n",
 9220			   dp, np->vpd.phy_type);
 9221		return -EINVAL;
 9222	}
 9223
 9224	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
 9225	if (!mac_addr) {
 9226		netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n",
 9227			   dp);
 9228		return -EINVAL;
 9229	}
 9230	if (prop_len != dev->addr_len) {
 9231		netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n",
 9232			   dp, prop_len);
 9233	}
 9234	memcpy(dev->dev_addr, mac_addr, dev->addr_len);
 9235	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
 9236		netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp);
 9237		netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr);
 9238		return -EINVAL;
 9239	}
 9240
 9241	model = of_get_property(dp, "model", &prop_len);
 9242
 9243	if (model)
 9244		strcpy(np->vpd.model, model);
 9245
 9246	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
 9247		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 9248			NIU_FLAGS_HOTPLUG_PHY);
 9249	}
 9250
 9251	return 0;
 9252#else
 9253	return -EINVAL;
 9254#endif
 9255}
 9256
 9257static int niu_get_invariants(struct niu *np)
 9258{
 9259	int err, have_props;
 9260	u32 offset;
 9261
 9262	err = niu_get_of_props(np);
 9263	if (err == -ENODEV)
 9264		return err;
 9265
 9266	have_props = !err;
 9267
 9268	err = niu_init_mac_ipp_pcs_base(np);
 9269	if (err)
 9270		return err;
 9271
 9272	if (have_props) {
 9273		err = niu_get_and_validate_port(np);
 9274		if (err)
 9275			return err;
 9276
 9277	} else  {
 9278		if (np->parent->plat_type == PLAT_TYPE_NIU)
 9279			return -EINVAL;
 9280
 9281		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
 9282		offset = niu_pci_vpd_offset(np);
 9283		netif_printk(np, probe, KERN_DEBUG, np->dev,
 9284			     "%s() VPD offset [%08x]\n", __func__, offset);
 9285		if (offset)
 9286			niu_pci_vpd_fetch(np, offset);
 9287		nw64(ESPC_PIO_EN, 0);
 9288
 9289		if (np->flags & NIU_FLAGS_VPD_VALID) {
 9290			niu_pci_vpd_validate(np);
 9291			err = niu_get_and_validate_port(np);
 9292			if (err)
 9293				return err;
 9294		}
 9295
 9296		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
 9297			err = niu_get_and_validate_port(np);
 9298			if (err)
 9299				return err;
 9300			err = niu_pci_probe_sprom(np);
 9301			if (err)
 9302				return err;
 9303		}
 9304	}
 9305
 9306	err = niu_probe_ports(np);
 9307	if (err)
 9308		return err;
 9309
 9310	niu_ldg_init(np);
 9311
 9312	niu_classifier_swstate_init(np);
 9313	niu_link_config_init(np);
 9314
 9315	err = niu_determine_phy_disposition(np);
 9316	if (!err)
 9317		err = niu_init_link(np);
 9318
 9319	return err;
 9320}
 9321
 9322static LIST_HEAD(niu_parent_list);
 9323static DEFINE_MUTEX(niu_parent_lock);
 9324static int niu_parent_index;
 9325
 9326static ssize_t show_port_phy(struct device *dev,
 9327			     struct device_attribute *attr, char *buf)
 9328{
 9329	struct platform_device *plat_dev = to_platform_device(dev);
 9330	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9331	u32 port_phy = p->port_phy;
 9332	char *orig_buf = buf;
 9333	int i;
 9334
 9335	if (port_phy == PORT_PHY_UNKNOWN ||
 9336	    port_phy == PORT_PHY_INVALID)
 9337		return 0;
 9338
 9339	for (i = 0; i < p->num_ports; i++) {
 9340		const char *type_str;
 9341		int type;
 9342
 9343		type = phy_decode(port_phy, i);
 9344		if (type == PORT_TYPE_10G)
 9345			type_str = "10G";
 9346		else
 9347			type_str = "1G";
 9348		buf += sprintf(buf,
 9349			       (i == 0) ? "%s" : " %s",
 9350			       type_str);
 9351	}
 9352	buf += sprintf(buf, "\n");
 9353	return buf - orig_buf;
 9354}
 9355
 9356static ssize_t show_plat_type(struct device *dev,
 9357			      struct device_attribute *attr, char *buf)
 9358{
 9359	struct platform_device *plat_dev = to_platform_device(dev);
 9360	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9361	const char *type_str;
 9362
 9363	switch (p->plat_type) {
 9364	case PLAT_TYPE_ATLAS:
 9365		type_str = "atlas";
 9366		break;
 9367	case PLAT_TYPE_NIU:
 9368		type_str = "niu";
 9369		break;
 9370	case PLAT_TYPE_VF_P0:
 9371		type_str = "vf_p0";
 9372		break;
 9373	case PLAT_TYPE_VF_P1:
 9374		type_str = "vf_p1";
 9375		break;
 9376	default:
 9377		type_str = "unknown";
 9378		break;
 9379	}
 9380
 9381	return sprintf(buf, "%s\n", type_str);
 9382}
 9383
 9384static ssize_t __show_chan_per_port(struct device *dev,
 9385				    struct device_attribute *attr, char *buf,
 9386				    int rx)
 9387{
 9388	struct platform_device *plat_dev = to_platform_device(dev);
 9389	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9390	char *orig_buf = buf;
 9391	u8 *arr;
 9392	int i;
 9393
 9394	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
 9395
 9396	for (i = 0; i < p->num_ports; i++) {
 9397		buf += sprintf(buf,
 9398			       (i == 0) ? "%d" : " %d",
 9399			       arr[i]);
 9400	}
 9401	buf += sprintf(buf, "\n");
 9402
 9403	return buf - orig_buf;
 9404}
 9405
 9406static ssize_t show_rxchan_per_port(struct device *dev,
 9407				    struct device_attribute *attr, char *buf)
 9408{
 9409	return __show_chan_per_port(dev, attr, buf, 1);
 9410}
 9411
 9412static ssize_t show_txchan_per_port(struct device *dev,
 9413				    struct device_attribute *attr, char *buf)
 9414{
 9415	return __show_chan_per_port(dev, attr, buf, 1);
 9416}
 9417
 9418static ssize_t show_num_ports(struct device *dev,
 9419			      struct device_attribute *attr, char *buf)
 9420{
 9421	struct platform_device *plat_dev = to_platform_device(dev);
 9422	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9423
 9424	return sprintf(buf, "%d\n", p->num_ports);
 9425}
 9426
 9427static struct device_attribute niu_parent_attributes[] = {
 9428	__ATTR(port_phy, 0444, show_port_phy, NULL),
 9429	__ATTR(plat_type, 0444, show_plat_type, NULL),
 9430	__ATTR(rxchan_per_port, 0444, show_rxchan_per_port, NULL),
 9431	__ATTR(txchan_per_port, 0444, show_txchan_per_port, NULL),
 9432	__ATTR(num_ports, 0444, show_num_ports, NULL),
 9433	{}
 9434};
 9435
 9436static struct niu_parent *niu_new_parent(struct niu *np,
 9437					 union niu_parent_id *id, u8 ptype)
 9438{
 9439	struct platform_device *plat_dev;
 9440	struct niu_parent *p;
 9441	int i;
 9442
 9443	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
 9444						   NULL, 0);
 9445	if (IS_ERR(plat_dev))
 9446		return NULL;
 9447
 9448	for (i = 0; niu_parent_attributes[i].attr.name; i++) {
 9449		int err = device_create_file(&plat_dev->dev,
 9450					     &niu_parent_attributes[i]);
 9451		if (err)
 9452			goto fail_unregister;
 9453	}
 9454
 9455	p = kzalloc(sizeof(*p), GFP_KERNEL);
 9456	if (!p)
 9457		goto fail_unregister;
 9458
 9459	p->index = niu_parent_index++;
 9460
 9461	plat_dev->dev.platform_data = p;
 9462	p->plat_dev = plat_dev;
 9463
 9464	memcpy(&p->id, id, sizeof(*id));
 9465	p->plat_type = ptype;
 9466	INIT_LIST_HEAD(&p->list);
 9467	atomic_set(&p->refcnt, 0);
 9468	list_add(&p->list, &niu_parent_list);
 9469	spin_lock_init(&p->lock);
 9470
 9471	p->rxdma_clock_divider = 7500;
 9472
 9473	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
 9474	if (p->plat_type == PLAT_TYPE_NIU)
 9475		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
 9476
 9477	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
 9478		int index = i - CLASS_CODE_USER_PROG1;
 9479
 9480		p->tcam_key[index] = TCAM_KEY_TSEL;
 9481		p->flow_key[index] = (FLOW_KEY_IPSA |
 9482				      FLOW_KEY_IPDA |
 9483				      FLOW_KEY_PROTO |
 9484				      (FLOW_KEY_L4_BYTE12 <<
 9485				       FLOW_KEY_L4_0_SHIFT) |
 9486				      (FLOW_KEY_L4_BYTE12 <<
 9487				       FLOW_KEY_L4_1_SHIFT));
 9488	}
 9489
 9490	for (i = 0; i < LDN_MAX + 1; i++)
 9491		p->ldg_map[i] = LDG_INVALID;
 9492
 9493	return p;
 9494
 9495fail_unregister:
 9496	platform_device_unregister(plat_dev);
 9497	return NULL;
 9498}
 9499
 9500static struct niu_parent *niu_get_parent(struct niu *np,
 9501					 union niu_parent_id *id, u8 ptype)
 9502{
 9503	struct niu_parent *p, *tmp;
 9504	int port = np->port;
 9505
 9506	mutex_lock(&niu_parent_lock);
 9507	p = NULL;
 9508	list_for_each_entry(tmp, &niu_parent_list, list) {
 9509		if (!memcmp(id, &tmp->id, sizeof(*id))) {
 9510			p = tmp;
 9511			break;
 9512		}
 9513	}
 9514	if (!p)
 9515		p = niu_new_parent(np, id, ptype);
 9516
 9517	if (p) {
 9518		char port_name[8];
 9519		int err;
 9520
 9521		sprintf(port_name, "port%d", port);
 9522		err = sysfs_create_link(&p->plat_dev->dev.kobj,
 9523					&np->device->kobj,
 9524					port_name);
 9525		if (!err) {
 9526			p->ports[port] = np;
 9527			atomic_inc(&p->refcnt);
 9528		}
 9529	}
 9530	mutex_unlock(&niu_parent_lock);
 9531
 9532	return p;
 9533}
 9534
 9535static void niu_put_parent(struct niu *np)
 9536{
 9537	struct niu_parent *p = np->parent;
 9538	u8 port = np->port;
 9539	char port_name[8];
 9540
 9541	BUG_ON(!p || p->ports[port] != np);
 9542
 9543	netif_printk(np, probe, KERN_DEBUG, np->dev,
 9544		     "%s() port[%u]\n", __func__, port);
 9545
 9546	sprintf(port_name, "port%d", port);
 9547
 9548	mutex_lock(&niu_parent_lock);
 9549
 9550	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
 9551
 9552	p->ports[port] = NULL;
 9553	np->parent = NULL;
 9554
 9555	if (atomic_dec_and_test(&p->refcnt)) {
 9556		list_del(&p->list);
 9557		platform_device_unregister(p->plat_dev);
 9558	}
 9559
 9560	mutex_unlock(&niu_parent_lock);
 9561}
 9562
 9563static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
 9564				    u64 *handle, gfp_t flag)
 9565{
 9566	dma_addr_t dh;
 9567	void *ret;
 9568
 9569	ret = dma_alloc_coherent(dev, size, &dh, flag);
 9570	if (ret)
 9571		*handle = dh;
 9572	return ret;
 9573}
 9574
 9575static void niu_pci_free_coherent(struct device *dev, size_t size,
 9576				  void *cpu_addr, u64 handle)
 9577{
 9578	dma_free_coherent(dev, size, cpu_addr, handle);
 9579}
 9580
 9581static u64 niu_pci_map_page(struct device *dev, struct page *page,
 9582			    unsigned long offset, size_t size,
 9583			    enum dma_data_direction direction)
 9584{
 9585	return dma_map_page(dev, page, offset, size, direction);
 9586}
 9587
 9588static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
 9589			       size_t size, enum dma_data_direction direction)
 9590{
 9591	dma_unmap_page(dev, dma_address, size, direction);
 9592}
 9593
 9594static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
 9595			      size_t size,
 9596			      enum dma_data_direction direction)
 9597{
 9598	return dma_map_single(dev, cpu_addr, size, direction);
 9599}
 9600
 9601static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
 9602				 size_t size,
 9603				 enum dma_data_direction direction)
 9604{
 9605	dma_unmap_single(dev, dma_address, size, direction);
 9606}
 9607
 9608static const struct niu_ops niu_pci_ops = {
 9609	.alloc_coherent	= niu_pci_alloc_coherent,
 9610	.free_coherent	= niu_pci_free_coherent,
 9611	.map_page	= niu_pci_map_page,
 9612	.unmap_page	= niu_pci_unmap_page,
 9613	.map_single	= niu_pci_map_single,
 9614	.unmap_single	= niu_pci_unmap_single,
 9615};
 9616
 9617static void niu_driver_version(void)
 9618{
 9619	static int niu_version_printed;
 9620
 9621	if (niu_version_printed++ == 0)
 9622		pr_info("%s", version);
 9623}
 9624
 9625static struct net_device *niu_alloc_and_init(struct device *gen_dev,
 9626					     struct pci_dev *pdev,
 9627					     struct platform_device *op,
 9628					     const struct niu_ops *ops, u8 port)
 9629{
 9630	struct net_device *dev;
 9631	struct niu *np;
 9632
 9633	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
 9634	if (!dev)
 9635		return NULL;
 9636
 9637	SET_NETDEV_DEV(dev, gen_dev);
 9638
 9639	np = netdev_priv(dev);
 9640	np->dev = dev;
 9641	np->pdev = pdev;
 9642	np->op = op;
 9643	np->device = gen_dev;
 9644	np->ops = ops;
 9645
 9646	np->msg_enable = niu_debug;
 9647
 9648	spin_lock_init(&np->lock);
 9649	INIT_WORK(&np->reset_task, niu_reset_task);
 9650
 9651	np->port = port;
 9652
 9653	return dev;
 9654}
 9655
 9656static const struct net_device_ops niu_netdev_ops = {
 9657	.ndo_open		= niu_open,
 9658	.ndo_stop		= niu_close,
 9659	.ndo_start_xmit		= niu_start_xmit,
 9660	.ndo_get_stats64	= niu_get_stats,
 9661	.ndo_set_rx_mode	= niu_set_rx_mode,
 9662	.ndo_validate_addr	= eth_validate_addr,
 9663	.ndo_set_mac_address	= niu_set_mac_addr,
 9664	.ndo_do_ioctl		= niu_ioctl,
 9665	.ndo_tx_timeout		= niu_tx_timeout,
 9666	.ndo_change_mtu		= niu_change_mtu,
 9667};
 9668
 9669static void niu_assign_netdev_ops(struct net_device *dev)
 9670{
 9671	dev->netdev_ops = &niu_netdev_ops;
 9672	dev->ethtool_ops = &niu_ethtool_ops;
 9673	dev->watchdog_timeo = NIU_TX_TIMEOUT;
 9674}
 9675
 9676static void niu_device_announce(struct niu *np)
 9677{
 9678	struct net_device *dev = np->dev;
 9679
 9680	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
 9681
 9682	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
 9683		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
 9684				dev->name,
 9685				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
 9686				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
 9687				(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
 9688				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
 9689				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
 9690				np->vpd.phy_type);
 9691	} else {
 9692		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
 9693				dev->name,
 9694				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
 9695				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
 9696				(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
 9697				 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
 9698				  "COPPER")),
 9699				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
 9700				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
 9701				np->vpd.phy_type);
 9702	}
 9703}
 9704
 9705static void niu_set_basic_features(struct net_device *dev)
 9706{
 9707	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
 9708	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
 9709}
 9710
 9711static int niu_pci_init_one(struct pci_dev *pdev,
 9712			    const struct pci_device_id *ent)
 9713{
 9714	union niu_parent_id parent_id;
 9715	struct net_device *dev;
 9716	struct niu *np;
 9717	int err;
 9718	u64 dma_mask;
 9719
 9720	niu_driver_version();
 9721
 9722	err = pci_enable_device(pdev);
 9723	if (err) {
 9724		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 9725		return err;
 9726	}
 9727
 9728	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
 9729	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 9730		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
 9731		err = -ENODEV;
 9732		goto err_out_disable_pdev;
 9733	}
 9734
 9735	err = pci_request_regions(pdev, DRV_MODULE_NAME);
 9736	if (err) {
 9737		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
 9738		goto err_out_disable_pdev;
 9739	}
 9740
 9741	if (!pci_is_pcie(pdev)) {
 9742		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
 9743		err = -ENODEV;
 9744		goto err_out_free_res;
 9745	}
 9746
 9747	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
 9748				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
 9749	if (!dev) {
 9750		err = -ENOMEM;
 9751		goto err_out_free_res;
 9752	}
 9753	np = netdev_priv(dev);
 9754
 9755	memset(&parent_id, 0, sizeof(parent_id));
 9756	parent_id.pci.domain = pci_domain_nr(pdev->bus);
 9757	parent_id.pci.bus = pdev->bus->number;
 9758	parent_id.pci.device = PCI_SLOT(pdev->devfn);
 9759
 9760	np->parent = niu_get_parent(np, &parent_id,
 9761				    PLAT_TYPE_ATLAS);
 9762	if (!np->parent) {
 9763		err = -ENOMEM;
 9764		goto err_out_free_dev;
 9765	}
 9766
 9767	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
 9768		PCI_EXP_DEVCTL_NOSNOOP_EN,
 9769		PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
 9770		PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE |
 9771		PCI_EXP_DEVCTL_RELAX_EN);
 9772
 9773	dma_mask = DMA_BIT_MASK(44);
 9774	err = pci_set_dma_mask(pdev, dma_mask);
 9775	if (!err) {
 9776		dev->features |= NETIF_F_HIGHDMA;
 9777		err = pci_set_consistent_dma_mask(pdev, dma_mask);
 9778		if (err) {
 9779			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
 9780			goto err_out_release_parent;
 9781		}
 9782	}
 9783	if (err) {
 9784		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 9785		if (err) {
 9786			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
 9787			goto err_out_release_parent;
 9788		}
 9789	}
 9790
 9791	niu_set_basic_features(dev);
 9792
 9793	dev->priv_flags |= IFF_UNICAST_FLT;
 9794
 9795	np->regs = pci_ioremap_bar(pdev, 0);
 9796	if (!np->regs) {
 9797		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
 9798		err = -ENOMEM;
 9799		goto err_out_release_parent;
 9800	}
 9801
 9802	pci_set_master(pdev);
 9803	pci_save_state(pdev);
 9804
 9805	dev->irq = pdev->irq;
 9806
 9807	/* MTU range: 68 - 9216 */
 9808	dev->min_mtu = ETH_MIN_MTU;
 9809	dev->max_mtu = NIU_MAX_MTU;
 9810
 9811	niu_assign_netdev_ops(dev);
 9812
 9813	err = niu_get_invariants(np);
 9814	if (err) {
 9815		if (err != -ENODEV)
 9816			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
 9817		goto err_out_iounmap;
 9818	}
 9819
 9820	err = register_netdev(dev);
 9821	if (err) {
 9822		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
 9823		goto err_out_iounmap;
 9824	}
 9825
 9826	pci_set_drvdata(pdev, dev);
 9827
 9828	niu_device_announce(np);
 9829
 9830	return 0;
 9831
 9832err_out_iounmap:
 9833	if (np->regs) {
 9834		iounmap(np->regs);
 9835		np->regs = NULL;
 9836	}
 9837
 9838err_out_release_parent:
 9839	niu_put_parent(np);
 9840
 9841err_out_free_dev:
 9842	free_netdev(dev);
 9843
 9844err_out_free_res:
 9845	pci_release_regions(pdev);
 9846
 9847err_out_disable_pdev:
 9848	pci_disable_device(pdev);
 9849
 9850	return err;
 9851}
 9852
 9853static void niu_pci_remove_one(struct pci_dev *pdev)
 9854{
 9855	struct net_device *dev = pci_get_drvdata(pdev);
 9856
 9857	if (dev) {
 9858		struct niu *np = netdev_priv(dev);
 9859
 9860		unregister_netdev(dev);
 9861		if (np->regs) {
 9862			iounmap(np->regs);
 9863			np->regs = NULL;
 9864		}
 9865
 9866		niu_ldg_free(np);
 9867
 9868		niu_put_parent(np);
 9869
 9870		free_netdev(dev);
 9871		pci_release_regions(pdev);
 9872		pci_disable_device(pdev);
 9873	}
 9874}
 9875
 9876static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
 9877{
 9878	struct net_device *dev = pci_get_drvdata(pdev);
 9879	struct niu *np = netdev_priv(dev);
 9880	unsigned long flags;
 9881
 9882	if (!netif_running(dev))
 9883		return 0;
 9884
 9885	flush_work(&np->reset_task);
 9886	niu_netif_stop(np);
 9887
 9888	del_timer_sync(&np->timer);
 9889
 9890	spin_lock_irqsave(&np->lock, flags);
 9891	niu_enable_interrupts(np, 0);
 9892	spin_unlock_irqrestore(&np->lock, flags);
 9893
 9894	netif_device_detach(dev);
 9895
 9896	spin_lock_irqsave(&np->lock, flags);
 9897	niu_stop_hw(np);
 9898	spin_unlock_irqrestore(&np->lock, flags);
 9899
 9900	pci_save_state(pdev);
 9901
 9902	return 0;
 9903}
 9904
 9905static int niu_resume(struct pci_dev *pdev)
 9906{
 9907	struct net_device *dev = pci_get_drvdata(pdev);
 9908	struct niu *np = netdev_priv(dev);
 9909	unsigned long flags;
 9910	int err;
 9911
 9912	if (!netif_running(dev))
 9913		return 0;
 9914
 9915	pci_restore_state(pdev);
 9916
 9917	netif_device_attach(dev);
 9918
 9919	spin_lock_irqsave(&np->lock, flags);
 9920
 9921	err = niu_init_hw(np);
 9922	if (!err) {
 9923		np->timer.expires = jiffies + HZ;
 9924		add_timer(&np->timer);
 9925		niu_netif_start(np);
 9926	}
 9927
 9928	spin_unlock_irqrestore(&np->lock, flags);
 9929
 9930	return err;
 9931}
 9932
 9933static struct pci_driver niu_pci_driver = {
 9934	.name		= DRV_MODULE_NAME,
 9935	.id_table	= niu_pci_tbl,
 9936	.probe		= niu_pci_init_one,
 9937	.remove		= niu_pci_remove_one,
 9938	.suspend	= niu_suspend,
 9939	.resume		= niu_resume,
 9940};
 9941
 9942#ifdef CONFIG_SPARC64
 9943static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
 9944				     u64 *dma_addr, gfp_t flag)
 9945{
 9946	unsigned long order = get_order(size);
 9947	unsigned long page = __get_free_pages(flag, order);
 9948
 9949	if (page == 0UL)
 9950		return NULL;
 9951	memset((char *)page, 0, PAGE_SIZE << order);
 9952	*dma_addr = __pa(page);
 9953
 9954	return (void *) page;
 9955}
 9956
 9957static void niu_phys_free_coherent(struct device *dev, size_t size,
 9958				   void *cpu_addr, u64 handle)
 9959{
 9960	unsigned long order = get_order(size);
 9961
 9962	free_pages((unsigned long) cpu_addr, order);
 9963}
 9964
 9965static u64 niu_phys_map_page(struct device *dev, struct page *page,
 9966			     unsigned long offset, size_t size,
 9967			     enum dma_data_direction direction)
 9968{
 9969	return page_to_phys(page) + offset;
 9970}
 9971
 9972static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
 9973				size_t size, enum dma_data_direction direction)
 9974{
 9975	/* Nothing to do.  */
 9976}
 9977
 9978static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
 9979			       size_t size,
 9980			       enum dma_data_direction direction)
 9981{
 9982	return __pa(cpu_addr);
 9983}
 9984
 9985static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
 9986				  size_t size,
 9987				  enum dma_data_direction direction)
 9988{
 9989	/* Nothing to do.  */
 9990}
 9991
 9992static const struct niu_ops niu_phys_ops = {
 9993	.alloc_coherent	= niu_phys_alloc_coherent,
 9994	.free_coherent	= niu_phys_free_coherent,
 9995	.map_page	= niu_phys_map_page,
 9996	.unmap_page	= niu_phys_unmap_page,
 9997	.map_single	= niu_phys_map_single,
 9998	.unmap_single	= niu_phys_unmap_single,
 9999};
10000
10001static int niu_of_probe(struct platform_device *op)
10002{
10003	union niu_parent_id parent_id;
10004	struct net_device *dev;
10005	struct niu *np;
10006	const u32 *reg;
10007	int err;
10008
10009	niu_driver_version();
10010
10011	reg = of_get_property(op->dev.of_node, "reg", NULL);
10012	if (!reg) {
10013		dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n",
10014			op->dev.of_node);
10015		return -ENODEV;
10016	}
10017
10018	dev = niu_alloc_and_init(&op->dev, NULL, op,
10019				 &niu_phys_ops, reg[0] & 0x1);
10020	if (!dev) {
10021		err = -ENOMEM;
10022		goto err_out;
10023	}
10024	np = netdev_priv(dev);
10025
10026	memset(&parent_id, 0, sizeof(parent_id));
10027	parent_id.of = of_get_parent(op->dev.of_node);
10028
10029	np->parent = niu_get_parent(np, &parent_id,
10030				    PLAT_TYPE_NIU);
10031	if (!np->parent) {
10032		err = -ENOMEM;
10033		goto err_out_free_dev;
10034	}
10035
10036	niu_set_basic_features(dev);
10037
10038	np->regs = of_ioremap(&op->resource[1], 0,
10039			      resource_size(&op->resource[1]),
10040			      "niu regs");
10041	if (!np->regs) {
10042		dev_err(&op->dev, "Cannot map device registers, aborting\n");
10043		err = -ENOMEM;
10044		goto err_out_release_parent;
10045	}
10046
10047	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
10048				    resource_size(&op->resource[2]),
10049				    "niu vregs-1");
10050	if (!np->vir_regs_1) {
10051		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
10052		err = -ENOMEM;
10053		goto err_out_iounmap;
10054	}
10055
10056	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
10057				    resource_size(&op->resource[3]),
10058				    "niu vregs-2");
10059	if (!np->vir_regs_2) {
10060		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
10061		err = -ENOMEM;
10062		goto err_out_iounmap;
10063	}
10064
10065	niu_assign_netdev_ops(dev);
10066
10067	err = niu_get_invariants(np);
10068	if (err) {
10069		if (err != -ENODEV)
10070			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
10071		goto err_out_iounmap;
10072	}
10073
10074	err = register_netdev(dev);
10075	if (err) {
10076		dev_err(&op->dev, "Cannot register net device, aborting\n");
10077		goto err_out_iounmap;
10078	}
10079
10080	platform_set_drvdata(op, dev);
10081
10082	niu_device_announce(np);
10083
10084	return 0;
10085
10086err_out_iounmap:
10087	if (np->vir_regs_1) {
10088		of_iounmap(&op->resource[2], np->vir_regs_1,
10089			   resource_size(&op->resource[2]));
10090		np->vir_regs_1 = NULL;
10091	}
10092
10093	if (np->vir_regs_2) {
10094		of_iounmap(&op->resource[3], np->vir_regs_2,
10095			   resource_size(&op->resource[3]));
10096		np->vir_regs_2 = NULL;
10097	}
10098
10099	if (np->regs) {
10100		of_iounmap(&op->resource[1], np->regs,
10101			   resource_size(&op->resource[1]));
10102		np->regs = NULL;
10103	}
10104
10105err_out_release_parent:
10106	niu_put_parent(np);
10107
10108err_out_free_dev:
10109	free_netdev(dev);
10110
10111err_out:
10112	return err;
10113}
10114
10115static int niu_of_remove(struct platform_device *op)
10116{
10117	struct net_device *dev = platform_get_drvdata(op);
10118
10119	if (dev) {
10120		struct niu *np = netdev_priv(dev);
10121
10122		unregister_netdev(dev);
10123
10124		if (np->vir_regs_1) {
10125			of_iounmap(&op->resource[2], np->vir_regs_1,
10126				   resource_size(&op->resource[2]));
10127			np->vir_regs_1 = NULL;
10128		}
10129
10130		if (np->vir_regs_2) {
10131			of_iounmap(&op->resource[3], np->vir_regs_2,
10132				   resource_size(&op->resource[3]));
10133			np->vir_regs_2 = NULL;
10134		}
10135
10136		if (np->regs) {
10137			of_iounmap(&op->resource[1], np->regs,
10138				   resource_size(&op->resource[1]));
10139			np->regs = NULL;
10140		}
10141
10142		niu_ldg_free(np);
10143
10144		niu_put_parent(np);
10145
10146		free_netdev(dev);
10147	}
10148	return 0;
10149}
10150
10151static const struct of_device_id niu_match[] = {
10152	{
10153		.name = "network",
10154		.compatible = "SUNW,niusl",
10155	},
10156	{},
10157};
10158MODULE_DEVICE_TABLE(of, niu_match);
10159
10160static struct platform_driver niu_of_driver = {
10161	.driver = {
10162		.name = "niu",
10163		.of_match_table = niu_match,
10164	},
10165	.probe		= niu_of_probe,
10166	.remove		= niu_of_remove,
10167};
10168
10169#endif /* CONFIG_SPARC64 */
10170
10171static int __init niu_init(void)
10172{
10173	int err = 0;
10174
10175	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
10176
10177	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
10178
10179#ifdef CONFIG_SPARC64
10180	err = platform_driver_register(&niu_of_driver);
10181#endif
10182
10183	if (!err) {
10184		err = pci_register_driver(&niu_pci_driver);
10185#ifdef CONFIG_SPARC64
10186		if (err)
10187			platform_driver_unregister(&niu_of_driver);
10188#endif
10189	}
10190
10191	return err;
10192}
10193
10194static void __exit niu_exit(void)
10195{
10196	pci_unregister_driver(&niu_pci_driver);
10197#ifdef CONFIG_SPARC64
10198	platform_driver_unregister(&niu_of_driver);
10199#endif
10200}
10201
10202module_init(niu_init);
10203module_exit(niu_exit);