/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/io.h>
#include <linux/of_device.h>

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

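/* Some architectures provide no native 64-bit MMIO accessors, so fall
 * back to a pair of 32-bit accesses.  Note that the two halves are not
 * read or written atomically with respect to the hardware.
 */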
#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif

static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

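/* The wrapper macros below use BUILD_BUG_ON() so that a zero or negative
 * poll limit or delay is rejected at compile time instead of silently
 * skipping the wait loop at run time.
 */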
#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

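/* Each logical device number (LDN) has its own interrupt mask bits.
 * LDNs 0-63 live in the LD_IM0 register group and higher LDNs in
 * LD_IM1, hence the split at 64 below.
 */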
static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

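/* The parent's port_phy word packs a small PHY-type field per port two
 * bits wide, hence the shift by (port * 2) in the helpers below.
 */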
static u32 phy_encode(u32 type, int port)
{
	return type << (port * 2);
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}

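/* Poll MIF_FRAME_OUTPUT until the MDIO turnaround (TA) bit indicates the
 * frame has completed, then return the 16-bit data field.  Gives up after
 * roughly 10ms (1000 polls of 10us each).
 */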
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}

/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}

static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}
	return 0;
}

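/* The ESR registers are 32 bits wide but are accessed over MDIO in
 * 16-bit halves; the helpers below stitch the _L and _H halves back
 * together (mdio_read() returns a negative errno on failure).
 */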
static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0)
			*val |= ((err & 0xffff) << 16);
		err = 0;
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}

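/* Reset the SERDES lanes by toggling the RX/TX reset control registers
 * with settle delays in between, then verify via esr_read_reset() that
 * every reset bit has cleared.
 */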
static int esr_reset(struct niu *np)
{
	u32 uninitialized_var(reset);
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
			   np->port, reset);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}

static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}

static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val =  ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val =  ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}

static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}

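/* Derive link state from the standard MII registers: read BMCR/BMSR,
 * the advertisement and link-partner registers, and (when extended
 * status is present) the 1000BASE-T registers, then resolve the
 * negotiated speed and duplex.
 */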
 1091static int link_status_mii(struct niu *np, int *link_up_p)
 1092{
 1093	struct niu_link_config *lp = &np->link_config;
 1094	int err;
 1095	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
 1096	int supported, advertising, active_speed, active_duplex;
 1097
 1098	err = mii_read(np, np->phy_addr, MII_BMCR);
 1099	if (unlikely(err < 0))
 1100		return err;
 1101	bmcr = err;
 1102
 1103	err = mii_read(np, np->phy_addr, MII_BMSR);
 1104	if (unlikely(err < 0))
 1105		return err;
 1106	bmsr = err;
 1107
 1108	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
 1109	if (unlikely(err < 0))
 1110		return err;
 1111	advert = err;
 1112
 1113	err = mii_read(np, np->phy_addr, MII_LPA);
 1114	if (unlikely(err < 0))
 1115		return err;
 1116	lpa = err;
 1117
 1118	if (likely(bmsr & BMSR_ESTATEN)) {
 1119		err = mii_read(np, np->phy_addr, MII_ESTATUS);
 1120		if (unlikely(err < 0))
 1121			return err;
 1122		estatus = err;
 1123
 1124		err = mii_read(np, np->phy_addr, MII_CTRL1000);
 1125		if (unlikely(err < 0))
 1126			return err;
 1127		ctrl1000 = err;
 1128
 1129		err = mii_read(np, np->phy_addr, MII_STAT1000);
 1130		if (unlikely(err < 0))
 1131			return err;
 1132		stat1000 = err;
 1133	} else
 1134		estatus = ctrl1000 = stat1000 = 0;
 1135
 1136	supported = 0;
 1137	if (bmsr & BMSR_ANEGCAPABLE)
 1138		supported |= SUPPORTED_Autoneg;
 1139	if (bmsr & BMSR_10HALF)
 1140		supported |= SUPPORTED_10baseT_Half;
 1141	if (bmsr & BMSR_10FULL)
 1142		supported |= SUPPORTED_10baseT_Full;
 1143	if (bmsr & BMSR_100HALF)
 1144		supported |= SUPPORTED_100baseT_Half;
 1145	if (bmsr & BMSR_100FULL)
 1146		supported |= SUPPORTED_100baseT_Full;
 1147	if (estatus & ESTATUS_1000_THALF)
 1148		supported |= SUPPORTED_1000baseT_Half;
 1149	if (estatus & ESTATUS_1000_TFULL)
 1150		supported |= SUPPORTED_1000baseT_Full;
 1151	lp->supported = supported;
 1152
 1153	advertising = 0;
 1154	if (advert & ADVERTISE_10HALF)
 1155		advertising |= ADVERTISED_10baseT_Half;
 1156	if (advert & ADVERTISE_10FULL)
 1157		advertising |= ADVERTISED_10baseT_Full;
 1158	if (advert & ADVERTISE_100HALF)
 1159		advertising |= ADVERTISED_100baseT_Half;
 1160	if (advert & ADVERTISE_100FULL)
 1161		advertising |= ADVERTISED_100baseT_Full;
 1162	if (ctrl1000 & ADVERTISE_1000HALF)
 1163		advertising |= ADVERTISED_1000baseT_Half;
 1164	if (ctrl1000 & ADVERTISE_1000FULL)
 1165		advertising |= ADVERTISED_1000baseT_Full;
 1166
 1167	if (bmcr & BMCR_ANENABLE) {
 1168		int neg, neg1000;
 1169
 1170		lp->active_autoneg = 1;
 1171		advertising |= ADVERTISED_Autoneg;
 1172
 1173		neg = advert & lpa;
 1174		neg1000 = (ctrl1000 << 2) & stat1000;
 1175
 1176		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
 1177			active_speed = SPEED_1000;
 1178		else if (neg & LPA_100)
 1179			active_speed = SPEED_100;
 1180		else if (neg & (LPA_10HALF | LPA_10FULL))
 1181			active_speed = SPEED_10;
 1182		else
 1183			active_speed = SPEED_INVALID;
 1184
 1185		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
 1186			active_duplex = DUPLEX_FULL;
 1187		else if (active_speed != SPEED_INVALID)
 1188			active_duplex = DUPLEX_HALF;
 1189		else
 1190			active_duplex = DUPLEX_INVALID;
 1191	} else {
 1192		lp->active_autoneg = 0;
 1193
 1194		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
 1195			active_speed = SPEED_1000;
 1196		else if (bmcr & BMCR_SPEED100)
 1197			active_speed = SPEED_100;
 1198		else
 1199			active_speed = SPEED_10;
 1200
 1201		if (bmcr & BMCR_FULLDPLX)
 1202			active_duplex = DUPLEX_FULL;
 1203		else
 1204			active_duplex = DUPLEX_HALF;
 1205	}
 1206
 1207	lp->active_advertising = advertising;
 1208	lp->active_speed = active_speed;
 1209	lp->active_duplex = active_duplex;
 1210	*link_up_p = !!(bmsr & BMSR_LSTATUS);
 1211
 1212	return 0;
 1213}
 1214
 1215static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
 1216{
 1217	struct niu_link_config *lp = &np->link_config;
 1218	u16 current_speed, bmsr;
 1219	unsigned long flags;
 1220	u8 current_duplex;
 1221	int err, link_up;
 1222
 1223	link_up = 0;
 1224	current_speed = SPEED_INVALID;
 1225	current_duplex = DUPLEX_INVALID;
 1226
 1227	spin_lock_irqsave(&np->lock, flags);
 1228
 1229	err = -EINVAL;
 1230
 1231	err = mii_read(np, np->phy_addr, MII_BMSR);
 1232	if (err < 0)
 1233		goto out;
 1234
 1235	bmsr = err;
 1236	if (bmsr & BMSR_LSTATUS) {
 1237		u16 adv, lpa;
 1238
 1239		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
 1240		if (err < 0)
 1241			goto out;
 1242		adv = err;
 1243
 1244		err = mii_read(np, np->phy_addr, MII_LPA);
 1245		if (err < 0)
 1246			goto out;
 1247		lpa = err;
 1248
 1249		err = mii_read(np, np->phy_addr, MII_ESTATUS);
 1250		if (err < 0)
 1251			goto out;
 1252		link_up = 1;
 1253		current_speed = SPEED_1000;
 1254		current_duplex = DUPLEX_FULL;
 1255
 1256	}
 1257	lp->active_speed = current_speed;
 1258	lp->active_duplex = current_duplex;
 1259	err = 0;
 1260
 1261out:
 1262	spin_unlock_irqrestore(&np->lock, flags);
 1263
 1264	*link_up_p = link_up;
 1265	return err;
 1266}
 1267
 1268static int link_status_1g(struct niu *np, int *link_up_p)
 1269{
 1270	struct niu_link_config *lp = &np->link_config;
 1271	unsigned long flags;
 1272	int err;
 1273
 1274	spin_lock_irqsave(&np->lock, flags);
 1275
 1276	err = link_status_mii(np, link_up_p);
 1277	lp->supported |= SUPPORTED_TP;
 1278	lp->active_advertising |= ADVERTISED_TP;
 1279
 1280	spin_unlock_irqrestore(&np->lock, flags);
 1281	return err;
 1282}
 1283
 1284static int bcm8704_reset(struct niu *np)
 1285{
 1286	int err, limit;
 1287
 1288	err = mdio_read(np, np->phy_addr,
 1289			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
 1290	if (err < 0 || err == 0xffff)
 1291		return err;
 1292	err |= BMCR_RESET;
 1293	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 1294			 MII_BMCR, err);
 1295	if (err)
 1296		return err;
 1297
 1298	limit = 1000;
 1299	while (--limit >= 0) {
 1300		err = mdio_read(np, np->phy_addr,
 1301				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
 1302		if (err < 0)
 1303			return err;
 1304		if (!(err & BMCR_RESET))
 1305			break;
 1306	}
 1307	if (limit < 0) {
 1308		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
 1309			   np->port, (err & 0xffff));
 1310		return -ENODEV;
 1311	}
 1312	return 0;
 1313}
 1314
 1315/* When written, certain PHY registers need to be read back twice
 1316 * in order for the bits to settle properly.
 1317 */
 1318static int bcm8704_user_dev3_readback(struct niu *np, int reg)
 1319{
 1320	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
 1321	if (err < 0)
 1322		return err;
 1323	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
 1324	if (err < 0)
 1325		return err;
 1326	return 0;
 1327}
 1328
 1329static int bcm8706_init_user_dev3(struct niu *np)
 1330{
 1331	int err;
 1332
 1333
 1334	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1335			BCM8704_USER_OPT_DIGITAL_CTRL);
 1336	if (err < 0)
 1337		return err;
 1338	err &= ~USER_ODIG_CTRL_GPIOS;
 1339	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
 1340	err |=  USER_ODIG_CTRL_RESV2;
 1341	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1342			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
 1343	if (err)
 1344		return err;
 1345
 1346	mdelay(1000);
 1347
 1348	return 0;
 1349}
 1350
 1351static int bcm8704_init_user_dev3(struct niu *np)
 1352{
 1353	int err;
 1354
 1355	err = mdio_write(np, np->phy_addr,
 1356			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
 1357			 (USER_CONTROL_OPTXRST_LVL |
 1358			  USER_CONTROL_OPBIASFLT_LVL |
 1359			  USER_CONTROL_OBTMPFLT_LVL |
 1360			  USER_CONTROL_OPPRFLT_LVL |
 1361			  USER_CONTROL_OPTXFLT_LVL |
 1362			  USER_CONTROL_OPRXLOS_LVL |
 1363			  USER_CONTROL_OPRXFLT_LVL |
 1364			  USER_CONTROL_OPTXON_LVL |
 1365			  (0x3f << USER_CONTROL_RES1_SHIFT)));
 1366	if (err)
 1367		return err;
 1368
 1369	err = mdio_write(np, np->phy_addr,
 1370			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
 1371			 (USER_PMD_TX_CTL_XFP_CLKEN |
 1372			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
 1373			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
 1374			  USER_PMD_TX_CTL_TSCK_LPWREN));
 1375	if (err)
 1376		return err;
 1377
 1378	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
 1379	if (err)
 1380		return err;
 1381	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
 1382	if (err)
 1383		return err;
 1384
 1385	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1386			BCM8704_USER_OPT_DIGITAL_CTRL);
 1387	if (err < 0)
 1388		return err;
 1389	err &= ~USER_ODIG_CTRL_GPIOS;
 1390	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
 1391	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1392			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
 1393	if (err)
 1394		return err;
 1395
 1396	mdelay(1000);
 1397
 1398	return 0;
 1399}
 1400
 1401static int mrvl88x2011_act_led(struct niu *np, int val)
 1402{
 1403	int	err;
 1404
 1405	err  = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
 1406		MRVL88X2011_LED_8_TO_11_CTL);
 1407	if (err < 0)
 1408		return err;
 1409
 1410	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
 1411	err |=  MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);
 1412
 1413	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
 1414			  MRVL88X2011_LED_8_TO_11_CTL, err);
 1415}
 1416
 1417static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
 1418{
 1419	int	err;
 1420
 1421	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
 1422			MRVL88X2011_LED_BLINK_CTL);
 1423	if (err >= 0) {
 1424		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
 1425		err |= (rate << 4);
 1426
 1427		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
 1428				 MRVL88X2011_LED_BLINK_CTL, err);
 1429	}
 1430
 1431	return err;
 1432}
 1433
 1434static int xcvr_init_10g_mrvl88x2011(struct niu *np)
 1435{
 1436	int	err;
 1437
 1438	/* Set LED functions */
 1439	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
 1440	if (err)
 1441		return err;
 1442
 1443	/* led activity */
 1444	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
 1445	if (err)
 1446		return err;
 1447
 1448	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1449			MRVL88X2011_GENERAL_CTL);
 1450	if (err < 0)
 1451		return err;
 1452
 1453	err |= MRVL88X2011_ENA_XFPREFCLK;
 1454
 1455	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1456			 MRVL88X2011_GENERAL_CTL, err);
 1457	if (err < 0)
 1458		return err;
 1459
 1460	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1461			MRVL88X2011_PMA_PMD_CTL_1);
 1462	if (err < 0)
 1463		return err;
 1464
 1465	if (np->link_config.loopback_mode == LOOPBACK_MAC)
 1466		err |= MRVL88X2011_LOOPBACK;
 1467	else
 1468		err &= ~MRVL88X2011_LOOPBACK;
 1469
 1470	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1471			 MRVL88X2011_PMA_PMD_CTL_1, err);
 1472	if (err < 0)
 1473		return err;
 1474
 1475	/* Enable PMD  */
 1476	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1477			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
 1478}
 1479
 1480
 1481static int xcvr_diag_bcm870x(struct niu *np)
 1482{
 1483	u16 analog_stat0, tx_alarm_status;
 1484	int err = 0;
 1485
 1486#if 1
 1487	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 1488			MII_STAT1000);
 1489	if (err < 0)
 1490		return err;
 1491	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
 1492
 1493	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
 1494	if (err < 0)
 1495		return err;
 1496	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
 1497
 1498	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 1499			MII_NWAYTEST);
 1500	if (err < 0)
 1501		return err;
 1502	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
 1503#endif
 1504
 1505	/* XXX dig this out it might not be so useful XXX */
 1506	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1507			BCM8704_USER_ANALOG_STATUS0);
 1508	if (err < 0)
 1509		return err;
 1510	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1511			BCM8704_USER_ANALOG_STATUS0);
 1512	if (err < 0)
 1513		return err;
 1514	analog_stat0 = err;
 1515
 1516	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1517			BCM8704_USER_TX_ALARM_STATUS);
 1518	if (err < 0)
 1519		return err;
 1520	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1521			BCM8704_USER_TX_ALARM_STATUS);
 1522	if (err < 0)
 1523		return err;
 1524	tx_alarm_status = err;
 1525
 1526	if (analog_stat0 != 0x03fc) {
 1527		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
 1528			pr_info("Port %u cable not connected or bad cable\n",
 1529				np->port);
 1530		} else if (analog_stat0 == 0x639c) {
 1531			pr_info("Port %u optical module is bad or missing\n",
 1532				np->port);
 1533		}
 1534	}
 1535
 1536	return 0;
 1537}
 1538
 1539static int xcvr_10g_set_lb_bcm870x(struct niu *np)
 1540{
 1541	struct niu_link_config *lp = &np->link_config;
 1542	int err;
 1543
 1544	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 1545			MII_BMCR);
 1546	if (err < 0)
 1547		return err;
 1548
 1549	err &= ~BMCR_LOOPBACK;
 1550
 1551	if (lp->loopback_mode == LOOPBACK_MAC)
 1552		err |= BMCR_LOOPBACK;
 1553
 1554	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 1555			 MII_BMCR, err);
 1556	if (err)
 1557		return err;
 1558
 1559	return 0;
 1560}
 1561
 1562static int xcvr_init_10g_bcm8706(struct niu *np)
 1563{
 1564	int err = 0;
 1565	u64 val;
 1566
 1567	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
 1568	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
 1569			return err;
 1570
 1571	val = nr64_mac(XMAC_CONFIG);
 1572	val &= ~XMAC_CONFIG_LED_POLARITY;
 1573	val |= XMAC_CONFIG_FORCE_LED_ON;
 1574	nw64_mac(XMAC_CONFIG, val);
 1575
 1576	val = nr64(MIF_CONFIG);
 1577	val |= MIF_CONFIG_INDIRECT_MODE;
 1578	nw64(MIF_CONFIG, val);
 1579
 1580	err = bcm8704_reset(np);
 1581	if (err)
 1582		return err;
 1583
 1584	err = xcvr_10g_set_lb_bcm870x(np);
 1585	if (err)
 1586		return err;
 1587
 1588	err = bcm8706_init_user_dev3(np);
 1589	if (err)
 1590		return err;
 1591
 1592	err = xcvr_diag_bcm870x(np);
 1593	if (err)
 1594		return err;
 1595
 1596	return 0;
 1597}
 1598
 1599static int xcvr_init_10g_bcm8704(struct niu *np)
 1600{
 1601	int err;
 1602
 1603	err = bcm8704_reset(np);
 1604	if (err)
 1605		return err;
 1606
 1607	err = bcm8704_init_user_dev3(np);
 1608	if (err)
 1609		return err;
 1610
 1611	err = xcvr_10g_set_lb_bcm870x(np);
 1612	if (err)
 1613		return err;
 1614
 1615	err =  xcvr_diag_bcm870x(np);
 1616	if (err)
 1617		return err;
 1618
 1619	return 0;
 1620}
 1621
 1622static int xcvr_init_10g(struct niu *np)
 1623{
 1624	int phy_id, err;
 1625	u64 val;
 1626
 1627	val = nr64_mac(XMAC_CONFIG);
 1628	val &= ~XMAC_CONFIG_LED_POLARITY;
 1629	val |= XMAC_CONFIG_FORCE_LED_ON;
 1630	nw64_mac(XMAC_CONFIG, val);
 1631
 1632	/* XXX shared resource, lock parent XXX */
 1633	val = nr64(MIF_CONFIG);
 1634	val |= MIF_CONFIG_INDIRECT_MODE;
 1635	nw64(MIF_CONFIG, val);
 1636
 1637	phy_id = phy_decode(np->parent->port_phy, np->port);
 1638	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
 1639
 1640	/* handle different phy types */
 1641	switch (phy_id & NIU_PHY_ID_MASK) {
 1642	case NIU_PHY_ID_MRVL88X2011:
 1643		err = xcvr_init_10g_mrvl88x2011(np);
 1644		break;
 1645
 1646	default: /* bcom 8704 */
 1647		err = xcvr_init_10g_bcm8704(np);
 1648		break;
 1649	}
 1650
 1651	return err;
 1652}
 1653
 1654static int mii_reset(struct niu *np)
 1655{
 1656	int limit, err;
 1657
 1658	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
 1659	if (err)
 1660		return err;
 1661
 1662	limit = 1000;
 1663	while (--limit >= 0) {
 1664		udelay(500);
 1665		err = mii_read(np, np->phy_addr, MII_BMCR);
 1666		if (err < 0)
 1667			return err;
 1668		if (!(err & BMCR_RESET))
 1669			break;
 1670	}
 1671	if (limit < 0) {
 1672		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
 1673			   np->port, err);
 1674		return -ENODEV;
 1675	}
 1676
 1677	return 0;
 1678}
 1679
 1680static int xcvr_init_1g_rgmii(struct niu *np)
 1681{
 1682	int err;
 1683	u64 val;
 1684	u16 bmcr, bmsr, estat;
 1685
 1686	val = nr64(MIF_CONFIG);
 1687	val &= ~MIF_CONFIG_INDIRECT_MODE;
 1688	nw64(MIF_CONFIG, val);
 1689
 1690	err = mii_reset(np);
 1691	if (err)
 1692		return err;
 1693
 1694	err = mii_read(np, np->phy_addr, MII_BMSR);
 1695	if (err < 0)
 1696		return err;
 1697	bmsr = err;
 1698
 1699	estat = 0;
 1700	if (bmsr & BMSR_ESTATEN) {
 1701		err = mii_read(np, np->phy_addr, MII_ESTATUS);
 1702		if (err < 0)
 1703			return err;
 1704		estat = err;
 1705	}
 1706
 1707	bmcr = 0;
 1708	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
 1709	if (err)
 1710		return err;
 1711
 1712	if (bmsr & BMSR_ESTATEN) {
 1713		u16 ctrl1000 = 0;
 1714
 1715		if (estat & ESTATUS_1000_TFULL)
 1716			ctrl1000 |= ADVERTISE_1000FULL;
 1717		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
 1718		if (err)
 1719			return err;
 1720	}
 1721
 1722	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
 1723
 1724	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
 1725	if (err)
 1726		return err;
 1727
 1728	err = mii_read(np, np->phy_addr, MII_BMCR);
 1729	if (err < 0)
 1730		return err;
 1731	bmcr = mii_read(np, np->phy_addr, MII_BMCR);
 1732
 1733	err = mii_read(np, np->phy_addr, MII_BMSR);
 1734	if (err < 0)
 1735		return err;
 1736
 1737	return 0;
 1738}
 1739
 1740static int mii_init_common(struct niu *np)
 1741{
 1742	struct niu_link_config *lp = &np->link_config;
 1743	u16 bmcr, bmsr, adv, estat;
 1744	int err;
 1745
 1746	err = mii_reset(np);
 1747	if (err)
 1748		return err;
 1749
 1750	err = mii_read(np, np->phy_addr, MII_BMSR);
 1751	if (err < 0)
 1752		return err;
 1753	bmsr = err;
 1754
 1755	estat = 0;
 1756	if (bmsr & BMSR_ESTATEN) {
 1757		err = mii_read(np, np->phy_addr, MII_ESTATUS);
 1758		if (err < 0)
 1759			return err;
 1760		estat = err;
 1761	}
 1762
 1763	bmcr = 0;
 1764	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
 1765	if (err)
 1766		return err;
 1767
 1768	if (lp->loopback_mode == LOOPBACK_MAC) {
 1769		bmcr |= BMCR_LOOPBACK;
 1770		if (lp->active_speed == SPEED_1000)
 1771			bmcr |= BMCR_SPEED1000;
 1772		if (lp->active_duplex == DUPLEX_FULL)
 1773			bmcr |= BMCR_FULLDPLX;
 1774	}
 1775
 1776	if (lp->loopback_mode == LOOPBACK_PHY) {
 1777		u16 aux;
 1778
 1779		aux = (BCM5464R_AUX_CTL_EXT_LB |
 1780		       BCM5464R_AUX_CTL_WRITE_1);
 1781		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
 1782		if (err)
 1783			return err;
 1784	}
 1785
 1786	if (lp->autoneg) {
 1787		u16 ctrl1000;
 1788
 1789		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
 1790		if ((bmsr & BMSR_10HALF) &&
 1791			(lp->advertising & ADVERTISED_10baseT_Half))
 1792			adv |= ADVERTISE_10HALF;
 1793		if ((bmsr & BMSR_10FULL) &&
 1794			(lp->advertising & ADVERTISED_10baseT_Full))
 1795			adv |= ADVERTISE_10FULL;
 1796		if ((bmsr & BMSR_100HALF) &&
 1797			(lp->advertising & ADVERTISED_100baseT_Half))
 1798			adv |= ADVERTISE_100HALF;
 1799		if ((bmsr & BMSR_100FULL) &&
 1800			(lp->advertising & ADVERTISED_100baseT_Full))
 1801			adv |= ADVERTISE_100FULL;
 1802		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
 1803		if (err)
 1804			return err;
 1805
 1806		if (likely(bmsr & BMSR_ESTATEN)) {
 1807			ctrl1000 = 0;
 1808			if ((estat & ESTATUS_1000_THALF) &&
 1809				(lp->advertising & ADVERTISED_1000baseT_Half))
 1810				ctrl1000 |= ADVERTISE_1000HALF;
 1811			if ((estat & ESTATUS_1000_TFULL) &&
 1812				(lp->advertising & ADVERTISED_1000baseT_Full))
 1813				ctrl1000 |= ADVERTISE_1000FULL;
 1814			err = mii_write(np, np->phy_addr,
 1815					MII_CTRL1000, ctrl1000);
 1816			if (err)
 1817				return err;
 1818		}
 1819
 1820		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
 1821	} else {
 1822		/* !lp->autoneg */
 1823		int fulldpx;
 1824
 1825		if (lp->duplex == DUPLEX_FULL) {
 1826			bmcr |= BMCR_FULLDPLX;
 1827			fulldpx = 1;
 1828		} else if (lp->duplex == DUPLEX_HALF)
 1829			fulldpx = 0;
 1830		else
 1831			return -EINVAL;
 1832
 1833		if (lp->speed == SPEED_1000) {
 1834			/* if X-full requested while not supported, or
 1835			   X-half requested while not supported... */
 1836			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
 1837				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
 1838				return -EINVAL;
 1839			bmcr |= BMCR_SPEED1000;
 1840		} else if (lp->speed == SPEED_100) {
 1841			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
 1842				(!fulldpx && !(bmsr & BMSR_100HALF)))
 1843				return -EINVAL;
 1844			bmcr |= BMCR_SPEED100;
 1845		} else if (lp->speed == SPEED_10) {
 1846			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
 1847				(!fulldpx && !(bmsr & BMSR_10HALF)))
 1848				return -EINVAL;
 1849		} else
 1850			return -EINVAL;
 1851	}
 1852
 1853	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
 1854	if (err)
 1855		return err;
 1856
 1857#if 0
 1858	err = mii_read(np, np->phy_addr, MII_BMCR);
 1859	if (err < 0)
 1860		return err;
 1861	bmcr = err;
 1862
 1863	err = mii_read(np, np->phy_addr, MII_BMSR);
 1864	if (err < 0)
 1865		return err;
 1866	bmsr = err;
 1867
 1868	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
 1869		np->port, bmcr, bmsr);
 1870#endif
 1871
 1872	return 0;
 1873}
 1874
 1875static int xcvr_init_1g(struct niu *np)
 1876{
 1877	u64 val;
 1878
 1879	/* XXX shared resource, lock parent XXX */
 1880	val = nr64(MIF_CONFIG);
 1881	val &= ~MIF_CONFIG_INDIRECT_MODE;
 1882	nw64(MIF_CONFIG, val);
 1883
 1884	return mii_init_common(np);
 1885}
 1886
 1887static int niu_xcvr_init(struct niu *np)
 1888{
 1889	const struct niu_phy_ops *ops = np->phy_ops;
 1890	int err;
 1891
 1892	err = 0;
 1893	if (ops->xcvr_init)
 1894		err = ops->xcvr_init(np);
 1895
 1896	return err;
 1897}
 1898
 1899static int niu_serdes_init(struct niu *np)
 1900{
 1901	const struct niu_phy_ops *ops = np->phy_ops;
 1902	int err;
 1903
 1904	err = 0;
 1905	if (ops->serdes_init)
 1906		err = ops->serdes_init(np);
 1907
 1908	return err;
 1909}
 1910
 1911static void niu_init_xif(struct niu *);
 1912static void niu_handle_led(struct niu *, int status);
 1913
 1914static int niu_link_status_common(struct niu *np, int link_up)
 1915{
 1916	struct niu_link_config *lp = &np->link_config;
 1917	struct net_device *dev = np->dev;
 1918	unsigned long flags;
 1919
 1920	if (!netif_carrier_ok(dev) && link_up) {
 1921		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
 1922			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
 1923			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
 1924			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
 1925			   "10Mbit/sec",
 1926			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");
 1927
 1928		spin_lock_irqsave(&np->lock, flags);
 1929		niu_init_xif(np);
 1930		niu_handle_led(np, 1);
 1931		spin_unlock_irqrestore(&np->lock, flags);
 1932
 1933		netif_carrier_on(dev);
 1934	} else if (netif_carrier_ok(dev) && !link_up) {
 1935		netif_warn(np, link, dev, "Link is down\n");
 1936		spin_lock_irqsave(&np->lock, flags);
 1937		niu_handle_led(np, 0);
 1938		spin_unlock_irqrestore(&np->lock, flags);
 1939		netif_carrier_off(dev);
 1940	}
 1941
 1942	return 0;
 1943}
 1944
 1945static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
 1946{
 1947	int err, link_up, pma_status, pcs_status;
 1948
 1949	link_up = 0;
 1950
 1951	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1952			MRVL88X2011_10G_PMD_STATUS_2);
 1953	if (err < 0)
 1954		goto out;
 1955
 1956	/* Check PMA/PMD Register: 1.0001.2 == 1 */
 1957	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1958			MRVL88X2011_PMA_PMD_STATUS_1);
 1959	if (err < 0)
 1960		goto out;
 1961
 1962	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
 1963
 1964        /* Check PMC Register : 3.0001.2 == 1: read twice */
 1965	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1966			MRVL88X2011_PMA_PMD_STATUS_1);
 1967	if (err < 0)
 1968		goto out;
 1969
 1970	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1971			MRVL88X2011_PMA_PMD_STATUS_1);
 1972	if (err < 0)
 1973		goto out;
 1974
 1975	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
 1976
 1977        /* Check XGXS Register : 4.0018.[0-3,12] */
 1978	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
 1979			MRVL88X2011_10G_XGXS_LANE_STAT);
 1980	if (err < 0)
 1981		goto out;
 1982
 1983	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
 1984		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
 1985		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
 1986		    0x800))
 1987		link_up = (pma_status && pcs_status) ? 1 : 0;
 1988
 1989	np->link_config.active_speed = SPEED_10000;
 1990	np->link_config.active_duplex = DUPLEX_FULL;
 1991	err = 0;
 1992out:
 1993	mrvl88x2011_act_led(np, (link_up ?
 1994				 MRVL88X2011_LED_CTL_PCS_ACT :
 1995				 MRVL88X2011_LED_CTL_OFF));
 1996
 1997	*link_up_p = link_up;
 1998	return err;
 1999}
 2000
 2001static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
 2002{
 2003	int err, link_up;
 2004	link_up = 0;
 2005
 2006	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 2007			BCM8704_PMD_RCV_SIGDET);
 2008	if (err < 0 || err == 0xffff)
 2009		goto out;
 2010	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
 2011		err = 0;
 2012		goto out;
 2013	}
 2014
 2015	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 2016			BCM8704_PCS_10G_R_STATUS);
 2017	if (err < 0)
 2018		goto out;
 2019
 2020	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
 2021		err = 0;
 2022		goto out;
 2023	}
 2024
 2025	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 2026			BCM8704_PHYXS_XGXS_LANE_STAT);
 2027	if (err < 0)
 2028		goto out;
 2029	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
 2030		    PHYXS_XGXS_LANE_STAT_MAGIC |
 2031		    PHYXS_XGXS_LANE_STAT_PATTEST |
 2032		    PHYXS_XGXS_LANE_STAT_LANE3 |
 2033		    PHYXS_XGXS_LANE_STAT_LANE2 |
 2034		    PHYXS_XGXS_LANE_STAT_LANE1 |
 2035		    PHYXS_XGXS_LANE_STAT_LANE0)) {
 2036		err = 0;
 2037		np->link_config.active_speed = SPEED_INVALID;
 2038		np->link_config.active_duplex = DUPLEX_INVALID;
 2039		goto out;
 2040	}
 2041
 2042	link_up = 1;
 2043	np->link_config.active_speed = SPEED_10000;
 2044	np->link_config.active_duplex = DUPLEX_FULL;
 2045	err = 0;
 2046
 2047out:
 2048	*link_up_p = link_up;
 2049	return err;
 2050}
 2051
 2052static int link_status_10g_bcom(struct niu *np, int *link_up_p)
 2053{
 2054	int err, link_up;
 2055
 2056	link_up = 0;
 2057
 2058	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 2059			BCM8704_PMD_RCV_SIGDET);
 2060	if (err < 0)
 2061		goto out;
 2062	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
 2063		err = 0;
 2064		goto out;
 2065	}
 2066
 2067	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 2068			BCM8704_PCS_10G_R_STATUS);
 2069	if (err < 0)
 2070		goto out;
 2071	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
 2072		err = 0;
 2073		goto out;
 2074	}
 2075
 2076	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 2077			BCM8704_PHYXS_XGXS_LANE_STAT);
 2078	if (err < 0)
 2079		goto out;
 2080
 2081	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
 2082		    PHYXS_XGXS_LANE_STAT_MAGIC |
 2083		    PHYXS_XGXS_LANE_STAT_LANE3 |
 2084		    PHYXS_XGXS_LANE_STAT_LANE2 |
 2085		    PHYXS_XGXS_LANE_STAT_LANE1 |
 2086		    PHYXS_XGXS_LANE_STAT_LANE0)) {
 2087		err = 0;
 2088		goto out;
 2089	}
 2090
 2091	link_up = 1;
 2092	np->link_config.active_speed = SPEED_10000;
 2093	np->link_config.active_duplex = DUPLEX_FULL;
 2094	err = 0;
 2095
 2096out:
 2097	*link_up_p = link_up;
 2098	return err;
 2099}
 2100
 2101static int link_status_10g(struct niu *np, int *link_up_p)
 2102{
 2103	unsigned long flags;
 2104	int err = -EINVAL;
 2105
 2106	spin_lock_irqsave(&np->lock, flags);
 2107
 2108	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
 2109		int phy_id;
 2110
 2111		phy_id = phy_decode(np->parent->port_phy, np->port);
 2112		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
 2113
 2114		/* handle different phy types */
 2115		switch (phy_id & NIU_PHY_ID_MASK) {
 2116		case NIU_PHY_ID_MRVL88X2011:
 2117			err = link_status_10g_mrvl(np, link_up_p);
 2118			break;
 2119
 2120		default: /* bcom 8704 */
 2121			err = link_status_10g_bcom(np, link_up_p);
 2122			break;
 2123		}
 2124	}
 2125
 2126	spin_unlock_irqrestore(&np->lock, flags);
 2127
 2128	return err;
 2129}
 2130
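      /* Sample ESR_INT_SIGNALS and compare the per-port signal-ready
       * and detect bits against the expected pattern to decide whether
       * a 10G PHY is physically present on this port.
       */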
 2131static int niu_10g_phy_present(struct niu *np)
 2132{
 2133	u64 sig, mask, val;
 2134
 2135	sig = nr64(ESR_INT_SIGNALS);
 2136	switch (np->port) {
 2137	case 0:
 2138		mask = ESR_INT_SIGNALS_P0_BITS;
 2139		val = (ESR_INT_SRDY0_P0 |
 2140		       ESR_INT_DET0_P0 |
 2141		       ESR_INT_XSRDY_P0 |
 2142		       ESR_INT_XDP_P0_CH3 |
 2143		       ESR_INT_XDP_P0_CH2 |
 2144		       ESR_INT_XDP_P0_CH1 |
 2145		       ESR_INT_XDP_P0_CH0);
 2146		break;
 2147
 2148	case 1:
 2149		mask = ESR_INT_SIGNALS_P1_BITS;
 2150		val = (ESR_INT_SRDY0_P1 |
 2151		       ESR_INT_DET0_P1 |
 2152		       ESR_INT_XSRDY_P1 |
 2153		       ESR_INT_XDP_P1_CH3 |
 2154		       ESR_INT_XDP_P1_CH2 |
 2155		       ESR_INT_XDP_P1_CH1 |
 2156		       ESR_INT_XDP_P1_CH0);
 2157		break;
 2158
 2159	default:
 2160		return 0;
 2161	}
 2162
 2163	if ((sig & mask) != val)
 2164		return 0;
 2165	return 1;
 2166}
 2167
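      /* Link status for hotplug-capable 10G ports: detect NEM
       * insertion/removal by probing for the PHY, re-run transceiver
       * init on insertion, and treat an MDIO read of 0xffff as a
       * back-to-back XAUI connection with no PHY (C10NEM).
       */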
 2168static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
 2169{
 2170	unsigned long flags;
 2171	int err = 0;
 2172	int phy_present;
 2173	int phy_present_prev;
 2174
 2175	spin_lock_irqsave(&np->lock, flags);
 2176
 2177	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
 2178		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
 2179			1 : 0;
 2180		phy_present = niu_10g_phy_present(np);
 2181		if (phy_present != phy_present_prev) {
 2182			/* state change */
 2183			if (phy_present) {
 2184				/* A NEM was just plugged in */
 2185				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2186				if (np->phy_ops->xcvr_init)
 2187					err = np->phy_ops->xcvr_init(np);
 2188				if (err) {
 2189					err = mdio_read(np, np->phy_addr,
 2190						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
 2191					if (err == 0xffff) {
 2192						/* No mdio, back-to-back XAUI */
 2193						goto out;
 2194					}
 2195					/* debounce */
 2196					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2197				}
 2198			} else {
 2199				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2200				*link_up_p = 0;
 2201				netif_warn(np, link, np->dev,
 2202					   "Hotplug PHY Removed\n");
 2203			}
 2204		}
 2205out:
 2206		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
 2207			err = link_status_10g_bcm8706(np, link_up_p);
 2208			if (err == 0xffff) {
 2209				/* No mdio, back-to-back XAUI: it is C10NEM */
 2210				*link_up_p = 1;
 2211				np->link_config.active_speed = SPEED_10000;
 2212				np->link_config.active_duplex = DUPLEX_FULL;
 2213			}
 2214		}
 2215	}
 2216
 2217	spin_unlock_irqrestore(&np->lock, flags);
 2218
 2219	return 0;
 2220}
 2221
 2222static int niu_link_status(struct niu *np, int *link_up_p)
 2223{
 2224	const struct niu_phy_ops *ops = np->phy_ops;
 2225	int err;
 2226
 2227	err = 0;
 2228	if (ops->link_status)
 2229		err = ops->link_status(np, link_up_p);
 2230
 2231	return err;
 2232}
 2233
 2234static void niu_timer(unsigned long __opaque)
 2235{
 2236	struct niu *np = (struct niu *) __opaque;
 2237	unsigned long off;
 2238	int err, link_up;
 2239
 2240	err = niu_link_status(np, &link_up);
 2241	if (!err)
 2242		niu_link_status_common(np, link_up);
 2243
 2244	if (netif_carrier_ok(np->dev))
 2245		off = 5 * HZ;
 2246	else
 2247		off = 1 * HZ;
 2248	np->timer.expires = jiffies + off;
 2249
 2250	add_timer(&np->timer);
 2251}
 2252
 2253static const struct niu_phy_ops phy_ops_10g_serdes = {
 2254	.serdes_init		= serdes_init_10g_serdes,
 2255	.link_status		= link_status_10g_serdes,
 2256};
 2257
 2258static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
 2259	.serdes_init		= serdes_init_niu_10g_serdes,
 2260	.link_status		= link_status_10g_serdes,
 2261};
 2262
 2263static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
 2264	.serdes_init		= serdes_init_niu_1g_serdes,
 2265	.link_status		= link_status_1g_serdes,
 2266};
 2267
 2268static const struct niu_phy_ops phy_ops_1g_rgmii = {
 2269	.xcvr_init		= xcvr_init_1g_rgmii,
 2270	.link_status		= link_status_1g_rgmii,
 2271};
 2272
 2273static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
 2274	.serdes_init		= serdes_init_niu_10g_fiber,
 2275	.xcvr_init		= xcvr_init_10g,
 2276	.link_status		= link_status_10g,
 2277};
 2278
 2279static const struct niu_phy_ops phy_ops_10g_fiber = {
 2280	.serdes_init		= serdes_init_10g,
 2281	.xcvr_init		= xcvr_init_10g,
 2282	.link_status		= link_status_10g,
 2283};
 2284
 2285static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
 2286	.serdes_init		= serdes_init_10g,
 2287	.xcvr_init		= xcvr_init_10g_bcm8706,
 2288	.link_status		= link_status_10g_hotplug,
 2289};
 2290
 2291static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
 2292	.serdes_init		= serdes_init_niu_10g_fiber,
 2293	.xcvr_init		= xcvr_init_10g_bcm8706,
 2294	.link_status		= link_status_10g_hotplug,
 2295};
 2296
 2297static const struct niu_phy_ops phy_ops_10g_copper = {
 2298	.serdes_init		= serdes_init_10g,
 2299	.link_status		= link_status_10g, /* XXX */
 2300};
 2301
 2302static const struct niu_phy_ops phy_ops_1g_fiber = {
 2303	.serdes_init		= serdes_init_1g,
 2304	.xcvr_init		= xcvr_init_1g,
 2305	.link_status		= link_status_1g,
 2306};
 2307
 2308static const struct niu_phy_ops phy_ops_1g_copper = {
 2309	.xcvr_init		= xcvr_init_1g,
 2310	.link_status		= link_status_1g,
 2311};
 2312
 2313struct niu_phy_template {
 2314	const struct niu_phy_ops	*ops;
 2315	u32				phy_addr_base;
 2316};
 2317
 2318static const struct niu_phy_template phy_template_niu_10g_fiber = {
 2319	.ops		= &phy_ops_10g_fiber_niu,
 2320	.phy_addr_base	= 16,
 2321};
 2322
 2323static const struct niu_phy_template phy_template_niu_10g_serdes = {
 2324	.ops		= &phy_ops_10g_serdes_niu,
 2325	.phy_addr_base	= 0,
 2326};
 2327
 2328static const struct niu_phy_template phy_template_niu_1g_serdes = {
 2329	.ops		= &phy_ops_1g_serdes_niu,
 2330	.phy_addr_base	= 0,
 2331};
 2332
 2333static const struct niu_phy_template phy_template_10g_fiber = {
 2334	.ops		= &phy_ops_10g_fiber,
 2335	.phy_addr_base	= 8,
 2336};
 2337
 2338static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
 2339	.ops		= &phy_ops_10g_fiber_hotplug,
 2340	.phy_addr_base	= 8,
 2341};
 2342
 2343static const struct niu_phy_template phy_template_niu_10g_hotplug = {
 2344	.ops		= &phy_ops_niu_10g_hotplug,
 2345	.phy_addr_base	= 8,
 2346};
 2347
 2348static const struct niu_phy_template phy_template_10g_copper = {
 2349	.ops		= &phy_ops_10g_copper,
 2350	.phy_addr_base	= 10,
 2351};
 2352
 2353static const struct niu_phy_template phy_template_1g_fiber = {
 2354	.ops		= &phy_ops_1g_fiber,
 2355	.phy_addr_base	= 0,
 2356};
 2357
 2358static const struct niu_phy_template phy_template_1g_copper = {
 2359	.ops		= &phy_ops_1g_copper,
 2360	.phy_addr_base	= 0,
 2361};
 2362
 2363static const struct niu_phy_template phy_template_1g_rgmii = {
 2364	.ops		= &phy_ops_1g_rgmii,
 2365	.phy_addr_base	= 0,
 2366};
 2367
 2368static const struct niu_phy_template phy_template_10g_serdes = {
 2369	.ops		= &phy_ops_10g_serdes,
 2370	.phy_addr_base	= 0,
 2371};
 2372
 2373static int niu_atca_port_num[4] = {
  2374	0, 0, 11, 10
 2375};
 2376
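      /* Bring up the 10G SERDES: program the per-port PLL, control
       * and test registers, initialize rx/tx control and glue0 on all
       * four lanes, then verify the signal-ready bits in
       * ESR_INT_SIGNALS.  If they never assert, fall back to 1G
       * SERDES mode.
       */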
 2377static int serdes_init_10g_serdes(struct niu *np)
 2378{
 2379	struct niu_link_config *lp = &np->link_config;
 2380	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
 2381	u64 ctrl_val, test_cfg_val, sig, mask, val;
 2382
 2383	switch (np->port) {
 2384	case 0:
 2385		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
 2386		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
 2387		pll_cfg = ENET_SERDES_0_PLL_CFG;
 2388		break;
 2389	case 1:
 2390		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
 2391		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
 2392		pll_cfg = ENET_SERDES_1_PLL_CFG;
 2393		break;
 2394
 2395	default:
 2396		return -EINVAL;
 2397	}
 2398	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
 2399		    ENET_SERDES_CTRL_SDET_1 |
 2400		    ENET_SERDES_CTRL_SDET_2 |
 2401		    ENET_SERDES_CTRL_SDET_3 |
 2402		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
 2403		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
 2404		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
 2405		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
 2406		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
 2407		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
 2408		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
 2409		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
 2410	test_cfg_val = 0;
 2411
 2412	if (lp->loopback_mode == LOOPBACK_PHY) {
 2413		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
 2414				  ENET_SERDES_TEST_MD_0_SHIFT) |
 2415				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2416				  ENET_SERDES_TEST_MD_1_SHIFT) |
 2417				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2418				  ENET_SERDES_TEST_MD_2_SHIFT) |
 2419				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2420				  ENET_SERDES_TEST_MD_3_SHIFT));
 2421	}
 2422
 2423	esr_reset(np);
 2424	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
 2425	nw64(ctrl_reg, ctrl_val);
 2426	nw64(test_cfg_reg, test_cfg_val);
 2427
 2428	/* Initialize all 4 lanes of the SERDES.  */
 2429	for (i = 0; i < 4; i++) {
 2430		u32 rxtx_ctrl, glue0;
 2431		int err;
 2432
 2433		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
 2434		if (err)
 2435			return err;
 2436		err = esr_read_glue0(np, i, &glue0);
 2437		if (err)
 2438			return err;
 2439
 2440		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
 2441		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
 2442			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
 2443
 2444		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
 2445			   ESR_GLUE_CTRL0_THCNT |
 2446			   ESR_GLUE_CTRL0_BLTIME);
 2447		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
 2448			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
 2449			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
 2450			  (BLTIME_300_CYCLES <<
 2451			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
 2452
 2453		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
 2454		if (err)
 2455			return err;
 2456		err = esr_write_glue0(np, i, glue0);
 2457		if (err)
 2458			return err;
 2459	}
  2460
 2462	sig = nr64(ESR_INT_SIGNALS);
 2463	switch (np->port) {
 2464	case 0:
 2465		mask = ESR_INT_SIGNALS_P0_BITS;
 2466		val = (ESR_INT_SRDY0_P0 |
 2467		       ESR_INT_DET0_P0 |
 2468		       ESR_INT_XSRDY_P0 |
 2469		       ESR_INT_XDP_P0_CH3 |
 2470		       ESR_INT_XDP_P0_CH2 |
 2471		       ESR_INT_XDP_P0_CH1 |
 2472		       ESR_INT_XDP_P0_CH0);
 2473		break;
 2474
 2475	case 1:
 2476		mask = ESR_INT_SIGNALS_P1_BITS;
 2477		val = (ESR_INT_SRDY0_P1 |
 2478		       ESR_INT_DET0_P1 |
 2479		       ESR_INT_XSRDY_P1 |
 2480		       ESR_INT_XDP_P1_CH3 |
 2481		       ESR_INT_XDP_P1_CH2 |
 2482		       ESR_INT_XDP_P1_CH1 |
 2483		       ESR_INT_XDP_P1_CH0);
 2484		break;
 2485
 2486	default:
 2487		return -EINVAL;
 2488	}
 2489
 2490	if ((sig & mask) != val) {
 2491		int err;
 2492		err = serdes_init_1g_serdes(np);
 2493		if (!err) {
 2494			np->flags &= ~NIU_FLAGS_10G;
 2495			np->mac_xcvr = MAC_XCVR_PCS;
  2496		} else {
 2497			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
 2498				   np->port);
 2499			return -ENODEV;
 2500		}
 2501	}
 2502
 2503	return 0;
 2504}
 2505
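      /* Pick the PHY ops template and MDIO address for this port from
       * the 10G/fiber/serdes flag combination and the platform type.
       */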
 2506static int niu_determine_phy_disposition(struct niu *np)
 2507{
 2508	struct niu_parent *parent = np->parent;
 2509	u8 plat_type = parent->plat_type;
 2510	const struct niu_phy_template *tp;
 2511	u32 phy_addr_off = 0;
 2512
 2513	if (plat_type == PLAT_TYPE_NIU) {
 2514		switch (np->flags &
 2515			(NIU_FLAGS_10G |
 2516			 NIU_FLAGS_FIBER |
 2517			 NIU_FLAGS_XCVR_SERDES)) {
 2518		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 2519			/* 10G Serdes */
 2520			tp = &phy_template_niu_10g_serdes;
 2521			break;
 2522		case NIU_FLAGS_XCVR_SERDES:
 2523			/* 1G Serdes */
 2524			tp = &phy_template_niu_1g_serdes;
 2525			break;
 2526		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 2527			/* 10G Fiber */
 2528		default:
 2529			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
 2530				tp = &phy_template_niu_10g_hotplug;
 2531				if (np->port == 0)
 2532					phy_addr_off = 8;
 2533				if (np->port == 1)
 2534					phy_addr_off = 12;
 2535			} else {
 2536				tp = &phy_template_niu_10g_fiber;
 2537				phy_addr_off += np->port;
 2538			}
 2539			break;
 2540		}
 2541	} else {
 2542		switch (np->flags &
 2543			(NIU_FLAGS_10G |
 2544			 NIU_FLAGS_FIBER |
 2545			 NIU_FLAGS_XCVR_SERDES)) {
 2546		case 0:
 2547			/* 1G copper */
 2548			tp = &phy_template_1g_copper;
 2549			if (plat_type == PLAT_TYPE_VF_P0)
 2550				phy_addr_off = 10;
 2551			else if (plat_type == PLAT_TYPE_VF_P1)
 2552				phy_addr_off = 26;
 2553
 2554			phy_addr_off += (np->port ^ 0x3);
 2555			break;
 2556
 2557		case NIU_FLAGS_10G:
 2558			/* 10G copper */
 2559			tp = &phy_template_10g_copper;
 2560			break;
 2561
 2562		case NIU_FLAGS_FIBER:
 2563			/* 1G fiber */
 2564			tp = &phy_template_1g_fiber;
 2565			break;
 2566
 2567		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 2568			/* 10G fiber */
 2569			tp = &phy_template_10g_fiber;
 2570			if (plat_type == PLAT_TYPE_VF_P0 ||
 2571			    plat_type == PLAT_TYPE_VF_P1)
 2572				phy_addr_off = 8;
 2573			phy_addr_off += np->port;
 2574			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
 2575				tp = &phy_template_10g_fiber_hotplug;
 2576				if (np->port == 0)
 2577					phy_addr_off = 8;
 2578				if (np->port == 1)
 2579					phy_addr_off = 12;
 2580			}
 2581			break;
 2582
 2583		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 2584		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
 2585		case NIU_FLAGS_XCVR_SERDES:
  2586			switch (np->port) {
 2587			case 0:
 2588			case 1:
 2589				tp = &phy_template_10g_serdes;
 2590				break;
 2591			case 2:
 2592			case 3:
 2593				tp = &phy_template_1g_rgmii;
 2594				break;
 2595			default:
 2596				return -EINVAL;
 2598			}
 2599			phy_addr_off = niu_atca_port_num[np->port];
 2600			break;
 2601
 2602		default:
 2603			return -EINVAL;
 2604		}
 2605	}
 2606
 2607	np->phy_ops = tp->ops;
 2608	np->phy_addr = tp->phy_addr_base + phy_addr_off;
 2609
 2610	return 0;
 2611}
 2612
 2613static int niu_init_link(struct niu *np)
 2614{
 2615	struct niu_parent *parent = np->parent;
 2616	int err, ignore;
 2617
 2618	if (parent->plat_type == PLAT_TYPE_NIU) {
 2619		err = niu_xcvr_init(np);
 2620		if (err)
 2621			return err;
 2622		msleep(200);
 2623	}
 2624	err = niu_serdes_init(np);
 2625	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
 2626		return err;
 2627	msleep(200);
 2628	err = niu_xcvr_init(np);
 2629	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
 2630		niu_link_status(np, &ignore);
 2631	return 0;
 2632}
 2633
 2634static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
 2635{
 2636	u16 reg0 = addr[4] << 8 | addr[5];
 2637	u16 reg1 = addr[2] << 8 | addr[3];
 2638	u16 reg2 = addr[0] << 8 | addr[1];
 2639
 2640	if (np->flags & NIU_FLAGS_XMAC) {
 2641		nw64_mac(XMAC_ADDR0, reg0);
 2642		nw64_mac(XMAC_ADDR1, reg1);
 2643		nw64_mac(XMAC_ADDR2, reg2);
 2644	} else {
 2645		nw64_mac(BMAC_ADDR0, reg0);
 2646		nw64_mac(BMAC_ADDR1, reg1);
 2647		nw64_mac(BMAC_ADDR2, reg2);
 2648	}
 2649}
 2650
 2651static int niu_num_alt_addr(struct niu *np)
 2652{
 2653	if (np->flags & NIU_FLAGS_XMAC)
 2654		return XMAC_NUM_ALT_ADDR;
 2655	else
 2656		return BMAC_NUM_ALT_ADDR;
 2657}
 2658
 2659static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
 2660{
 2661	u16 reg0 = addr[4] << 8 | addr[5];
 2662	u16 reg1 = addr[2] << 8 | addr[3];
 2663	u16 reg2 = addr[0] << 8 | addr[1];
 2664
 2665	if (index >= niu_num_alt_addr(np))
 2666		return -EINVAL;
 2667
 2668	if (np->flags & NIU_FLAGS_XMAC) {
 2669		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
 2670		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
 2671		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
 2672	} else {
 2673		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
 2674		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
 2675		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
 2676	}
 2677
 2678	return 0;
 2679}
 2680
 2681static int niu_enable_alt_mac(struct niu *np, int index, int on)
 2682{
 2683	unsigned long reg;
 2684	u64 val, mask;
 2685
 2686	if (index >= niu_num_alt_addr(np))
 2687		return -EINVAL;
 2688
 2689	if (np->flags & NIU_FLAGS_XMAC) {
 2690		reg = XMAC_ADDR_CMPEN;
 2691		mask = 1 << index;
 2692	} else {
 2693		reg = BMAC_ADDR_CMPEN;
 2694		mask = 1 << (index + 1);
 2695	}
 2696
 2697	val = nr64_mac(reg);
 2698	if (on)
 2699		val |= mask;
 2700	else
 2701		val &= ~mask;
 2702	nw64_mac(reg, val);
 2703
 2704	return 0;
 2705}
 2706
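      /* Bind a MAC address slot to an RDC (receive DMA channel) table
       * through the XMAC/BMAC HOST_INFO registers; the MPR bit marks
       * the entry as MAC-preferred.
       */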
 2707static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
 2708				   int num, int mac_pref)
 2709{
 2710	u64 val = nr64_mac(reg);
 2711	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
 2712	val |= num;
 2713	if (mac_pref)
 2714		val |= HOST_INFO_MPR;
 2715	nw64_mac(reg, val);
 2716}
 2717
 2718static int __set_rdc_table_num(struct niu *np,
 2719			       int xmac_index, int bmac_index,
 2720			       int rdc_table_num, int mac_pref)
 2721{
 2722	unsigned long reg;
 2723
 2724	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
 2725		return -EINVAL;
 2726	if (np->flags & NIU_FLAGS_XMAC)
 2727		reg = XMAC_HOST_INFO(xmac_index);
 2728	else
 2729		reg = BMAC_HOST_INFO(bmac_index);
 2730	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
 2731	return 0;
 2732}
 2733
 2734static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
 2735					 int mac_pref)
 2736{
 2737	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
 2738}
 2739
 2740static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
 2741					   int mac_pref)
 2742{
 2743	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
 2744}
 2745
 2746static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
 2747				     int table_num, int mac_pref)
 2748{
 2749	if (idx >= niu_num_alt_addr(np))
 2750		return -EINVAL;
 2751	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
 2752}
 2753
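      /* Each VLAN table entry keeps even parity over its ports 0/1
       * byte (PARITY0) and its ports 2/3 byte (PARITY1): the parity
       * bit is set whenever the corresponding byte has an odd
       * population count.
       */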
 2754static u64 vlan_entry_set_parity(u64 reg_val)
 2755{
 2756	u64 port01_mask;
 2757	u64 port23_mask;
 2758
 2759	port01_mask = 0x00ff;
 2760	port23_mask = 0xff00;
 2761
 2762	if (hweight64(reg_val & port01_mask) & 1)
 2763		reg_val |= ENET_VLAN_TBL_PARITY0;
 2764	else
 2765		reg_val &= ~ENET_VLAN_TBL_PARITY0;
 2766
 2767	if (hweight64(reg_val & port23_mask) & 1)
 2768		reg_val |= ENET_VLAN_TBL_PARITY1;
 2769	else
 2770		reg_val &= ~ENET_VLAN_TBL_PARITY1;
 2771
 2772	return reg_val;
 2773}
 2774
 2775static void vlan_tbl_write(struct niu *np, unsigned long index,
 2776			   int port, int vpr, int rdc_table)
 2777{
 2778	u64 reg_val = nr64(ENET_VLAN_TBL(index));
 2779
 2780	reg_val &= ~((ENET_VLAN_TBL_VPR |
 2781		      ENET_VLAN_TBL_VLANRDCTBLN) <<
 2782		     ENET_VLAN_TBL_SHIFT(port));
 2783	if (vpr)
 2784		reg_val |= (ENET_VLAN_TBL_VPR <<
 2785			    ENET_VLAN_TBL_SHIFT(port));
 2786	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
 2787
 2788	reg_val = vlan_entry_set_parity(reg_val);
 2789
 2790	nw64(ENET_VLAN_TBL(index), reg_val);
 2791}
 2792
 2793static void vlan_tbl_clear(struct niu *np)
 2794{
 2795	int i;
 2796
 2797	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
 2798		nw64(ENET_VLAN_TBL(i), 0);
 2799}
 2800
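      /* TCAM access protocol: stage the key/mask (or associated data)
       * registers, issue a read or write op through TCAM_CTL, then
       * poll TCAM_CTL_STAT until the hardware signals completion.
       */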
 2801static int tcam_wait_bit(struct niu *np, u64 bit)
 2802{
 2803	int limit = 1000;
 2804
 2805	while (--limit > 0) {
 2806		if (nr64(TCAM_CTL) & bit)
 2807			break;
 2808		udelay(1);
 2809	}
 2810	if (limit <= 0)
 2811		return -ENODEV;
 2812
 2813	return 0;
 2814}
 2815
 2816static int tcam_flush(struct niu *np, int index)
 2817{
 2818	nw64(TCAM_KEY_0, 0x00);
 2819	nw64(TCAM_KEY_MASK_0, 0xff);
 2820	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
 2821
 2822	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2823}
 2824
 2825#if 0
 2826static int tcam_read(struct niu *np, int index,
 2827		     u64 *key, u64 *mask)
 2828{
 2829	int err;
 2830
 2831	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
 2832	err = tcam_wait_bit(np, TCAM_CTL_STAT);
 2833	if (!err) {
 2834		key[0] = nr64(TCAM_KEY_0);
 2835		key[1] = nr64(TCAM_KEY_1);
 2836		key[2] = nr64(TCAM_KEY_2);
 2837		key[3] = nr64(TCAM_KEY_3);
 2838		mask[0] = nr64(TCAM_KEY_MASK_0);
 2839		mask[1] = nr64(TCAM_KEY_MASK_1);
 2840		mask[2] = nr64(TCAM_KEY_MASK_2);
 2841		mask[3] = nr64(TCAM_KEY_MASK_3);
 2842	}
 2843	return err;
 2844}
 2845#endif
 2846
 2847static int tcam_write(struct niu *np, int index,
 2848		      u64 *key, u64 *mask)
 2849{
 2850	nw64(TCAM_KEY_0, key[0]);
 2851	nw64(TCAM_KEY_1, key[1]);
 2852	nw64(TCAM_KEY_2, key[2]);
 2853	nw64(TCAM_KEY_3, key[3]);
 2854	nw64(TCAM_KEY_MASK_0, mask[0]);
 2855	nw64(TCAM_KEY_MASK_1, mask[1]);
 2856	nw64(TCAM_KEY_MASK_2, mask[2]);
 2857	nw64(TCAM_KEY_MASK_3, mask[3]);
 2858	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
 2859
 2860	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2861}
 2862
 2863#if 0
 2864static int tcam_assoc_read(struct niu *np, int index, u64 *data)
 2865{
 2866	int err;
 2867
 2868	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
 2869	err = tcam_wait_bit(np, TCAM_CTL_STAT);
 2870	if (!err)
 2871		*data = nr64(TCAM_KEY_1);
 2872
 2873	return err;
 2874}
 2875#endif
 2876
 2877static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
 2878{
 2879	nw64(TCAM_KEY_1, assoc_data);
 2880	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
 2881
 2882	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2883}
 2884
 2885static void tcam_enable(struct niu *np, int on)
 2886{
 2887	u64 val = nr64(FFLP_CFG_1);
 2888
 2889	if (on)
 2890		val &= ~FFLP_CFG_1_TCAM_DIS;
 2891	else
 2892		val |= FFLP_CFG_1_TCAM_DIS;
 2893	nw64(FFLP_CFG_1, val);
 2894}
 2895
 2896static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
 2897{
 2898	u64 val = nr64(FFLP_CFG_1);
 2899
 2900	val &= ~(FFLP_CFG_1_FFLPINITDONE |
 2901		 FFLP_CFG_1_CAMLAT |
 2902		 FFLP_CFG_1_CAMRATIO);
 2903	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
 2904	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
 2905	nw64(FFLP_CFG_1, val);
 2906
 2907	val = nr64(FFLP_CFG_1);
 2908	val |= FFLP_CFG_1_FFLPINITDONE;
 2909	nw64(FFLP_CFG_1, val);
 2910}
 2911
 2912static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
 2913				      int on)
 2914{
 2915	unsigned long reg;
 2916	u64 val;
 2917
 2918	if (class < CLASS_CODE_ETHERTYPE1 ||
 2919	    class > CLASS_CODE_ETHERTYPE2)
 2920		return -EINVAL;
 2921
 2922	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
 2923	val = nr64(reg);
 2924	if (on)
 2925		val |= L2_CLS_VLD;
 2926	else
 2927		val &= ~L2_CLS_VLD;
 2928	nw64(reg, val);
 2929
 2930	return 0;
 2931}
 2932
 2933#if 0
 2934static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
 2935				   u64 ether_type)
 2936{
 2937	unsigned long reg;
 2938	u64 val;
 2939
 2940	if (class < CLASS_CODE_ETHERTYPE1 ||
 2941	    class > CLASS_CODE_ETHERTYPE2 ||
 2942	    (ether_type & ~(u64)0xffff) != 0)
 2943		return -EINVAL;
 2944
 2945	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
 2946	val = nr64(reg);
 2947	val &= ~L2_CLS_ETYPE;
 2948	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
 2949	nw64(reg, val);
 2950
 2951	return 0;
 2952}
 2953#endif
 2954
 2955static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
 2956				     int on)
 2957{
 2958	unsigned long reg;
 2959	u64 val;
 2960
 2961	if (class < CLASS_CODE_USER_PROG1 ||
 2962	    class > CLASS_CODE_USER_PROG4)
 2963		return -EINVAL;
 2964
 2965	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
 2966	val = nr64(reg);
 2967	if (on)
 2968		val |= L3_CLS_VALID;
 2969	else
 2970		val &= ~L3_CLS_VALID;
 2971	nw64(reg, val);
 2972
 2973	return 0;
 2974}
 2975
 2976static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
 2977				  int ipv6, u64 protocol_id,
 2978				  u64 tos_mask, u64 tos_val)
 2979{
 2980	unsigned long reg;
 2981	u64 val;
 2982
 2983	if (class < CLASS_CODE_USER_PROG1 ||
 2984	    class > CLASS_CODE_USER_PROG4 ||
 2985	    (protocol_id & ~(u64)0xff) != 0 ||
 2986	    (tos_mask & ~(u64)0xff) != 0 ||
 2987	    (tos_val & ~(u64)0xff) != 0)
 2988		return -EINVAL;
 2989
 2990	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
 2991	val = nr64(reg);
 2992	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
 2993		 L3_CLS_TOSMASK | L3_CLS_TOS);
 2994	if (ipv6)
 2995		val |= L3_CLS_IPVER;
 2996	val |= (protocol_id << L3_CLS_PID_SHIFT);
 2997	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
 2998	val |= (tos_val << L3_CLS_TOS_SHIFT);
 2999	nw64(reg, val);
 3000
 3001	return 0;
 3002}
 3003
 3004static int tcam_early_init(struct niu *np)
 3005{
 3006	unsigned long i;
 3007	int err;
 3008
 3009	tcam_enable(np, 0);
 3010	tcam_set_lat_and_ratio(np,
 3011			       DEFAULT_TCAM_LATENCY,
 3012			       DEFAULT_TCAM_ACCESS_RATIO);
 3013	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
 3014		err = tcam_user_eth_class_enable(np, i, 0);
 3015		if (err)
 3016			return err;
 3017	}
 3018	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
 3019		err = tcam_user_ip_class_enable(np, i, 0);
 3020		if (err)
 3021			return err;
 3022	}
 3023
 3024	return 0;
 3025}
 3026
 3027static int tcam_flush_all(struct niu *np)
 3028{
 3029	unsigned long i;
 3030
 3031	for (i = 0; i < np->parent->tcam_num_entries; i++) {
 3032		int err = tcam_flush(np, i);
 3033		if (err)
 3034			return err;
 3035	}
 3036	return 0;
 3037}
 3038
 3039static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
 3040{
 3041	return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
 3042}
 3043
 3044#if 0
 3045static int hash_read(struct niu *np, unsigned long partition,
 3046		     unsigned long index, unsigned long num_entries,
 3047		     u64 *data)
 3048{
 3049	u64 val = hash_addr_regval(index, num_entries);
 3050	unsigned long i;
 3051
 3052	if (partition >= FCRAM_NUM_PARTITIONS ||
  3053	    index + (num_entries * 8) > FCRAM_SIZE)
 3054		return -EINVAL;
 3055
 3056	nw64(HASH_TBL_ADDR(partition), val);
 3057	for (i = 0; i < num_entries; i++)
 3058		data[i] = nr64(HASH_TBL_DATA(partition));
 3059
 3060	return 0;
 3061}
 3062#endif
 3063
 3064static int hash_write(struct niu *np, unsigned long partition,
 3065		      unsigned long index, unsigned long num_entries,
 3066		      u64 *data)
 3067{
 3068	u64 val = hash_addr_regval(index, num_entries);
 3069	unsigned long i;
 3070
 3071	if (partition >= FCRAM_NUM_PARTITIONS ||
 3072	    index + (num_entries * 8) > FCRAM_SIZE)
 3073		return -EINVAL;
 3074
 3075	nw64(HASH_TBL_ADDR(partition), val);
 3076	for (i = 0; i < num_entries; i++)
 3077		nw64(HASH_TBL_DATA(partition), data[i]);
 3078
 3079	return 0;
 3080}
 3081
 3082static void fflp_reset(struct niu *np)
 3083{
 3084	u64 val;
 3085
 3086	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
 3087	udelay(10);
 3088	nw64(FFLP_CFG_1, 0);
 3089
 3090	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
 3091	nw64(FFLP_CFG_1, val);
 3092}
 3093
 3094static void fflp_set_timings(struct niu *np)
 3095{
 3096	u64 val = nr64(FFLP_CFG_1);
 3097
 3098	val &= ~FFLP_CFG_1_FFLPINITDONE;
 3099	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
 3100	nw64(FFLP_CFG_1, val);
 3101
 3102	val = nr64(FFLP_CFG_1);
 3103	val |= FFLP_CFG_1_FFLPINITDONE;
 3104	nw64(FFLP_CFG_1, val);
 3105
 3106	val = nr64(FCRAM_REF_TMR);
 3107	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
 3108	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
 3109	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
 3110	nw64(FCRAM_REF_TMR, val);
 3111}
 3112
 3113static int fflp_set_partition(struct niu *np, u64 partition,
 3114			      u64 mask, u64 base, int enable)
 3115{
 3116	unsigned long reg;
 3117	u64 val;
 3118
 3119	if (partition >= FCRAM_NUM_PARTITIONS ||
 3120	    (mask & ~(u64)0x1f) != 0 ||
 3121	    (base & ~(u64)0x1f) != 0)
 3122		return -EINVAL;
 3123
 3124	reg = FLW_PRT_SEL(partition);
 3125
 3126	val = nr64(reg);
 3127	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
 3128	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
 3129	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
 3130	if (enable)
 3131		val |= FLW_PRT_SEL_EXT;
 3132	nw64(reg, val);
 3133
 3134	return 0;
 3135}
 3136
 3137static int fflp_disable_all_partitions(struct niu *np)
 3138{
 3139	unsigned long i;
 3140
 3141	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
  3142		int err = fflp_set_partition(np, i, 0, 0, 0);
 3143		if (err)
 3144			return err;
 3145	}
 3146	return 0;
 3147}
 3148
 3149static void fflp_llcsnap_enable(struct niu *np, int on)
 3150{
 3151	u64 val = nr64(FFLP_CFG_1);
 3152
 3153	if (on)
 3154		val |= FFLP_CFG_1_LLCSNAP;
 3155	else
 3156		val &= ~FFLP_CFG_1_LLCSNAP;
 3157	nw64(FFLP_CFG_1, val);
 3158}
 3159
 3160static void fflp_errors_enable(struct niu *np, int on)
 3161{
 3162	u64 val = nr64(FFLP_CFG_1);
 3163
 3164	if (on)
 3165		val &= ~FFLP_CFG_1_ERRORDIS;
 3166	else
 3167		val |= FFLP_CFG_1_ERRORDIS;
 3168	nw64(FFLP_CFG_1, val);
 3169}
 3170
 3171static int fflp_hash_clear(struct niu *np)
 3172{
 3173	struct fcram_hash_ipv4 ent;
 3174	unsigned long i;
 3175
 3176	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
 3177	memset(&ent, 0, sizeof(ent));
 3178	ent.header = HASH_HEADER_EXT;
 3179
 3180	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
 3181		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
 3182		if (err)
 3183			return err;
 3184	}
 3185	return 0;
 3186}
 3187
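      /* One-time FFLP/classifier bring-up, performed under the parent
       * lock and guarded by PARENT_FLGS_CLS_HWINIT so that only the
       * first port to come up does the work.
       */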
 3188static int fflp_early_init(struct niu *np)
 3189{
 3190	struct niu_parent *parent;
 3191	unsigned long flags;
 3192	int err;
 3193
 3194	niu_lock_parent(np, flags);
 3195
 3196	parent = np->parent;
 3197	err = 0;
 3198	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
 3199		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 3200			fflp_reset(np);
 3201			fflp_set_timings(np);
 3202			err = fflp_disable_all_partitions(np);
 3203			if (err) {
 3204				netif_printk(np, probe, KERN_DEBUG, np->dev,
 3205					     "fflp_disable_all_partitions failed, err=%d\n",
 3206					     err);
 3207				goto out;
 3208			}
 3209		}
 3210
 3211		err = tcam_early_init(np);
 3212		if (err) {
 3213			netif_printk(np, probe, KERN_DEBUG, np->dev,
 3214				     "tcam_early_init failed, err=%d\n", err);
 3215			goto out;
 3216		}
 3217		fflp_llcsnap_enable(np, 1);
 3218		fflp_errors_enable(np, 0);
 3219		nw64(H1POLY, 0);
 3220		nw64(H2POLY, 0);
 3221
 3222		err = tcam_flush_all(np);
 3223		if (err) {
 3224			netif_printk(np, probe, KERN_DEBUG, np->dev,
 3225				     "tcam_flush_all failed, err=%d\n", err);
 3226			goto out;
 3227		}
 3228		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 3229			err = fflp_hash_clear(np);
 3230			if (err) {
 3231				netif_printk(np, probe, KERN_DEBUG, np->dev,
 3232					     "fflp_hash_clear failed, err=%d\n",
 3233					     err);
 3234				goto out;
 3235			}
 3236		}
 3237
 3238		vlan_tbl_clear(np);
 3239
 3240		parent->flags |= PARENT_FLGS_CLS_HWINIT;
 3241	}
 3242out:
 3243	niu_unlock_parent(np, flags);
 3244	return err;
 3245}
 3246
 3247static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
 3248{
 3249	if (class_code < CLASS_CODE_USER_PROG1 ||
 3250	    class_code > CLASS_CODE_SCTP_IPV6)
 3251		return -EINVAL;
 3252
 3253	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
 3254	return 0;
 3255}
 3256
 3257static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
 3258{
 3259	if (class_code < CLASS_CODE_USER_PROG1 ||
 3260	    class_code > CLASS_CODE_SCTP_IPV6)
 3261		return -EINVAL;
 3262
 3263	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
 3264	return 0;
 3265}
 3266
 3267/* Entries for the ports are interleaved in the TCAM */
 3268static u16 tcam_get_index(struct niu *np, u16 idx)
 3269{
 3270	/* One entry reserved for IP fragment rule */
 3271	if (idx >= (np->clas.tcam_sz - 1))
 3272		idx = 0;
 3273	return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
 3274}
 3275
 3276static u16 tcam_get_size(struct niu *np)
 3277{
 3278	/* One entry reserved for IP fragment rule */
 3279	return np->clas.tcam_sz - 1;
 3280}
 3281
 3282static u16 tcam_get_valid_entry_cnt(struct niu *np)
 3283{
 3284	/* One entry reserved for IP fragment rule */
 3285	return np->clas.tcam_valid_entries - 1;
 3286}
 3287
 3288static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
 3289			      u32 offset, u32 size)
 3290{
 3291	int i = skb_shinfo(skb)->nr_frags;
 3292	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 3293
 3294	frag->page = page;
 3295	frag->page_offset = offset;
 3296	frag->size = size;
 3297
 3298	skb->len += size;
 3299	skb->data_len += size;
 3300	skb->truesize += size;
 3301
 3302	skb_shinfo(skb)->nr_frags = i + 1;
 3303}
 3304
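      /* RX buffer pages are tracked in a small hash table keyed by
       * their DMA address (kept in page->index) and chained through
       * page->mapping, so an RCR entry's buffer address can be mapped
       * back to its struct page.
       */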
 3305static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
 3306{
 3307	a >>= PAGE_SHIFT;
 3308	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
 3309
 3310	return a & (MAX_RBR_RING_SIZE - 1);
 3311}
 3312
 3313static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
 3314				    struct page ***link)
 3315{
 3316	unsigned int h = niu_hash_rxaddr(rp, addr);
 3317	struct page *p, **pp;
 3318
 3319	addr &= PAGE_MASK;
 3320	pp = &rp->rxhash[h];
 3321	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
 3322		if (p->index == addr) {
 3323			*link = pp;
 3324			goto found;
 3325		}
 3326	}
 3327	BUG();
 3328
 3329found:
 3330	return p;
 3331}
 3332
 3333static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
 3334{
 3335	unsigned int h = niu_hash_rxaddr(rp, base);
 3336
 3337	page->index = base;
 3338	page->mapping = (struct address_space *) rp->rxhash[h];
 3339	rp->rxhash[h] = page;
 3340}
 3341
 3342static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
 3343			    gfp_t mask, int start_index)
 3344{
 3345	struct page *page;
 3346	u64 addr;
 3347	int i;
 3348
 3349	page = alloc_page(mask);
 3350	if (!page)
 3351		return -ENOMEM;
 3352
 3353	addr = np->ops->map_page(np->device, page, 0,
 3354				 PAGE_SIZE, DMA_FROM_DEVICE);
 3355
 3356	niu_hash_page(rp, page, addr);
 3357	if (rp->rbr_blocks_per_page > 1)
 3358		atomic_add(rp->rbr_blocks_per_page - 1,
 3359			   &compound_head(page)->_count);
 3360
 3361	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
 3362		__le32 *rbr = &rp->rbr[start_index + i];
 3363
 3364		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
 3365		addr += rp->rbr_block_size;
 3366	}
 3367
 3368	return 0;
 3369}
 3370
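      /* Queue one replacement buffer.  A fresh page (which backs
       * rbr_blocks_per_page descriptors) is only allocated once a full
       * page's worth is pending, and the hardware is only kicked once
       * rbr_kick_thresh descriptors have accumulated.
       */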
 3371static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
 3372{
 3373	int index = rp->rbr_index;
 3374
 3375	rp->rbr_pending++;
 3376	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
 3377		int err = niu_rbr_add_page(np, rp, mask, index);
 3378
 3379		if (unlikely(err)) {
 3380			rp->rbr_pending--;
 3381			return;
 3382		}
 3383
 3384		rp->rbr_index += rp->rbr_blocks_per_page;
 3385		BUG_ON(rp->rbr_index > rp->rbr_table_size);
 3386		if (rp->rbr_index == rp->rbr_table_size)
 3387			rp->rbr_index = 0;
 3388
 3389		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
 3390			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
 3391			rp->rbr_pending = 0;
 3392		}
 3393	}
 3394}
 3395
 3396static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
 3397{
 3398	unsigned int index = rp->rcr_index;
 3399	int num_rcr = 0;
 3400
 3401	rp->rx_dropped++;
 3402	while (1) {
 3403		struct page *page, **link;
 3404		u64 addr, val;
 3405		u32 rcr_size;
 3406
 3407		num_rcr++;
 3408
 3409		val = le64_to_cpup(&rp->rcr[index]);
 3410		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 3411			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
 3412		page = niu_find_rxpage(rp, addr, &link);
 3413
 3414		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 3415					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 3416		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
 3417			*link = (struct page *) page->mapping;
 3418			np->ops->unmap_page(np->device, page->index,
 3419					    PAGE_SIZE, DMA_FROM_DEVICE);
 3420			page->index = 0;
 3421			page->mapping = NULL;
 3422			__free_page(page);
 3423			rp->rbr_refill_pending++;
 3424		}
 3425
 3426		index = NEXT_RCR(rp, index);
 3427		if (!(val & RCR_ENTRY_MULTI))
 3428			break;
 3430	}
 3431	rp->rcr_index = index;
 3432
 3433	return num_rcr;
 3434}
 3435
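      /* Pull one packet off the RCR.  A packet may span several RCR
       * entries (flagged by RCR_ENTRY_MULTI); only the first entry
       * carries the checksum status.  Pages are released back for
       * refill when fully consumed, otherwise reference-counted.
       */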
 3436static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
 3437			      struct rx_ring_info *rp)
 3438{
 3439	unsigned int index = rp->rcr_index;
 3440	struct rx_pkt_hdr1 *rh;
 3441	struct sk_buff *skb;
 3442	int len, num_rcr;
 3443
 3444	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
 3445	if (unlikely(!skb))
 3446		return niu_rx_pkt_ignore(np, rp);
 3447
 3448	num_rcr = 0;
 3449	while (1) {
 3450		struct page *page, **link;
 3451		u32 rcr_size, append_size;
 3452		u64 addr, val, off;
 3453
 3454		num_rcr++;
 3455
 3456		val = le64_to_cpup(&rp->rcr[index]);
 3457
 3458		len = (val & RCR_ENTRY_L2_LEN) >>
 3459			RCR_ENTRY_L2_LEN_SHIFT;
 3460		len -= ETH_FCS_LEN;
 3461
 3462		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 3463			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
 3464		page = niu_find_rxpage(rp, addr, &link);
 3465
 3466		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 3467					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 3468
 3469		off = addr & ~PAGE_MASK;
 3470		append_size = rcr_size;
 3471		if (num_rcr == 1) {
 3472			int ptype;
 3473
 3474			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
 3475			if ((ptype == RCR_PKT_TYPE_TCP ||
 3476			     ptype == RCR_PKT_TYPE_UDP) &&
 3477			    !(val & (RCR_ENTRY_NOPORT |
 3478				     RCR_ENTRY_ERROR)))
 3479				skb->ip_summed = CHECKSUM_UNNECESSARY;
 3480			else
 3481				skb_checksum_none_assert(skb);
 3482		} else if (!(val & RCR_ENTRY_MULTI))
 3483			append_size = len - skb->len;
 3484
 3485		niu_rx_skb_append(skb, page, off, append_size);
 3486		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
 3487			*link = (struct page *) page->mapping;
 3488			np->ops->unmap_page(np->device, page->index,
 3489					    PAGE_SIZE, DMA_FROM_DEVICE);
 3490			page->index = 0;
 3491			page->mapping = NULL;
 3492			rp->rbr_refill_pending++;
 3493		} else
 3494			get_page(page);
 3495
 3496		index = NEXT_RCR(rp, index);
 3497		if (!(val & RCR_ENTRY_MULTI))
 3498			break;
 3500	}
 3501	rp->rcr_index = index;
 3502
 3503	len += sizeof(*rh);
 3504	len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
 3505	__pskb_pull_tail(skb, len);
 3506
 3507	rh = (struct rx_pkt_hdr1 *) skb->data;
 3508	if (np->dev->features & NETIF_F_RXHASH)
 3509		skb->rxhash = ((u32)rh->hashval2_0 << 24 |
 3510			       (u32)rh->hashval2_1 << 16 |
 3511			       (u32)rh->hashval1_1 << 8 |
 3512			       (u32)rh->hashval1_2 << 0);
 3513	skb_pull(skb, sizeof(*rh));
 3514
 3515	rp->rx_packets++;
 3516	rp->rx_bytes += skb->len;
 3517
 3518	skb->protocol = eth_type_trans(skb, np->dev);
 3519	skb_record_rx_queue(skb, rp->rx_channel);
 3520	napi_gro_receive(napi, skb);
 3521
 3522	return num_rcr;
 3523}
 3524
 3525static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
 3526{
 3527	int blocks_per_page = rp->rbr_blocks_per_page;
 3528	int err, index = rp->rbr_index;
 3529
 3530	err = 0;
 3531	while (index < (rp->rbr_table_size - blocks_per_page)) {
 3532		err = niu_rbr_add_page(np, rp, mask, index);
 3533		if (err)
 3534			break;
 3535
 3536		index += blocks_per_page;
 3537	}
 3538
 3539	rp->rbr_index = index;
 3540	return err;
 3541}
 3542
 3543static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
 3544{
 3545	int i;
 3546
 3547	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
 3548		struct page *page;
 3549
 3550		page = rp->rxhash[i];
 3551		while (page) {
 3552			struct page *next = (struct page *) page->mapping;
 3553			u64 base = page->index;
 3554
 3555			np->ops->unmap_page(np->device, base, PAGE_SIZE,
 3556					    DMA_FROM_DEVICE);
 3557			page->index = 0;
 3558			page->mapping = NULL;
 3559
 3560			__free_page(page);
 3561
 3562			page = next;
 3563		}
 3564	}
 3565
 3566	for (i = 0; i < rp->rbr_table_size; i++)
 3567		rp->rbr[i] = cpu_to_le32(0);
 3568	rp->rbr_index = 0;
 3569}
 3570
 3571static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 3572{
 3573	struct tx_buff_info *tb = &rp->tx_buffs[idx];
 3574	struct sk_buff *skb = tb->skb;
 3575	struct tx_pkt_hdr *tp;
 3576	u64 tx_flags;
 3577	int i, len;
 3578
 3579	tp = (struct tx_pkt_hdr *) skb->data;
 3580	tx_flags = le64_to_cpup(&tp->flags);
 3581
 3582	rp->tx_packets++;
 3583	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
 3584			 ((tx_flags & TXHDR_PAD) / 2));
 3585
 3586	len = skb_headlen(skb);
 3587	np->ops->unmap_single(np->device, tb->mapping,
 3588			      len, DMA_TO_DEVICE);
 3589
 3590	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
 3591		rp->mark_pending--;
 3592
 3593	tb->skb = NULL;
 3594	do {
 3595		idx = NEXT_TX(rp, idx);
 3596		len -= MAX_TX_DESC_LEN;
 3597	} while (len > 0);
 3598
 3599	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 3600		tb = &rp->tx_buffs[idx];
 3601		BUG_ON(tb->skb != NULL);
 3602		np->ops->unmap_page(np->device, tb->mapping,
 3603				    skb_shinfo(skb)->frags[i].size,
 3604				    DMA_TO_DEVICE);
 3605		idx = NEXT_TX(rp, idx);
 3606	}
 3607
 3608	dev_kfree_skb(skb);
 3609
 3610	return idx;
 3611}
 3612
 3613#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
 3614
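      /* TX completion: derive the number of newly completed packets
       * from the wrapping TX_CS packet counter, release their buffers,
       * and wake the queue once more than a quarter of the ring is
       * free again.
       */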
 3615static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 3616{
 3617	struct netdev_queue *txq;
 3618	u16 pkt_cnt, tmp;
 3619	int cons, index;
 3620	u64 cs;
 3621
 3622	index = (rp - np->tx_rings);
 3623	txq = netdev_get_tx_queue(np->dev, index);
 3624
 3625	cs = rp->tx_cs;
 3626	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
 3627		goto out;
 3628
 3629	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
 3630	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
 3631		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
 3632
 3633	rp->last_pkt_cnt = tmp;
 3634
 3635	cons = rp->cons;
 3636
 3637	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
 3638		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 3639
 3640	while (pkt_cnt--)
 3641		cons = release_tx_packet(np, rp, cons);
 3642
 3643	rp->cons = cons;
 3644	smp_mb();
 3645
 3646out:
 3647	if (unlikely(netif_tx_queue_stopped(txq) &&
 3648		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
 3649		__netif_tx_lock(txq, smp_processor_id());
 3650		if (netif_tx_queue_stopped(txq) &&
 3651		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
 3652			netif_tx_wake_queue(txq);
 3653		__netif_tx_unlock(txq);
 3654	}
 3655}
 3656
 3657static inline void niu_sync_rx_discard_stats(struct niu *np,
 3658					     struct rx_ring_info *rp,
 3659					     const int limit)
 3660{
 3661	/* This elaborate scheme is needed for reading the RX discard
 3662	 * counters, as they are only 16-bit and can overflow quickly,
 3663	 * and because the overflow indication bit is not usable as
 3664	 * the counter value does not wrap, but remains at max value
 3665	 * 0xFFFF.
 3666	 *
 3667	 * In theory and in practice counters can be lost in between
 3668	 * reading nr64() and clearing the counter nw64().  For this
 3669	 * reason, the number of counter clearings nw64() is
  3670	 * limited/reduced through the limit parameter.
 3671	 */
 3672	int rx_channel = rp->rx_channel;
 3673	u32 misc, wred;
 3674
 3675	/* RXMISC (Receive Miscellaneous Discard Count), covers the
 3676	 * following discard events: IPP (Input Port Process),
  3677	 * FFLP/TCAM, full RCR (Receive Completion Ring), and RBR
  3678	 * (Receive Block Ring) prefetch buffer empty.
 3679	 */
 3680	misc = nr64(RXMISC(rx_channel));
 3681	if (unlikely((misc & RXMISC_COUNT) > limit)) {
 3682		nw64(RXMISC(rx_channel), 0);
 3683		rp->rx_errors += misc & RXMISC_COUNT;
 3684
 3685		if (unlikely(misc & RXMISC_OFLOW))
 3686			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
 3687				rx_channel);
 3688
 3689		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
 3690			     "rx-%d: MISC drop=%u over=%u\n",
 3691			     rx_channel, misc, misc-limit);
 3692	}
 3693
 3694	/* WRED (Weighted Random Early Discard) by hardware */
 3695	wred = nr64(RED_DIS_CNT(rx_channel));
 3696	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
 3697		nw64(RED_DIS_CNT(rx_channel), 0);
 3698		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
 3699
 3700		if (unlikely(wred & RED_DIS_CNT_OFLOW))
 3701			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
 3702
 3703		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
 3704			     "rx-%d: WRED drop=%u over=%u\n",
 3705			     rx_channel, wred, wred-limit);
 3706	}
 3707}
 3708
 3709static int niu_rx_work(struct napi_struct *napi, struct niu *np,
 3710		       struct rx_ring_info *rp, int budget)
 3711{
 3712	int qlen, rcr_done = 0, work_done = 0;
 3713	struct rxdma_mailbox *mbox = rp->mbox;
 3714	u64 stat;
 3715
 3716#if 1
 3717	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 3718	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
 3719#else
 3720	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
 3721	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
 3722#endif
 3723	mbox->rx_dma_ctl_stat = 0;
 3724	mbox->rcrstat_a = 0;
 3725
 3726	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
 3727		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
 3728		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
 3729
 3730	rcr_done = work_done = 0;
 3731	qlen = min(qlen, budget);
 3732	while (work_done < qlen) {
 3733		rcr_done += niu_process_rx_pkt(napi, np, rp);
 3734		work_done++;
 3735	}
 3736
 3737	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
 3738		unsigned int i;
 3739
 3740		for (i = 0; i < rp->rbr_refill_pending; i++)
 3741			niu_rbr_refill(np, rp, GFP_ATOMIC);
 3742		rp->rbr_refill_pending = 0;
 3743	}
 3744
 3745	stat = (RX_DMA_CTL_STAT_MEX |
 3746		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
 3747		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
 3748
 3749	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
 3750
  3751	/* Only sync discard stats when qlen indicates potential for drops */
 3752	if (qlen > 10)
 3753		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
 3754
 3755	return work_done;
 3756}
 3757
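      /* NAPI core: the LDG's v0 vector encodes TX channels in the
       * upper 32 bits and RX channels in the lower 32; service each
       * flagged channel and unmask its logical device interrupt
       * (LD_IM0) when done.
       */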
 3758static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
 3759{
 3760	u64 v0 = lp->v0;
 3761	u32 tx_vec = (v0 >> 32);
 3762	u32 rx_vec = (v0 & 0xffffffff);
 3763	int i, work_done = 0;
 3764
 3765	netif_printk(np, intr, KERN_DEBUG, np->dev,
 3766		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
 3767
 3768	for (i = 0; i < np->num_tx_rings; i++) {
 3769		struct tx_ring_info *rp = &np->tx_rings[i];
 3770		if (tx_vec & (1 << rp->tx_channel))
 3771			niu_tx_work(np, rp);
 3772		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
 3773	}
 3774
 3775	for (i = 0; i < np->num_rx_rings; i++) {
 3776		struct rx_ring_info *rp = &np->rx_rings[i];
 3777
 3778		if (rx_vec & (1 << rp->rx_channel)) {
 3779			int this_work_done;
 3780
 3781			this_work_done = niu_rx_work(&lp->napi, np, rp,
 3782						     budget);
 3783
 3784			budget -= this_work_done;
 3785			work_done += this_work_done;
 3786		}
 3787		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
 3788	}
 3789
 3790	return work_done;
 3791}
 3792
 3793static int niu_poll(struct napi_struct *napi, int budget)
 3794{
 3795	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
 3796	struct niu *np = lp->np;
 3797	int work_done;
 3798
 3799	work_done = niu_poll_core(np, lp, budget);
 3800
 3801	if (work_done < budget) {
 3802		napi_complete(napi);
 3803		niu_ldg_rearm(np, lp, 1);
 3804	}
 3805	return work_done;
 3806}
 3807
 3808static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
 3809				  u64 stat)
 3810{
 3811	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
 3812
 3813	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
 3814		pr_cont("RBR_TMOUT ");
 3815	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
 3816		pr_cont("RSP_CNT ");
 3817	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
 3818		pr_cont("BYTE_EN_BUS ");
 3819	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
 3820		pr_cont("RSP_DAT ");
 3821	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
 3822		pr_cont("RCR_ACK ");
 3823	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
 3824		pr_cont("RCR_SHA_PAR ");
 3825	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
 3826		pr_cont("RBR_PRE_PAR ");
 3827	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
 3828		pr_cont("CONFIG ");
 3829	if (stat & RX_DMA_CTL_STAT_RCRINCON)
 3830		pr_cont("RCRINCON ");
 3831	if (stat & RX_DMA_CTL_STAT_RCRFULL)
 3832		pr_cont("RCRFULL ");
 3833	if (stat & RX_DMA_CTL_STAT_RBRFULL)
 3834		pr_cont("RBRFULL ");
 3835	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
 3836		pr_cont("RBRLOGPAGE ");
 3837	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
 3838		pr_cont("CFIGLOGPAGE ");
 3839	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
  3840		pr_cont("DC_FIFO ");
 3841
 3842	pr_cont(")\n");
 3843}
 3844
 3845static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
 3846{
 3847	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 3848	int err = 0;
  3849
 3851	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
 3852		    RX_DMA_CTL_STAT_PORT_FATAL))
 3853		err = -EINVAL;
 3854
 3855	if (err) {
 3856		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
 3857			   rp->rx_channel,
 3858			   (unsigned long long) stat);
 3859
 3860		niu_log_rxchan_errors(np, rp, stat);
 3861	}
 3862
 3863	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 3864	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
 3865
 3866	return err;
 3867}
 3868
 3869static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
 3870				  u64 cs)
 3871{
 3872	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
 3873
 3874	if (cs & TX_CS_MBOX_ERR)
 3875		pr_cont("MBOX ");
 3876	if (cs & TX_CS_PKT_SIZE_ERR)
 3877		pr_cont("PKT_SIZE ");
 3878	if (cs & TX_CS_TX_RING_OFLOW)
 3879		pr_cont("TX_RING_OFLOW ");
 3880	if (cs & TX_CS_PREF_BUF_PAR_ERR)
 3881		pr_cont("PREF_BUF_PAR ");
 3882	if (cs & TX_CS_NACK_PREF)
 3883		pr_cont("NACK_PREF ");
 3884	if (cs & TX_CS_NACK_PKT_RD)
 3885		pr_cont("NACK_PKT_RD ");
 3886	if (cs & TX_CS_CONF_PART_ERR)
 3887		pr_cont("CONF_PART ");
 3888	if (cs & TX_CS_PKT_PRT_ERR)
 3889		pr_cont("PKT_PTR ");
 3890
 3891	pr_cont(")\n");
 3892}
 3893
 3894static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
 3895{
 3896	u64 cs, logh, logl;
 3897
 3898	cs = nr64(TX_CS(rp->tx_channel));
 3899	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
 3900	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
 3901
 3902	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
 3903		   rp->tx_channel,
 3904		   (unsigned long long)cs,
 3905		   (unsigned long long)logh,
 3906		   (unsigned long long)logl);
 3907
 3908	niu_log_txchan_errors(np, rp, cs);
 3909
 3910	return -ENODEV;
 3911}
 3912
 3913static int niu_mif_interrupt(struct niu *np)
 3914{
 3915	u64 mif_status = nr64(MIF_STATUS);
 3916	int phy_mdint = 0;
 3917
 3918	if (np->flags & NIU_FLAGS_XMAC) {
 3919		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
 3920
 3921		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
 3922			phy_mdint = 1;
 3923	}
 3924
 3925	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
 3926		   (unsigned long long)mif_status, phy_mdint);
 3927
 3928	return -ENODEV;
 3929}
 3930
 3931static void niu_xmac_interrupt(struct niu *np)
 3932{
 3933	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
 3934	u64 val;
 3935
 3936	val = nr64_mac(XTXMAC_STATUS);
 3937	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
 3938		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
 3939	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
 3940		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
 3941	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
 3942		mp->tx_fifo_errors++;
 3943	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
 3944		mp->tx_overflow_errors++;
 3945	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
 3946		mp->tx_max_pkt_size_errors++;
 3947	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
 3948		mp->tx_underflow_errors++;
 3949
 3950	val = nr64_mac(XRXMAC_STATUS);
 3951	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
 3952		mp->rx_local_faults++;
 3953	if (val & XRXMAC_STATUS_RFLT_DET)
 3954		mp->rx_remote_faults++;
 3955	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
 3956		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
 3957	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
 3958		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
 3959	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
 3960		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
 3961	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
 3962		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
 3963	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
 3964		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
 3967	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
 3968		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
 3969	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
 3970		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
 3971	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
 3972		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
 3973	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
 3974		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
 3975	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
 3976		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
 3977	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
 3978		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
 3979	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
 3980		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
 3981	if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
 3982		mp->rx_octets += RXMAC_BT_CNT_COUNT;
 3983	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
 3984		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
 3985	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
 3986		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
 3987	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
 3988		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
 3989	if (val & XRXMAC_STATUS_RXUFLOW)
 3990		mp->rx_underflows++;
 3991	if (val & XRXMAC_STATUS_RXOFLOW)
 3992		mp->rx_overflows++;
 3993
 3994	val = nr64_mac(XMAC_FC_STAT);
 3995	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
 3996		mp->pause_off_state++;
 3997	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
 3998		mp->pause_on_state++;
 3999	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
 4000		mp->pause_received++;
 4001}
 4002
 4003static void niu_bmac_interrupt(struct niu *np)
 4004{
 4005	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
 4006	u64 val;
 4007
 4008	val = nr64_mac(BTXMAC_STATUS);
 4009	if (val & BTXMAC_STATUS_UNDERRUN)
 4010		mp->tx_underflow_errors++;
 4011	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
 4012		mp->tx_max_pkt_size_errors++;
 4013	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
 4014		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
 4015	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
 4016		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
 4017
 4018	val = nr64_mac(BRXMAC_STATUS);
 4019	if (val & BRXMAC_STATUS_OVERFLOW)
 4020		mp->rx_overflows++;
 4021	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
 4022		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
 4023	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
 4024		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
 4025	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
 4026		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
 4027	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
 4028		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
 4029
 4030	val = nr64_mac(BMAC_CTRL_STATUS);
 4031	if (val & BMAC_CTRL_STATUS_NOPAUSE)
 4032		mp->pause_off_state++;
 4033	if (val & BMAC_CTRL_STATUS_PAUSE)
 4034		mp->pause_on_state++;
 4035	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
 4036		mp->pause_received++;
 4037}
 4038
 4039static int niu_mac_interrupt(struct niu *np)
 4040{
 4041	if (np->flags & NIU_FLAGS_XMAC)
 4042		niu_xmac_interrupt(np);
 4043	else
 4044		niu_bmac_interrupt(np);
 4045
 4046	return 0;
 4047}
 4048
 4049static void niu_log_device_error(struct niu *np, u64 stat)
 4050{
 4051	netdev_err(np->dev, "Core device errors ( ");
 4052
 4053	if (stat & SYS_ERR_MASK_META2)
 4054		pr_cont("META2 ");
 4055	if (stat & SYS_ERR_MASK_META1)
 4056		pr_cont("META1 ");
 4057	if (stat & SYS_ERR_MASK_PEU)
 4058		pr_cont("PEU ");
 4059	if (stat & SYS_ERR_MASK_TXC)
 4060		pr_cont("TXC ");
 4061	if (stat & SYS_ERR_MASK_RDMC)
 4062		pr_cont("RDMC ");
 4063	if (stat & SYS_ERR_MASK_TDMC)
 4064		pr_cont("TDMC ");
 4065	if (stat & SYS_ERR_MASK_ZCP)
 4066		pr_cont("ZCP ");
 4067	if (stat & SYS_ERR_MASK_FFLP)
 4068		pr_cont("FFLP ");
 4069	if (stat & SYS_ERR_MASK_IPP)
 4070		pr_cont("IPP ");
 4071	if (stat & SYS_ERR_MASK_MAC)
 4072		pr_cont("MAC ");
 4073	if (stat & SYS_ERR_MASK_SMX)
 4074		pr_cont("SMX ");
 4075
 4076	pr_cont(")\n");
 4077}
 4078
 4079static int niu_device_error(struct niu *np)
 4080{
 4081	u64 stat = nr64(SYS_ERR_STAT);
 4082
 4083	netdev_err(np->dev, "Core device error, stat[%llx]\n",
 4084		   (unsigned long long)stat);
 4085
 4086	niu_log_device_error(np, stat);
 4087
 4088	return -ENODEV;
 4089}
 4090
 4091static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
 4092			      u64 v0, u64 v1, u64 v2)
 4093{
 4095	int i, err = 0;
 4096
 4097	lp->v0 = v0;
 4098	lp->v1 = v1;
 4099	lp->v2 = v2;
 4100
 4101	if (v1 & 0x00000000ffffffffULL) {
 4102		u32 rx_vec = (v1 & 0xffffffff);
 4103
 4104		for (i = 0; i < np->num_rx_rings; i++) {
 4105			struct rx_ring_info *rp = &np->rx_rings[i];
 4106
 4107			if (rx_vec & (1 << rp->rx_channel)) {
 4108				int r = niu_rx_error(np, rp);
 4109				if (r) {
 4110					err = r;
 4111				} else {
 4112					if (!v0)
 4113						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 4114						     RX_DMA_CTL_STAT_MEX);
 4115				}
 4116			}
 4117		}
 4118	}
 4119	if (v1 & 0x7fffffff00000000ULL) {
 4120		u32 tx_vec = (v1 >> 32) & 0x7fffffff;
 4121
 4122		for (i = 0; i < np->num_tx_rings; i++) {
 4123			struct tx_ring_info *rp = &np->tx_rings[i];
 4124
 4125			if (tx_vec & (1 << rp->tx_channel)) {
 4126				int r = niu_tx_error(np, rp);
 4127				if (r)
 4128					err = r;
 4129			}
 4130		}
 4131	}
 4132	if ((v0 | v1) & 0x8000000000000000ULL) {
 4133		int r = niu_mif_interrupt(np);
 4134		if (r)
 4135			err = r;
 4136	}
 4137	if (v2) {
 4138		if (v2 & 0x01ef) {
 4139			int r = niu_mac_interrupt(np);
 4140			if (r)
 4141				err = r;
 4142		}
 4143		if (v2 & 0x0210) {
 4144			int r = niu_device_error(np);
 4145			if (r)
 4146				err = r;
 4147		}
 4148	}
 4149
 4150	if (err)
 4151		niu_enable_interrupts(np, 0);
 4152
 4153	return err;
 4154}
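/* Editor's sketch: how the logical-device state vectors are decoded by
 * niu_slowpath_interrupt() above.  LDSV1 carries the RX error bits in
 * its low 32 bits and the TX error bits in bits 32-62, while bit 63 of
 * v0 or v1 flags the MIF logical device; the 0x01ef and 0x0210 masks
 * applied to v2 appear to split the MAC sources from the core
 * device-error sources (editor's reading).  The helpers below are
 * illustrative only and are not called by the driver.
 */
static inline int example_ldsv1_rx_pending(u64 v1, int rx_channel)
{
	u32 rx_vec = v1 & 0xffffffff;		/* low word: RX channels */

	return (rx_vec >> rx_channel) & 1;
}

static inline int example_ldsv1_tx_pending(u64 v1, int tx_channel)
{
	u32 tx_vec = (v1 >> 32) & 0x7fffffff;	/* bits 32-62: TX channels */

	return (tx_vec >> tx_channel) & 1;
}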
 4155
 4156static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
 4157			    int ldn)
 4158{
 4159	struct rxdma_mailbox *mbox = rp->mbox;
 4160	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
 4161
 4162	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
 4163		      RX_DMA_CTL_STAT_RCRTO);
 4164	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
 4165
 4166	netif_printk(np, intr, KERN_DEBUG, np->dev,
 4167		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
 4168}
 4169
 4170static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
 4171			    int ldn)
 4172{
 4173	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
 4174
 4175	netif_printk(np, intr, KERN_DEBUG, np->dev,
 4176		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
 4177}
 4178
 4179static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
 4180{
 4181	struct niu_parent *parent = np->parent;
 4182	u32 rx_vec, tx_vec;
 4183	int i;
 4184
 4185	tx_vec = (v0 >> 32);
 4186	rx_vec = (v0 & 0xffffffff);
 4187
 4188	for (i = 0; i < np->num_rx_rings; i++) {
 4189		struct rx_ring_info *rp = &np->rx_rings[i];
 4190		int ldn = LDN_RXDMA(rp->rx_channel);
 4191
 4192		if (parent->ldg_map[ldn] != ldg)
 4193			continue;
 4194
 4195		nw64(LD_IM0(ldn), LD_IM0_MASK);
 4196		if (rx_vec & (1 << rp->rx_channel))
 4197			niu_rxchan_intr(np, rp, ldn);
 4198	}
 4199
 4200	for (i = 0; i < np->num_tx_rings; i++) {
 4201		struct tx_ring_info *rp = &np->tx_rings[i];
 4202		int ldn = LDN_TXDMA(rp->tx_channel);
 4203
 4204		if (parent->ldg_map[ldn] != ldg)
 4205			continue;
 4206
 4207		nw64(LD_IM0(ldn), LD_IM0_MASK);
 4208		if (tx_vec & (1 << rp->tx_channel))
 4209			niu_txchan_intr(np, rp, ldn);
 4210	}
 4211}
 4212
 4213static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
 4214			      u64 v0, u64 v1, u64 v2)
 4215{
 4216	if (likely(napi_schedule_prep(&lp->napi))) {
 4217		lp->v0 = v0;
 4218		lp->v1 = v1;
 4219		lp->v2 = v2;
 4220		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
 4221		__napi_schedule(&lp->napi);
 4222	}
 4223}
 4224
 4225static irqreturn_t niu_interrupt(int irq, void *dev_id)
 4226{
 4227	struct niu_ldg *lp = dev_id;
 4228	struct niu *np = lp->np;
 4229	int ldg = lp->ldg_num;
 4230	unsigned long flags;
 4231	u64 v0, v1, v2;
 4232
 4233	if (netif_msg_intr(np))
 4234		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
 4235		       __func__, lp, ldg);
 4236
 4237	spin_lock_irqsave(&np->lock, flags);
 4238
 4239	v0 = nr64(LDSV0(ldg));
 4240	v1 = nr64(LDSV1(ldg));
 4241	v2 = nr64(LDSV2(ldg));
 4242
 4243	if (netif_msg_intr(np))
 4244		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
 4245		       (unsigned long long) v0,
 4246		       (unsigned long long) v1,
 4247		       (unsigned long long) v2);
 4248
 4249	if (unlikely(!v0 && !v1 && !v2)) {
 4250		spin_unlock_irqrestore(&np->lock, flags);
 4251		return IRQ_NONE;
 4252	}
 4253
 4254	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
 4255		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
 4256		if (err)
 4257			goto out;
 4258	}
 4259	if (likely(v0 & ~((u64)1 << LDN_MIF)))
 4260		niu_schedule_napi(np, lp, v0, v1, v2);
 4261	else
 4262		niu_ldg_rearm(np, lp, 1);
 4263out:
 4264	spin_unlock_irqrestore(&np->lock, flags);
 4265
 4266	return IRQ_HANDLED;
 4267}
 4268
 4269static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
 4270{
 4271	if (rp->mbox) {
 4272		np->ops->free_coherent(np->device,
 4273				       sizeof(struct rxdma_mailbox),
 4274				       rp->mbox, rp->mbox_dma);
 4275		rp->mbox = NULL;
 4276	}
 4277	if (rp->rcr) {
 4278		np->ops->free_coherent(np->device,
 4279				       MAX_RCR_RING_SIZE * sizeof(__le64),
 4280				       rp->rcr, rp->rcr_dma);
 4281		rp->rcr = NULL;
 4282		rp->rcr_table_size = 0;
 4283		rp->rcr_index = 0;
 4284	}
 4285	if (rp->rbr) {
 4286		niu_rbr_free(np, rp);
 4287
 4288		np->ops->free_coherent(np->device,
 4289				       MAX_RBR_RING_SIZE * sizeof(__le32),
 4290				       rp->rbr, rp->rbr_dma);
 4291		rp->rbr = NULL;
 4292		rp->rbr_table_size = 0;
 4293		rp->rbr_index = 0;
 4294	}
 4295	kfree(rp->rxhash);
 4296	rp->rxhash = NULL;
 4297}
 4298
 4299static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
 4300{
 4301	if (rp->mbox) {
 4302		np->ops->free_coherent(np->device,
 4303				       sizeof(struct txdma_mailbox),
 4304				       rp->mbox, rp->mbox_dma);
 4305		rp->mbox = NULL;
 4306	}
 4307	if (rp->descr) {
 4308		int i;
 4309
 4310		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
 4311			if (rp->tx_buffs[i].skb)
 4312				(void) release_tx_packet(np, rp, i);
 4313		}
 4314
 4315		np->ops->free_coherent(np->device,
 4316				       MAX_TX_RING_SIZE * sizeof(__le64),
 4317				       rp->descr, rp->descr_dma);
 4318		rp->descr = NULL;
 4319		rp->pending = 0;
 4320		rp->prod = 0;
 4321		rp->cons = 0;
 4322		rp->wrap_bit = 0;
 4323	}
 4324}
 4325
 4326static void niu_free_channels(struct niu *np)
 4327{
 4328	int i;
 4329
 4330	if (np->rx_rings) {
 4331		for (i = 0; i < np->num_rx_rings; i++) {
 4332			struct rx_ring_info *rp = &np->rx_rings[i];
 4333
 4334			niu_free_rx_ring_info(np, rp);
 4335		}
 4336		kfree(np->rx_rings);
 4337		np->rx_rings = NULL;
 4338		np->num_rx_rings = 0;
 4339	}
 4340
 4341	if (np->tx_rings) {
 4342		for (i = 0; i < np->num_tx_rings; i++) {
 4343			struct tx_ring_info *rp = &np->tx_rings[i];
 4344
 4345			niu_free_tx_ring_info(np, rp);
 4346		}
 4347		kfree(np->tx_rings);
 4348		np->tx_rings = NULL;
 4349		np->num_tx_rings = 0;
 4350	}
 4351}
 4352
 4353static int niu_alloc_rx_ring_info(struct niu *np,
 4354				  struct rx_ring_info *rp)
 4355{
 4356	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
 4357
 4358	rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
 4359			     GFP_KERNEL);
 4360	if (!rp->rxhash)
 4361		return -ENOMEM;
 4362
 4363	rp->mbox = np->ops->alloc_coherent(np->device,
 4364					   sizeof(struct rxdma_mailbox),
 4365					   &rp->mbox_dma, GFP_KERNEL);
 4366	if (!rp->mbox)
 4367		return -ENOMEM;
 4368	if ((unsigned long)rp->mbox & (64UL - 1)) {
 4369		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
 4370			   rp->mbox);
 4371		return -EINVAL;
 4372	}
 4373
 4374	rp->rcr = np->ops->alloc_coherent(np->device,
 4375					  MAX_RCR_RING_SIZE * sizeof(__le64),
 4376					  &rp->rcr_dma, GFP_KERNEL);
 4377	if (!rp->rcr)
 4378		return -ENOMEM;
 4379	if ((unsigned long)rp->rcr & (64UL - 1)) {
 4380		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
 4381			   rp->rcr);
 4382		return -EINVAL;
 4383	}
 4384	rp->rcr_table_size = MAX_RCR_RING_SIZE;
 4385	rp->rcr_index = 0;
 4386
 4387	rp->rbr = np->ops->alloc_coherent(np->device,
 4388					  MAX_RBR_RING_SIZE * sizeof(__le32),
 4389					  &rp->rbr_dma, GFP_KERNEL);
 4390	if (!rp->rbr)
 4391		return -ENOMEM;
 4392	if ((unsigned long)rp->rbr & (64UL - 1)) {
 4393		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
 4394			   rp->rbr);
 4395		return -EINVAL;
 4396	}
 4397	rp->rbr_table_size = MAX_RBR_RING_SIZE;
 4398	rp->rbr_index = 0;
 4399	rp->rbr_pending = 0;
 4400
 4401	return 0;
 4402}
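/* Editor's note: the mailbox, RCR and RBR checks above all enforce the
 * same hardware rule -- each table must start on a 64-byte boundary.  A
 * hedged helper expressing that test (illustrative, not used here):
 */
static inline int example_is_64byte_aligned(const void *p)
{
	return ((unsigned long)p & (64UL - 1)) == 0;
}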
 4403
 4404static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
 4405{
 4406	int mtu = np->dev->mtu;
 4407
 4408	/* These values are recommended by the HW designers for fair
 4409	 * utilization of DRR amongst the rings.
 4410	 */
 4411	rp->max_burst = mtu + 32;
 4412	if (rp->max_burst > 4096)
 4413		rp->max_burst = 4096;
 4414}
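/* Editor's example: for a standard 1500-byte MTU the limit above works
 * out to 1532 bytes, while a 9000-byte jumbo MTU is clamped to the
 * 4096-byte ceiling.  A hedged restatement (illustrative only):
 */
static inline int example_max_burst(int mtu)
{
	int burst = mtu + 32;			/* HW-recommended slack */

	return burst > 4096 ? 4096 : burst;	/* 4096-byte ceiling */
}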
 4415
 4416static int niu_alloc_tx_ring_info(struct niu *np,
 4417				  struct tx_ring_info *rp)
 4418{
 4419	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
 4420
 4421	rp->mbox = np->ops->alloc_coherent(np->device,
 4422					   sizeof(struct txdma_mailbox),
 4423					   &rp->mbox_dma, GFP_KERNEL);
 4424	if (!rp->mbox)
 4425		return -ENOMEM;
 4426	if ((unsigned long)rp->mbox & (64UL - 1)) {
 4427		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
 4428			   rp->mbox);
 4429		return -EINVAL;
 4430	}
 4431
 4432	rp->descr = np->ops->alloc_coherent(np->device,
 4433					    MAX_TX_RING_SIZE * sizeof(__le64),
 4434					    &rp->descr_dma, GFP_KERNEL);
 4435	if (!rp->descr)
 4436		return -ENOMEM;
 4437	if ((unsigned long)rp->descr & (64UL - 1)) {
 4438		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
 4439			   rp->descr);
 4440		return -EINVAL;
 4441	}
 4442
 4443	rp->pending = MAX_TX_RING_SIZE;
 4444	rp->prod = 0;
 4445	rp->cons = 0;
 4446	rp->wrap_bit = 0;
 4447
 4448	/* XXX make these configurable... XXX */
 4449	rp->mark_freq = rp->pending / 4;
 4450
 4451	niu_set_max_burst(np, rp);
 4452
 4453	return 0;
 4454}
 4455
 4456static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
 4457{
 4458	u16 bss;
 4459
 4460	bss = min(PAGE_SHIFT, 15);
 4461
 4462	rp->rbr_block_size = 1 << bss;
 4463	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
 4464
 4465	rp->rbr_sizes[0] = 256;
 4466	rp->rbr_sizes[1] = 1024;
 4467	if (np->dev->mtu > ETH_DATA_LEN) {
 4468		switch (PAGE_SIZE) {
 4469		case 4 * 1024:
 4470			rp->rbr_sizes[2] = 4096;
 4471			break;
 4472
 4473		default:
 4474			rp->rbr_sizes[2] = 8192;
 4475			break;
 4476		}
 4477	} else {
 4478		rp->rbr_sizes[2] = 2048;
 4479	}
 4480	rp->rbr_sizes[3] = rp->rbr_block_size;
 4481}
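/* Editor's example: on a 4KB-page system bss = min(12, 15) = 12, giving
 * a 4096-byte RBR block and one block per page; with a standard MTU the
 * four buffer sizes become {256, 1024, 2048, 4096}.  On a 64KB-page
 * system bss is capped at 15, so blocks are 32KB with two per page.
 */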
 4482
 4483static int niu_alloc_channels(struct niu *np)
 4484{
 4485	struct niu_parent *parent = np->parent;
 4486	int first_rx_channel, first_tx_channel;
 4487	int num_rx_rings, num_tx_rings;
 4488	struct rx_ring_info *rx_rings;
 4489	struct tx_ring_info *tx_rings;
 4490	int i, port, err;
 4491
 4492	port = np->port;
 4493	first_rx_channel = first_tx_channel = 0;
 4494	for (i = 0; i < port; i++) {
 4495		first_rx_channel += parent->rxchan_per_port[i];
 4496		first_tx_channel += parent->txchan_per_port[i];
 4497	}
 4498
 4499	num_rx_rings = parent->rxchan_per_port[port];
 4500	num_tx_rings = parent->txchan_per_port[port];
 4501
 4502	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
 4503			   GFP_KERNEL);
 4504	err = -ENOMEM;
 4505	if (!rx_rings)
 4506		goto out_err;
 4507
 4508	np->num_rx_rings = num_rx_rings;
 4509	smp_wmb();
 4510	np->rx_rings = rx_rings;
 4511
 4512	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
 4513
 4514	for (i = 0; i < np->num_rx_rings; i++) {
 4515		struct rx_ring_info *rp = &np->rx_rings[i];
 4516
 4517		rp->np = np;
 4518		rp->rx_channel = first_rx_channel + i;
 4519
 4520		err = niu_alloc_rx_ring_info(np, rp);
 4521		if (err)
 4522			goto out_err;
 4523
 4524		niu_size_rbr(np, rp);
 4525
 4526		/* XXX better defaults, configurable, etc... XXX */
 4527		rp->nonsyn_window = 64;
 4528		rp->nonsyn_threshold = rp->rcr_table_size - 64;
 4529		rp->syn_window = 64;
 4530		rp->syn_threshold = rp->rcr_table_size - 64;
 4531		rp->rcr_pkt_threshold = 16;
 4532		rp->rcr_timeout = 8;
 4533		rp->rbr_kick_thresh = RBR_REFILL_MIN;
 4534		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
 4535			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
 4536
 4537		err = niu_rbr_fill(np, rp, GFP_KERNEL);
 4538		if (err)
 4539			return err;
 4540	}
 4541
 4542	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
 4543			   GFP_KERNEL);
 4544	err = -ENOMEM;
 4545	if (!tx_rings)
 4546		goto out_err;
 4547
 4548	np->num_tx_rings = num_tx_rings;
 4549	smp_wmb();
 4550	np->tx_rings = tx_rings;
 4551
 4552	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
 4553
 4554	for (i = 0; i < np->num_tx_rings; i++) {
 4555		struct tx_ring_info *rp = &np->tx_rings[i];
 4556
 4557		rp->np = np;
 4558		rp->tx_channel = first_tx_channel + i;
 4559
 4560		err = niu_alloc_tx_ring_info(np, rp);
 4561		if (err)
 4562			goto out_err;
 4563	}
 4564
 4565	return 0;
 4566
 4567out_err:
 4568	niu_free_channels(np);
 4569	return err;
 4570}
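/* Editor's note: the smp_wmb() calls above order the store of the ring
 * count before the store publishing the ring array pointer, so a
 * lockless reader that sees np->rx_rings (or np->tx_rings) non-NULL and
 * pairs the check with a read barrier also sees a valid ring count.
 */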
 4571
 4572static int niu_tx_cs_sng_poll(struct niu *np, int channel)
 4573{
 4574	int limit = 1000;
 4575
 4576	while (--limit > 0) {
 4577		u64 val = nr64(TX_CS(channel));
 4578		if (val & TX_CS_SNG_STATE)
 4579			return 0;
 4580	}
 4581	return -ENODEV;
 4582}
 4583
 4584static int niu_tx_channel_stop(struct niu *np, int channel)
 4585{
 4586	u64 val = nr64(TX_CS(channel));
 4587
 4588	val |= TX_CS_STOP_N_GO;
 4589	nw64(TX_CS(channel), val);
 4590
 4591	return niu_tx_cs_sng_poll(np, channel);
 4592}
 4593
 4594static int niu_tx_cs_reset_poll(struct niu *np, int channel)
 4595{
 4596	int limit = 1000;
 4597
 4598	while (--limit > 0) {
 4599		u64 val = nr64(TX_CS(channel));
 4600		if (!(val & TX_CS_RST))
 4601			return 0;
 4602	}
 4603	return -ENODEV;
 4604}
 4605
 4606static int niu_tx_channel_reset(struct niu *np, int channel)
 4607{
 4608	u64 val = nr64(TX_CS(channel));
 4609	int err;
 4610
 4611	val |= TX_CS_RST;
 4612	nw64(TX_CS(channel), val);
 4613
 4614	err = niu_tx_cs_reset_poll(np, channel);
 4615	if (!err)
 4616		nw64(TX_RING_KICK(channel), 0);
 4617
 4618	return err;
 4619}
 4620
 4621static int niu_tx_channel_lpage_init(struct niu *np, int channel)
 4622{
 4623	u64 val;
 4624
 4625	nw64(TX_LOG_MASK1(channel), 0);
 4626	nw64(TX_LOG_VAL1(channel), 0);
 4627	nw64(TX_LOG_MASK2(channel), 0);
 4628	nw64(TX_LOG_VAL2(channel), 0);
 4629	nw64(TX_LOG_PAGE_RELO1(channel), 0);
 4630	nw64(TX_LOG_PAGE_RELO2(channel), 0);
 4631	nw64(TX_LOG_PAGE_HDL(channel), 0);
 4632
 4633	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
 4634	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
 4635	nw64(TX_LOG_PAGE_VLD(channel), val);
 4636
 4637	/* XXX TXDMA 32bit mode? XXX */
 4638
 4639	return 0;
 4640}
 4641
 4642static void niu_txc_enable_port(struct niu *np, int on)
 4643{
 4644	unsigned long flags;
 4645	u64 val, mask;
 4646
 4647	niu_lock_parent(np, flags);
 4648	val = nr64(TXC_CONTROL);
 4649	mask = (u64)1 << np->port;
 4650	if (on) {
 4651		val |= TXC_CONTROL_ENABLE | mask;
 4652	} else {
 4653		val &= ~mask;
 4654		if ((val & ~TXC_CONTROL_ENABLE) == 0)
 4655			val &= ~TXC_CONTROL_ENABLE;
 4656	}
 4657	nw64(TXC_CONTROL, val);
 4658	niu_unlock_parent(np, flags);
 4659}
 4660
 4661static void niu_txc_set_imask(struct niu *np, u64 imask)
 4662{
 4663	unsigned long flags;
 4664	u64 val;
 4665
 4666	niu_lock_parent(np, flags);
 4667	val = nr64(TXC_INT_MASK);
 4668	val &= ~TXC_INT_MASK_VAL(np->port);
  4669	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
	nw64(TXC_INT_MASK, val);	/* write the updated mask back */
  4670	niu_unlock_parent(np, flags);
 4671}
 4672
 4673static void niu_txc_port_dma_enable(struct niu *np, int on)
 4674{
 4675	u64 val = 0;
 4676
 4677	if (on) {
 4678		int i;
 4679
 4680		for (i = 0; i < np->num_tx_rings; i++)
 4681			val |= (1 << np->tx_rings[i].tx_channel);
 4682	}
 4683	nw64(TXC_PORT_DMA(np->port), val);
 4684}
 4685
 4686static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 4687{
 4688	int err, channel = rp->tx_channel;
 4689	u64 val, ring_len;
 4690
 4691	err = niu_tx_channel_stop(np, channel);
 4692	if (err)
 4693		return err;
 4694
 4695	err = niu_tx_channel_reset(np, channel);
 4696	if (err)
 4697		return err;
 4698
 4699	err = niu_tx_channel_lpage_init(np, channel);
 4700	if (err)
 4701		return err;
 4702
 4703	nw64(TXC_DMA_MAX(channel), rp->max_burst);
 4704	nw64(TX_ENT_MSK(channel), 0);
 4705
 4706	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
 4707			      TX_RNG_CFIG_STADDR)) {
 4708		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
 4709			   channel, (unsigned long long)rp->descr_dma);
 4710		return -EINVAL;
 4711	}
 4712
  4713	/* The length field in TX_RNG_CFIG is measured in 64-byte
  4714	 * blocks.  rp->pending counts the TX descriptors in our ring,
  4715	 * 8 bytes each, so eight descriptors fill one block and we
  4716	 * divide by 8 to get the value the chip wants.
  4717	 */
 4718	ring_len = (rp->pending / 8);
 4719
 4720	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
 4721	       rp->descr_dma);
 4722	nw64(TX_RNG_CFIG(channel), val);
 4723
 4724	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
 4725	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
 4726		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
 4727			    channel, (unsigned long long)rp->mbox_dma);
 4728		return -EINVAL;
 4729	}
 4730	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
 4731	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
 4732
 4733	nw64(TX_CS(channel), 0);
 4734
 4735	rp->last_pkt_cnt = 0;
 4736
 4737	return 0;
 4738}
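/* Editor's example: a hypothetical 256-entry TX ring occupies
 * 256 * 8 = 2048 bytes, i.e. 32 64-byte blocks, and 256 / 8 = 32 is
 * exactly the value written into the TX_RNG_CFIG length field above.
 */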
 4739
 4740static void niu_init_rdc_groups(struct niu *np)
 4741{
 4742	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
 4743	int i, first_table_num = tp->first_table_num;
 4744
 4745	for (i = 0; i < tp->num_tables; i++) {
 4746		struct rdc_table *tbl = &tp->tables[i];
 4747		int this_table = first_table_num + i;
 4748		int slot;
 4749
 4750		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
 4751			nw64(RDC_TBL(this_table, slot),
 4752			     tbl->rxdma_channel[slot]);
 4753	}
 4754
 4755	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
 4756}
 4757
 4758static void niu_init_drr_weight(struct niu *np)
 4759{
 4760	int type = phy_decode(np->parent->port_phy, np->port);
 4761	u64 val;
 4762
 4763	switch (type) {
 4764	case PORT_TYPE_10G:
 4765		val = PT_DRR_WEIGHT_DEFAULT_10G;
 4766		break;
 4767
 4768	case PORT_TYPE_1G:
 4769	default:
 4770		val = PT_DRR_WEIGHT_DEFAULT_1G;
 4771		break;
 4772	}
 4773	nw64(PT_DRR_WT(np->port), val);
 4774}
 4775
 4776static int niu_init_hostinfo(struct niu *np)
 4777{
 4778	struct niu_parent *parent = np->parent;
 4779	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 4780	int i, err, num_alt = niu_num_alt_addr(np);
 4781	int first_rdc_table = tp->first_table_num;
 4782
 4783	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 4784	if (err)
 4785		return err;
 4786
 4787	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 4788	if (err)
 4789		return err;
 4790
 4791	for (i = 0; i < num_alt; i++) {
 4792		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
 4793		if (err)
 4794			return err;
 4795	}
 4796
 4797	return 0;
 4798}
 4799
 4800static int niu_rx_channel_reset(struct niu *np, int channel)
 4801{
 4802	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
 4803				      RXDMA_CFIG1_RST, 1000, 10,
 4804				      "RXDMA_CFIG1");
 4805}
 4806
 4807static int niu_rx_channel_lpage_init(struct niu *np, int channel)
 4808{
 4809	u64 val;
 4810
 4811	nw64(RX_LOG_MASK1(channel), 0);
 4812	nw64(RX_LOG_VAL1(channel), 0);
 4813	nw64(RX_LOG_MASK2(channel), 0);
 4814	nw64(RX_LOG_VAL2(channel), 0);
 4815	nw64(RX_LOG_PAGE_RELO1(channel), 0);
 4816	nw64(RX_LOG_PAGE_RELO2(channel), 0);
 4817	nw64(RX_LOG_PAGE_HDL(channel), 0);
 4818
 4819	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
 4820	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
 4821	nw64(RX_LOG_PAGE_VLD(channel), val);
 4822
 4823	return 0;
 4824}
 4825
 4826static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
 4827{
 4828	u64 val;
 4829
 4830	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
 4831	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
 4832	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
 4833	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
 4834	nw64(RDC_RED_PARA(rp->rx_channel), val);
 4835}
 4836
 4837static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
 4838{
 4839	u64 val = 0;
 4840
 4841	*ret = 0;
 4842	switch (rp->rbr_block_size) {
 4843	case 4 * 1024:
 4844		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4845		break;
 4846	case 8 * 1024:
 4847		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4848		break;
 4849	case 16 * 1024:
 4850		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4851		break;
 4852	case 32 * 1024:
 4853		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4854		break;
 4855	default:
 4856		return -EINVAL;
 4857	}
 4858	val |= RBR_CFIG_B_VLD2;
 4859	switch (rp->rbr_sizes[2]) {
 4860	case 2 * 1024:
 4861		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4862		break;
 4863	case 4 * 1024:
 4864		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4865		break;
 4866	case 8 * 1024:
 4867		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4868		break;
 4869	case 16 * 1024:
 4870		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4871		break;
 4872
 4873	default:
 4874		return -EINVAL;
 4875	}
 4876	val |= RBR_CFIG_B_VLD1;
 4877	switch (rp->rbr_sizes[1]) {
 4878	case 1 * 1024:
 4879		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4880		break;
 4881	case 2 * 1024:
 4882		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4883		break;
 4884	case 4 * 1024:
 4885		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4886		break;
 4887	case 8 * 1024:
 4888		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4889		break;
 4890
 4891	default:
 4892		return -EINVAL;
 4893	}
 4894	val |= RBR_CFIG_B_VLD0;
 4895	switch (rp->rbr_sizes[0]) {
 4896	case 256:
 4897		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
 4898		break;
 4899	case 512:
 4900		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
 4901		break;
 4902	case 1 * 1024:
 4903		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
 4904		break;
 4905	case 2 * 1024:
 4906		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
 4907		break;
 4908
 4909	default:
 4910		return -EINVAL;
 4911	}
 4912
 4913	*ret = val;
 4914	return 0;
 4915}
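/* Editor's sketch: a hedged usage example for niu_compute_rbr_cfig_b(),
 * illustrative only -- the real caller is niu_init_one_rx_channel()
 * below, which programs the rest of the channel as well.
 */
static int example_write_rbr_cfig_b(struct niu *np, struct rx_ring_info *rp)
{
	u64 val;
	int err = niu_compute_rbr_cfig_b(rp, &val);

	if (err)
		return err;	/* unsupported block/buffer size combination */
	nw64(RBR_CFIG_B(rp->rx_channel), val);
	return 0;
}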
 4916
 4917static int niu_enable_rx_channel(struct niu *np, int channel, int on)
 4918{
 4919	u64 val = nr64(RXDMA_CFIG1(channel));
 4920	int limit;
 4921
 4922	if (on)
 4923		val |= RXDMA_CFIG1_EN;
 4924	else
 4925		val &= ~RXDMA_CFIG1_EN;
 4926	nw64(RXDMA_CFIG1(channel), val);
 4927
 4928	limit = 1000;
 4929	while (--limit > 0) {
 4930		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
 4931			break;
 4932		udelay(10);
 4933	}
 4934	if (limit <= 0)
 4935		return -ENODEV;
 4936	return 0;
 4937}
 4938
 4939static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 4940{
 4941	int err, channel = rp->rx_channel;
 4942	u64 val;
 4943
 4944	err = niu_rx_channel_reset(np, channel);
 4945	if (err)
 4946		return err;
 4947
 4948	err = niu_rx_channel_lpage_init(np, channel);
 4949	if (err)
 4950		return err;
 4951
 4952	niu_rx_channel_wred_init(np, rp);
 4953
 4954	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
 4955	nw64(RX_DMA_CTL_STAT(channel),
 4956	     (RX_DMA_CTL_STAT_MEX |
 4957	      RX_DMA_CTL_STAT_RCRTHRES |
 4958	      RX_DMA_CTL_STAT_RCRTO |
 4959	      RX_DMA_CTL_STAT_RBR_EMPTY));
 4960	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
 4961	nw64(RXDMA_CFIG2(channel),
 4962	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
 4963	      RXDMA_CFIG2_FULL_HDR));
 4964	nw64(RBR_CFIG_A(channel),
 4965	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
 4966	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
 4967	err = niu_compute_rbr_cfig_b(rp, &val);
 4968	if (err)
 4969		return err;
 4970	nw64(RBR_CFIG_B(channel), val);
 4971	nw64(RCRCFIG_A(channel),
 4972	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
 4973	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
 4974	nw64(RCRCFIG_B(channel),
 4975	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
 4976	     RCRCFIG_B_ENTOUT |
 4977	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
 4978
 4979	err = niu_enable_rx_channel(np, channel, 1);
 4980	if (err)
 4981		return err;
 4982
 4983	nw64(RBR_KICK(channel), rp->rbr_index);
 4984
 4985	val = nr64(RX_DMA_CTL_STAT(channel));
 4986	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
 4987	nw64(RX_DMA_CTL_STAT(channel), val);
 4988
 4989	return 0;
 4990}
 4991
 4992static int niu_init_rx_channels(struct niu *np)
 4993{
 4994	unsigned long flags;
 4995	u64 seed = jiffies_64;
 4996	int err, i;
 4997
 4998	niu_lock_parent(np, flags);
 4999	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
 5000	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
 5001	niu_unlock_parent(np, flags);
 5002
 5003	/* XXX RXDMA 32bit mode? XXX */
 5004
 5005	niu_init_rdc_groups(np);
 5006	niu_init_drr_weight(np);
 5007
 5008	err = niu_init_hostinfo(np);
 5009	if (err)
 5010		return err;
 5011
 5012	for (i = 0; i < np->num_rx_rings; i++) {
 5013		struct rx_ring_info *rp = &np->rx_rings[i];
 5014
 5015		err = niu_init_one_rx_channel(np, rp);
 5016		if (err)
 5017			return err;
 5018	}
 5019
 5020	return 0;
 5021}
 5022
 5023static int niu_set_ip_frag_rule(struct niu *np)
 5024{
 5025	struct niu_parent *parent = np->parent;
 5026	struct niu_classifier *cp = &np->clas;
 5027	struct niu_tcam_entry *tp;
 5028	int index, err;
 5029
 5030	index = cp->tcam_top;
 5031	tp = &parent->tcam[index];
 5032
 5033	/* Note that the noport bit is the same in both ipv4 and
 5034	 * ipv6 format TCAM entries.
 5035	 */
 5036	memset(tp, 0, sizeof(*tp));
 5037	tp->key[1] = TCAM_V4KEY1_NOPORT;
 5038	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
 5039	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
 5040			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
 5041	err = tcam_write(np, index, tp->key, tp->key_mask);
 5042	if (err)
 5043		return err;
 5044	err = tcam_assoc_write(np, index, tp->assoc_data);
 5045	if (err)
 5046		return err;
 5047	tp->valid = 1;
 5048	cp->tcam_valid_entries++;
 5049
 5050	return 0;
 5051}
 5052
 5053static int niu_init_classifier_hw(struct niu *np)
 5054{
 5055	struct niu_parent *parent = np->parent;
 5056	struct niu_classifier *cp = &np->clas;
 5057	int i, err;
 5058
 5059	nw64(H1POLY, cp->h1_init);
 5060	nw64(H2POLY, cp->h2_init);
 5061
 5062	err = niu_init_hostinfo(np);
 5063	if (err)
 5064		return err;
 5065
 5066	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
 5067		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
 5068
 5069		vlan_tbl_write(np, i, np->port,
 5070			       vp->vlan_pref, vp->rdc_num);
 5071	}
 5072
 5073	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
 5074		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
 5075
 5076		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
 5077						ap->rdc_num, ap->mac_pref);
 5078		if (err)
 5079			return err;
 5080	}
 5081
 5082	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
 5083		int index = i - CLASS_CODE_USER_PROG1;
 5084
 5085		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
 5086		if (err)
 5087			return err;
 5088		err = niu_set_flow_key(np, i, parent->flow_key[index]);
 5089		if (err)
 5090			return err;
 5091	}
 5092
 5093	err = niu_set_ip_frag_rule(np);
 5094	if (err)
 5095		return err;
 5096
 5097	tcam_enable(np, 1);
 5098
 5099	return 0;
 5100}
 5101
 5102static int niu_zcp_write(struct niu *np, int index, u64 *data)
 5103{
 5104	nw64(ZCP_RAM_DATA0, data[0]);
 5105	nw64(ZCP_RAM_DATA1, data[1]);
 5106	nw64(ZCP_RAM_DATA2, data[2]);
 5107	nw64(ZCP_RAM_DATA3, data[3]);
 5108	nw64(ZCP_RAM_DATA4, data[4]);
 5109	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
 5110	nw64(ZCP_RAM_ACC,
 5111	     (ZCP_RAM_ACC_WRITE |
 5112	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
 5113	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
 5114
 5115	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5116				   1000, 100);
 5117}
 5118
 5119static int niu_zcp_read(struct niu *np, int index, u64 *data)
 5120{
 5121	int err;
 5122
 5123	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5124				  1000, 100);
 5125	if (err) {
 5126		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
 5127			   (unsigned long long)nr64(ZCP_RAM_ACC));
 5128		return err;
 5129	}
 5130
 5131	nw64(ZCP_RAM_ACC,
 5132	     (ZCP_RAM_ACC_READ |
 5133	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
 5134	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
 5135
 5136	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5137				  1000, 100);
 5138	if (err) {
 5139		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
 5140			   (unsigned long long)nr64(ZCP_RAM_ACC));
 5141		return err;
 5142	}
 5143
 5144	data[0] = nr64(ZCP_RAM_DATA0);
 5145	data[1] = nr64(ZCP_RAM_DATA1);
 5146	data[2] = nr64(ZCP_RAM_DATA2);
 5147	data[3] = nr64(ZCP_RAM_DATA3);
 5148	data[4] = nr64(ZCP_RAM_DATA4);
 5149
 5150	return 0;
 5151}
 5152
 5153static void niu_zcp_cfifo_reset(struct niu *np)
 5154{
 5155	u64 val = nr64(RESET_CFIFO);
 5156
 5157	val |= RESET_CFIFO_RST(np->port);
 5158	nw64(RESET_CFIFO, val);
 5159	udelay(10);
 5160
 5161	val &= ~RESET_CFIFO_RST(np->port);
 5162	nw64(RESET_CFIFO, val);
 5163}
 5164
 5165static int niu_init_zcp(struct niu *np)
 5166{
 5167	u64 data[5], rbuf[5];
 5168	int i, max, err;
 5169
 5170	if (np->parent->plat_type != PLAT_TYPE_NIU) {
 5171		if (np->port == 0 || np->port == 1)
 5172			max = ATLAS_P0_P1_CFIFO_ENTRIES;
 5173		else
 5174			max = ATLAS_P2_P3_CFIFO_ENTRIES;
 5175	} else
 5176		max = NIU_CFIFO_ENTRIES;
 5177
 5178	data[0] = 0;
 5179	data[1] = 0;
 5180	data[2] = 0;
 5181	data[3] = 0;
 5182	data[4] = 0;
 5183
 5184	for (i = 0; i < max; i++) {
 5185		err = niu_zcp_write(np, i, data);
 5186		if (err)
 5187			return err;
 5188		err = niu_zcp_read(np, i, rbuf);
 5189		if (err)
 5190			return err;
 5191	}
 5192
 5193	niu_zcp_cfifo_reset(np);
 5194	nw64(CFIFO_ECC(np->port), 0);
 5195	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
 5196	(void) nr64(ZCP_INT_STAT);
 5197	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
 5198
 5199	return 0;
 5200}
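/* Editor's note: the write/read sweep above pushes zeros through the
 * ZCP PIO port into every CFIFO entry and reads each one back, so the
 * FIFO reset that follows starts from known-clean RAM contents and a
 * clean ECC/interrupt state.
 */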
 5201
 5202static void niu_ipp_write(struct niu *np, int index, u64 *data)
 5203{
 5204	u64 val = nr64_ipp(IPP_CFIG);
 5205
 5206	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
 5207	nw64_ipp(IPP_DFIFO_WR_PTR, index);
 5208	nw64_ipp(IPP_DFIFO_WR0, data[0]);
 5209	nw64_ipp(IPP_DFIFO_WR1, data[1]);
 5210	nw64_ipp(IPP_DFIFO_WR2, data[2]);
 5211	nw64_ipp(IPP_DFIFO_WR3, data[3]);
 5212	nw64_ipp(IPP_DFIFO_WR4, data[4]);
 5213	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
 5214}
 5215
 5216static void niu_ipp_read(struct niu *np, int index, u64 *data)
 5217{
 5218	nw64_ipp(IPP_DFIFO_RD_PTR, index);
 5219	data[0] = nr64_ipp(IPP_DFIFO_RD0);
 5220	data[1] = nr64_ipp(IPP_DFIFO_RD1);
 5221	data[2] = nr64_ipp(IPP_DFIFO_RD2);
 5222	data[3] = nr64_ipp(IPP_DFIFO_RD3);
 5223	data[4] = nr64_ipp(IPP_DFIFO_RD4);
 5224}
 5225
 5226static int niu_ipp_reset(struct niu *np)
 5227{
 5228	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
 5229					  1000, 100, "IPP_CFIG");
 5230}
 5231
 5232static int niu_init_ipp(struct niu *np)
 5233{
 5234	u64 data[5], rbuf[5], val;
 5235	int i, max, err;
 5236
 5237	if (np->parent->plat_type != PLAT_TYPE_NIU) {
 5238		if (np->port == 0 || np->port == 1)
 5239			max = ATLAS_P0_P1_DFIFO_ENTRIES;
 5240		else
 5241			max = ATLAS_P2_P3_DFIFO_ENTRIES;
 5242	} else
 5243		max = NIU_DFIFO_ENTRIES;
 5244
 5245	data[0] = 0;
 5246	data[1] = 0;
 5247	data[2] = 0;
 5248	data[3] = 0;
 5249	data[4] = 0;
 5250
 5251	for (i = 0; i < max; i++) {
 5252		niu_ipp_write(np, i, data);
 5253		niu_ipp_read(np, i, rbuf);
 5254	}
 5255
 5256	(void) nr64_ipp(IPP_INT_STAT);
 5257	(void) nr64_ipp(IPP_INT_STAT);
 5258
 5259	err = niu_ipp_reset(np);
 5260	if (err)
 5261		return err;
 5262
 5263	(void) nr64_ipp(IPP_PKT_DIS);
 5264	(void) nr64_ipp(IPP_BAD_CS_CNT);
 5265	(void) nr64_ipp(IPP_ECC);
 5266
 5267	(void) nr64_ipp(IPP_INT_STAT);
 5268
 5269	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
 5270
 5271	val = nr64_ipp(IPP_CFIG);
 5272	val &= ~IPP_CFIG_IP_MAX_PKT;
 5273	val |= (IPP_CFIG_IPP_ENABLE |
 5274		IPP_CFIG_DFIFO_ECC_EN |
 5275		IPP_CFIG_DROP_BAD_CRC |
 5276		IPP_CFIG_CKSUM_EN |
 5277		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
 5278	nw64_ipp(IPP_CFIG, val);
 5279
 5280	return 0;
 5281}
 5282
 5283static void niu_handle_led(struct niu *np, int status)
 5284{
  5285	u64 val = nr64_mac(XMAC_CONFIG);
 5287
 5288	if ((np->flags & NIU_FLAGS_10G) != 0 &&
 5289	    (np->flags & NIU_FLAGS_FIBER) != 0) {
 5290		if (status) {
 5291			val |= XMAC_CONFIG_LED_POLARITY;
 5292			val &= ~XMAC_CONFIG_FORCE_LED_ON;
 5293		} else {
 5294			val |= XMAC_CONFIG_FORCE_LED_ON;
 5295			val &= ~XMAC_CONFIG_LED_POLARITY;
 5296		}
 5297	}
 5298
 5299	nw64_mac(XMAC_CONFIG, val);
 5300}
 5301
 5302static void niu_init_xif_xmac(struct niu *np)
 5303{
 5304	struct niu_link_config *lp = &np->link_config;
 5305	u64 val;
 5306
 5307	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
 5308		val = nr64(MIF_CONFIG);
 5309		val |= MIF_CONFIG_ATCA_GE;
 5310		nw64(MIF_CONFIG, val);
 5311	}
 5312
 5313	val = nr64_mac(XMAC_CONFIG);
 5314	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
 5315
 5316	val |= XMAC_CONFIG_TX_OUTPUT_EN;
 5317
 5318	if (lp->loopback_mode == LOOPBACK_MAC) {
 5319		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
 5320		val |= XMAC_CONFIG_LOOPBACK;
 5321	} else {
 5322		val &= ~XMAC_CONFIG_LOOPBACK;
 5323	}
 5324
 5325	if (np->flags & NIU_FLAGS_10G) {
 5326		val &= ~XMAC_CONFIG_LFS_DISABLE;
 5327	} else {
 5328		val |= XMAC_CONFIG_LFS_DISABLE;
 5329		if (!(np->flags & NIU_FLAGS_FIBER) &&
 5330		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
 5331			val |= XMAC_CONFIG_1G_PCS_BYPASS;
 5332		else
 5333			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
 5334	}
 5335
 5336	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
 5337
 5338	if (lp->active_speed == SPEED_100)
 5339		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
 5340	else
 5341		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
 5342
 5343	nw64_mac(XMAC_CONFIG, val);
 5344
 5345	val = nr64_mac(XMAC_CONFIG);
 5346	val &= ~XMAC_CONFIG_MODE_MASK;
 5347	if (np->flags & NIU_FLAGS_10G) {
 5348		val |= XMAC_CONFIG_MODE_XGMII;
 5349	} else {
 5350		if (lp->active_speed == SPEED_1000)
 5351			val |= XMAC_CONFIG_MODE_GMII;
 5352		else
 5353			val |= XMAC_CONFIG_MODE_MII;
 5354	}
 5355
 5356	nw64_mac(XMAC_CONFIG, val);
 5357}
 5358
 5359static void niu_init_xif_bmac(struct niu *np)
 5360{
 5361	struct niu_link_config *lp = &np->link_config;
 5362	u64 val;
 5363
 5364	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
 5365
 5366	if (lp->loopback_mode == LOOPBACK_MAC)
 5367		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
 5368	else
 5369		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
 5370
 5371	if (lp->active_speed == SPEED_1000)
 5372		val |= BMAC_XIF_CONFIG_GMII_MODE;
 5373	else
 5374		val &= ~BMAC_XIF_CONFIG_GMII_MODE;
 5375
 5376	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
 5377		 BMAC_XIF_CONFIG_LED_POLARITY);
 5378
 5379	if (!(np->flags & NIU_FLAGS_10G) &&
 5380	    !(np->flags & NIU_FLAGS_FIBER) &&
 5381	    lp->active_speed == SPEED_100)
 5382		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
 5383	else
 5384		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
 5385
 5386	nw64_mac(BMAC_XIF_CONFIG, val);
 5387}
 5388
 5389static void niu_init_xif(struct niu *np)
 5390{
 5391	if (np->flags & NIU_FLAGS_XMAC)
 5392		niu_init_xif_xmac(np);
 5393	else
 5394		niu_init_xif_bmac(np);
 5395}
 5396
 5397static void niu_pcs_mii_reset(struct niu *np)
 5398{
 5399	int limit = 1000;
  5400	u64 val = nr64_pcs(PCS_MII_CTL);

  5401	val |= PCS_MII_CTL_RST;
 5402	nw64_pcs(PCS_MII_CTL, val);
 5403	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
 5404		udelay(100);
 5405		val = nr64_pcs(PCS_MII_CTL);
 5406	}
 5407}
 5408
 5409static void niu_xpcs_reset(struct niu *np)
 5410{
 5411	int limit = 1000;
  5412	u64 val = nr64_xpcs(XPCS_CONTROL1);

  5413	val |= XPCS_CONTROL1_RESET;
 5414	nw64_xpcs(XPCS_CONTROL1, val);
 5415	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
 5416		udelay(100);
 5417		val = nr64_xpcs(XPCS_CONTROL1);
 5418	}
 5419}
 5420
 5421static int niu_init_pcs(struct niu *np)
 5422{
 5423	struct niu_link_config *lp = &np->link_config;
 5424	u64 val;
 5425
 5426	switch (np->flags & (NIU_FLAGS_10G |
 5427			     NIU_FLAGS_FIBER |
 5428			     NIU_FLAGS_XCVR_SERDES)) {
 5429	case NIU_FLAGS_FIBER:
 5430		/* 1G fiber */
 5431		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
 5432		nw64_pcs(PCS_DPATH_MODE, 0);
 5433		niu_pcs_mii_reset(np);
 5434		break;
 5435
 5436	case NIU_FLAGS_10G:
 5437	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 5438	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 5439		/* 10G SERDES */
 5440		if (!(np->flags & NIU_FLAGS_XMAC))
 5441			return -EINVAL;
 5442
 5443		/* 10G copper or fiber */
 5444		val = nr64_mac(XMAC_CONFIG);
 5445		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
 5446		nw64_mac(XMAC_CONFIG, val);
 5447
 5448		niu_xpcs_reset(np);
 5449
 5450		val = nr64_xpcs(XPCS_CONTROL1);
 5451		if (lp->loopback_mode == LOOPBACK_PHY)
 5452			val |= XPCS_CONTROL1_LOOPBACK;
 5453		else
 5454			val &= ~XPCS_CONTROL1_LOOPBACK;
 5455		nw64_xpcs(XPCS_CONTROL1, val);
 5456
 5457		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
 5458		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
 5459		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
 5460		break;
  5461
 5463	case NIU_FLAGS_XCVR_SERDES:
 5464		/* 1G SERDES */
 5465		niu_pcs_mii_reset(np);
 5466		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
 5467		nw64_pcs(PCS_DPATH_MODE, 0);
 5468		break;
 5469
 5470	case 0:
 5471		/* 1G copper */
 5472	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
 5473		/* 1G RGMII FIBER */
 5474		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
 5475		niu_pcs_mii_reset(np);
 5476		break;
 5477
 5478	default:
 5479		return -EINVAL;
 5480	}
 5481
 5482	return 0;
 5483}
 5484
 5485static int niu_reset_tx_xmac(struct niu *np)
 5486{
 5487	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
 5488					  (XTXMAC_SW_RST_REG_RS |
 5489					   XTXMAC_SW_RST_SOFT_RST),
 5490					  1000, 100, "XTXMAC_SW_RST");
 5491}
 5492
 5493static int niu_reset_tx_bmac(struct niu *np)
 5494{
 5495	int limit;
 5496
 5497	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
 5498	limit = 1000;
 5499	while (--limit >= 0) {
 5500		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
 5501			break;
 5502		udelay(100);
 5503	}
 5504	if (limit < 0) {
 5505		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
 5506			np->port,
 5507			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
 5508		return -ENODEV;
 5509	}
 5510
 5511	return 0;
 5512}
 5513
 5514static int niu_reset_tx_mac(struct niu *np)
 5515{
 5516	if (np->flags & NIU_FLAGS_XMAC)
 5517		return niu_reset_tx_xmac(np);
 5518	else
 5519		return niu_reset_tx_bmac(np);
 5520}
 5521
 5522static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
 5523{
 5524	u64 val;
 5525
 5526	val = nr64_mac(XMAC_MIN);
 5527	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
 5528		 XMAC_MIN_RX_MIN_PKT_SIZE);
 5529	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
 5530	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
 5531	nw64_mac(XMAC_MIN, val);
 5532
 5533	nw64_mac(XMAC_MAX, max);
 5534
 5535	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
 5536
 5537	val = nr64_mac(XMAC_IPG);
 5538	if (np->flags & NIU_FLAGS_10G) {
 5539		val &= ~XMAC_IPG_IPG_XGMII;
 5540		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
 5541	} else {
 5542		val &= ~XMAC_IPG_IPG_MII_GMII;
 5543		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
 5544	}
 5545	nw64_mac(XMAC_IPG, val);
 5546
 5547	val = nr64_mac(XMAC_CONFIG);
 5548	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
 5549		 XMAC_CONFIG_STRETCH_MODE |
 5550		 XMAC_CONFIG_VAR_MIN_IPG_EN |
 5551		 XMAC_CONFIG_TX_ENABLE);
 5552	nw64_mac(XMAC_CONFIG, val);
 5553
 5554	nw64_mac(TXMAC_FRM_CNT, 0);
 5555	nw64_mac(TXMAC_BYTE_CNT, 0);
 5556}
 5557
 5558static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
 5559{
 5560	u64 val;
 5561
 5562	nw64_mac(BMAC_MIN_FRAME, min);
 5563	nw64_mac(BMAC_MAX_FRAME, max);
 5564
 5565	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
  5566	nw64_mac(BMAC_CTRL_TYPE, 0x8808);	/* MAC Control frame EtherType */
 5567	nw64_mac(BMAC_PREAMBLE_SIZE, 7);
 5568
 5569	val = nr64_mac(BTXMAC_CONFIG);
 5570	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
 5571		 BTXMAC_CONFIG_ENABLE);
 5572	nw64_mac(BTXMAC_CONFIG, val);
 5573}
 5574
 5575static void niu_init_tx_mac(struct niu *np)
 5576{
 5577	u64 min, max;
 5578
 5579	min = 64;
 5580	if (np->dev->mtu > ETH_DATA_LEN)
 5581		max = 9216;
 5582	else
 5583		max = 1522;
 5584
 5585	/* The XMAC_MIN register only accepts values for TX min which
 5586	 * have the low 3 bits cleared.
 5587	 */
 5588	BUG_ON(min & 0x7);
 5589
 5590	if (np->flags & NIU_FLAGS_XMAC)
 5591		niu_init_tx_xmac(np, min, max);
 5592	else
 5593		niu_init_tx_bmac(np, min, max);
 5594}
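/* Editor's example: 1522 accommodates a maximum-size VLAN-tagged frame
 * (1500 payload + 14 header + 4 VLAN tag + 4 FCS), while 9216 leaves
 * headroom over a 9000-byte jumbo MTU; min = 64 is the classic Ethernet
 * minimum frame size and satisfies the low-3-bits-clear rule above.
 */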
 5595
 5596static int niu_reset_rx_xmac(struct niu *np)
 5597{
 5598	int limit;
 5599
 5600	nw64_mac(XRXMAC_SW_RST,
 5601		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
 5602	limit = 1000;
 5603	while (--limit >= 0) {
 5604		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
 5605						 XRXMAC_SW_RST_SOFT_RST)))
 5606			break;
 5607		udelay(100);
 5608	}
 5609	if (limit < 0) {
 5610		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
 5611			np->port,
 5612			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
 5613		return -ENODEV;
 5614	}
 5615
 5616	return 0;
 5617}
 5618
 5619static int niu_reset_rx_bmac(struct niu *np)
 5620{
 5621	int limit;
 5622
 5623	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
 5624	limit = 1000;
 5625	while (--limit >= 0) {
 5626		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
 5627			break;
 5628		udelay(100);
 5629	}
 5630	if (limit < 0) {
 5631		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
 5632			np->port,
 5633			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
 5634		return -ENODEV;
 5635	}
 5636
 5637	return 0;
 5638}
 5639
 5640static int niu_reset_rx_mac(struct niu *np)
 5641{
 5642	if (np->flags & NIU_FLAGS_XMAC)
 5643		return niu_reset_rx_xmac(np);
 5644	else
 5645		return niu_reset_rx_bmac(np);
 5646}
 5647
 5648static void niu_init_rx_xmac(struct niu *np)
 5649{
 5650	struct niu_parent *parent = np->parent;
 5651	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 5652	int first_rdc_table = tp->first_table_num;
 5653	unsigned long i;
 5654	u64 val;
 5655
 5656	nw64_mac(XMAC_ADD_FILT0, 0);
 5657	nw64_mac(XMAC_ADD_FILT1, 0);
 5658	nw64_mac(XMAC_ADD_FILT2, 0);
 5659	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
 5660	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
 5661	for (i = 0; i < MAC_NUM_HASH; i++)
 5662		nw64_mac(XMAC_HASH_TBL(i), 0);
 5663	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
 5664	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 5665	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 5666
 5667	val = nr64_mac(XMAC_CONFIG);
 5668	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
 5669		 XMAC_CONFIG_PROMISCUOUS |
 5670		 XMAC_CONFIG_PROMISC_GROUP |
 5671		 XMAC_CONFIG_ERR_CHK_DIS |
 5672		 XMAC_CONFIG_RX_CRC_CHK_DIS |
 5673		 XMAC_CONFIG_RESERVED_MULTICAST |
 5674		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
 5675		 XMAC_CONFIG_ADDR_FILTER_EN |
 5676		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
 5677		 XMAC_CONFIG_STRIP_CRC |
 5678		 XMAC_CONFIG_PASS_FLOW_CTRL |
 5679		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
 5680	val |= (XMAC_CONFIG_HASH_FILTER_EN);
 5681	nw64_mac(XMAC_CONFIG, val);
 5682
 5683	nw64_mac(RXMAC_BT_CNT, 0);
 5684	nw64_mac(RXMAC_BC_FRM_CNT, 0);
 5685	nw64_mac(RXMAC_MC_FRM_CNT, 0);
 5686	nw64_mac(RXMAC_FRAG_CNT, 0);
 5687	nw64_mac(RXMAC_HIST_CNT1, 0);
 5688	nw64_mac(RXMAC_HIST_CNT2, 0);
 5689	nw64_mac(RXMAC_HIST_CNT3, 0);
 5690	nw64_mac(RXMAC_HIST_CNT4, 0);
 5691	nw64_mac(RXMAC_HIST_CNT5, 0);
 5692	nw64_mac(RXMAC_HIST_CNT6, 0);
 5693	nw64_mac(RXMAC_HIST_CNT7, 0);
 5694	nw64_mac(RXMAC_MPSZER_CNT, 0);
 5695	nw64_mac(RXMAC_CRC_ER_CNT, 0);
 5696	nw64_mac(RXMAC_CD_VIO_CNT, 0);
 5697	nw64_mac(LINK_FAULT_CNT, 0);
 5698}
 5699
 5700static void niu_init_rx_bmac(struct niu *np)
 5701{
 5702	struct niu_parent *parent = np->parent;
 5703	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 5704	int first_rdc_table = tp->first_table_num;
 5705	unsigned long i;
 5706	u64 val;
 5707
 5708	nw64_mac(BMAC_ADD_FILT0, 0);
 5709	nw64_mac(BMAC_ADD_FILT1, 0);
 5710	nw64_mac(BMAC_ADD_FILT2, 0);
 5711	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
 5712	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
 5713	for (i = 0; i < MAC_NUM_HASH; i++)
 5714		nw64_mac(BMAC_HASH_TBL(i), 0);
 5715	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 5716	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 5717	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
 5718
 5719	val = nr64_mac(BRXMAC_CONFIG);
 5720	val &= ~(BRXMAC_CONFIG_ENABLE |
 5721		 BRXMAC_CONFIG_STRIP_PAD |
 5722		 BRXMAC_CONFIG_STRIP_FCS |
 5723		 BRXMAC_CONFIG_PROMISC |
 5724		 BRXMAC_CONFIG_PROMISC_GRP |
 5725		 BRXMAC_CONFIG_ADDR_FILT_EN |
 5726		 BRXMAC_CONFIG_DISCARD_DIS);
 5727	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
 5728	nw64_mac(BRXMAC_CONFIG, val);
 5729
 5730	val = nr64_mac(BMAC_ADDR_CMPEN);
 5731	val |= BMAC_ADDR_CMPEN_EN0;
 5732	nw64_mac(BMAC_ADDR_CMPEN, val);
 5733}
 5734
 5735static void niu_init_rx_mac(struct niu *np)
 5736{
 5737	niu_set_primary_mac(np, np->dev->dev_addr);
 5738
 5739	if (np->flags & NIU_FLAGS_XMAC)
 5740		niu_init_rx_xmac(np);
 5741	else
 5742		niu_init_rx_bmac(np);
 5743}
 5744
 5745static void niu_enable_tx_xmac(struct niu *np, int on)
 5746{
 5747	u64 val = nr64_mac(XMAC_CONFIG);
 5748
 5749	if (on)
 5750		val |= XMAC_CONFIG_TX_ENABLE;
 5751	else
 5752		val &= ~XMAC_CONFIG_TX_ENABLE;
 5753	nw64_mac(XMAC_CONFIG, val);
 5754}
 5755
 5756static void niu_enable_tx_bmac(struct niu *np, int on)
 5757{
 5758	u64 val = nr64_mac(BTXMAC_CONFIG);
 5759
 5760	if (on)
 5761		val |= BTXMAC_CONFIG_ENABLE;
 5762	else
 5763		val &= ~BTXMAC_CONFIG_ENABLE;
 5764	nw64_mac(BTXMAC_CONFIG, val);
 5765}
 5766
 5767static void niu_enable_tx_mac(struct niu *np, int on)
 5768{
 5769	if (np->flags & NIU_FLAGS_XMAC)
 5770		niu_enable_tx_xmac(np, on);
 5771	else
 5772		niu_enable_tx_bmac(np, on);
 5773}
 5774
 5775static void niu_enable_rx_xmac(struct niu *np, int on)
 5776{
 5777	u64 val = nr64_mac(XMAC_CONFIG);
 5778
 5779	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
 5780		 XMAC_CONFIG_PROMISCUOUS);
 5781
 5782	if (np->flags & NIU_FLAGS_MCAST)
 5783		val |= XMAC_CONFIG_HASH_FILTER_EN;
 5784	if (np->flags & NIU_FLAGS_PROMISC)
 5785		val |= XMAC_CONFIG_PROMISCUOUS;
 5786
 5787	if (on)
 5788		val |= XMAC_CONFIG_RX_MAC_ENABLE;
 5789	else
 5790		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
 5791	nw64_mac(XMAC_CONFIG, val);
 5792}
 5793
 5794static void niu_enable_rx_bmac(struct niu *np, int on)
 5795{
 5796	u64 val = nr64_mac(BRXMAC_CONFIG);
 5797
 5798	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
 5799		 BRXMAC_CONFIG_PROMISC);
 5800
 5801	if (np->flags & NIU_FLAGS_MCAST)
 5802		val |= BRXMAC_CONFIG_HASH_FILT_EN;
 5803	if (np->flags & NIU_FLAGS_PROMISC)
 5804		val |= BRXMAC_CONFIG_PROMISC;
 5805
 5806	if (on)
 5807		val |= BRXMAC_CONFIG_ENABLE;
 5808	else
 5809		val &= ~BRXMAC_CONFIG_ENABLE;
 5810	nw64_mac(BRXMAC_CONFIG, val);
 5811}
 5812
 5813static void niu_enable_rx_mac(struct niu *np, int on)
 5814{
 5815	if (np->flags & NIU_FLAGS_XMAC)
 5816		niu_enable_rx_xmac(np, on);
 5817	else
 5818		niu_enable_rx_bmac(np, on);
 5819}
 5820
 5821static int niu_init_mac(struct niu *np)
 5822{
 5823	int err;
 5824
 5825	niu_init_xif(np);
 5826	err = niu_init_pcs(np);
 5827	if (err)
 5828		return err;
 5829
 5830	err = niu_reset_tx_mac(np);
 5831	if (err)
 5832		return err;
 5833	niu_init_tx_mac(np);
 5834	err = niu_reset_rx_mac(np);
 5835	if (err)
 5836		return err;
 5837	niu_init_rx_mac(np);
 5838
  5839	/* This looks hokey, but the RX MAC reset we just did will
  5840	 * undo some of the state we set up in niu_init_tx_mac(), so we
  5841	 * have to call it again.  In particular, the RX MAC reset will
  5842	 * set the XMAC_MAX register back to its default value.
  5843	 */
 5844	niu_init_tx_mac(np);
 5845	niu_enable_tx_mac(np, 1);
 5846
 5847	niu_enable_rx_mac(np, 1);
 5848
 5849	return 0;
 5850}
 5851
 5852static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 5853{
 5854	(void) niu_tx_channel_stop(np, rp->tx_channel);
 5855}
 5856
 5857static void niu_stop_tx_channels(struct niu *np)
 5858{
 5859	int i;
 5860
 5861	for (i = 0; i < np->num_tx_rings; i++) {
 5862		struct tx_ring_info *rp = &np->tx_rings[i];
 5863
 5864		niu_stop_one_tx_channel(np, rp);
 5865	}
 5866}
 5867
 5868static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 5869{
 5870	(void) niu_tx_channel_reset(np, rp->tx_channel);
 5871}
 5872
 5873static void niu_reset_tx_channels(struct niu *np)
 5874{
 5875	int i;
 5876
 5877	for (i = 0; i < np->num_tx_rings; i++) {
 5878		struct tx_ring_info *rp = &np->tx_rings[i];
 5879
 5880		niu_reset_one_tx_channel(np, rp);
 5881	}
 5882}
 5883
 5884static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 5885{
 5886	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
 5887}
 5888
 5889static void niu_stop_rx_channels(struct niu *np)
 5890{
 5891	int i;
 5892
 5893	for (i = 0; i < np->num_rx_rings; i++) {
 5894		struct rx_ring_info *rp = &np->rx_rings[i];
 5895
 5896		niu_stop_one_rx_channel(np, rp);
 5897	}
 5898}
 5899
 5900static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 5901{
 5902	int channel = rp->rx_channel;
 5903
 5904	(void) niu_rx_channel_reset(np, channel);
 5905	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
 5906	nw64(RX_DMA_CTL_STAT(channel), 0);
 5907	(void) niu_enable_rx_channel(np, channel, 0);
 5908}
 5909
 5910static void niu_reset_rx_channels(struct niu *np)
 5911{
 5912	int i;
 5913
 5914	for (i = 0; i < np->num_rx_rings; i++) {
 5915		struct rx_ring_info *rp = &np->rx_rings[i];
 5916
 5917		niu_reset_one_rx_channel(np, rp);
 5918	}
 5919}
 5920
 5921static void niu_disable_ipp(struct niu *np)
 5922{
 5923	u64 rd, wr, val;
 5924	int limit;
 5925
 5926	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
 5927	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
 5928	limit = 100;
 5929	while (--limit >= 0 && (rd != wr)) {
 5930		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
 5931		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
 5932	}
 5933	if (limit < 0 &&
 5934	    (rd != 0 && wr != 1)) {
 5935		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
 5936			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
 5937			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
 5938	}
 5939
 5940	val = nr64_ipp(IPP_CFIG);
 5941	val &= ~(IPP_CFIG_IPP_ENABLE |
 5942		 IPP_CFIG_DFIFO_ECC_EN |
 5943		 IPP_CFIG_DROP_BAD_CRC |
 5944		 IPP_CFIG_CKSUM_EN);
 5945	nw64_ipp(IPP_CFIG, val);
 5946
 5947	(void) niu_ipp_reset(np);
 5948}
 5949
 5950static int niu_init_hw(struct niu *np)
 5951{
 5952	int i, err;
 5953
 5954	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
 5955	niu_txc_enable_port(np, 1);
 5956	niu_txc_port_dma_enable(np, 1);
 5957	niu_txc_set_imask(np, 0);
 5958
 5959	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
 5960	for (i = 0; i < np->num_tx_rings; i++) {
 5961		struct tx_ring_info *rp = &np->tx_rings[i];
 5962
 5963		err = niu_init_one_tx_channel(np, rp);
 5964		if (err)
 5965			return err;
 5966	}
 5967
 5968	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
 5969	err = niu_init_rx_channels(np);
 5970	if (err)
 5971		goto out_uninit_tx_channels;
 5972
 5973	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
 5974	err = niu_init_classifier_hw(np);
 5975	if (err)
 5976		goto out_uninit_rx_channels;
 5977
 5978	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
 5979	err = niu_init_zcp(np);
 5980	if (err)
 5981		goto out_uninit_rx_channels;
 5982
 5983	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
 5984	err = niu_init_ipp(np);
 5985	if (err)
 5986		goto out_uninit_rx_channels;
 5987
 5988	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
 5989	err = niu_init_mac(np);
 5990	if (err)
 5991		goto out_uninit_ipp;
 5992
 5993	return 0;
 5994
 5995out_uninit_ipp:
 5996	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
 5997	niu_disable_ipp(np);
 5998
 5999out_uninit_rx_channels:
 6000	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
 6001	niu_stop_rx_channels(np);
 6002	niu_reset_rx_channels(np);
 6003
 6004out_uninit_tx_channels:
 6005	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
 6006	niu_stop_tx_channels(np);
 6007	niu_reset_tx_channels(np);
 6008
 6009	return err;
 6010}
 6011
 6012static void niu_stop_hw(struct niu *np)
 6013{
 6014	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
 6015	niu_enable_interrupts(np, 0);
 6016
 6017	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
 6018	niu_enable_rx_mac(np, 0);
 6019
 6020	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
 6021	niu_disable_ipp(np);
 6022
 6023	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
 6024	niu_stop_tx_channels(np);
 6025
 6026	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
 6027	niu_stop_rx_channels(np);
 6028
 6029	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
 6030	niu_reset_tx_channels(np);
 6031
 6032	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
 6033	niu_reset_rx_channels(np);
 6034}
 6035
 6036static void niu_set_irq_name(struct niu *np)
 6037{
 6038	int port = np->port;
 6039	int i, j = 1;
 6040
 6041	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
 6042
 6043	if (port == 0) {
 6044		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
 6045		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
 6046		j = 3;
 6047	}
 6048
 6049	for (i = 0; i < np->num_ldg - j; i++) {
 6050		if (i < np->num_rx_rings)
 6051			sprintf(np->irq_name[i+j], "%s-rx-%d",
 6052				np->dev->name, i);
 6053		else if (i < np->num_tx_rings + np->num_rx_rings)
 6054			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
 6055				i - np->num_rx_rings);
 6056	}
 6057}
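/* Editor's example: on port 0 of a device named "eth0" with two RX and
 * two TX rings (and enough LDGs), the code above yields "eth0:MAC",
 * "eth0:MIF", "eth0:SYSERR", "eth0-rx-0", "eth0-rx-1", "eth0-tx-0" and
 * "eth0-tx-1"; other ports get only the MAC entry before the ring names.
 */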
 6058
 6059static int niu_request_irq(struct niu *np)
 6060{
 6061	int i, j, err;
 6062
 6063	niu_set_irq_name(np);
 6064
 6065	err = 0;
 6066	for (i = 0; i < np->num_ldg; i++) {
 6067		struct niu_ldg *lp = &np->ldg[i];
 6068
 6069		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
 6070				  np->irq_name[i], lp);
 6071		if (err)
 6072			goto out_free_irqs;
 6073
 6074	}
 6075
 6076	return 0;
 6077
 6078out_free_irqs:
 6079	for (j = 0; j < i; j++) {
 6080		struct niu_ldg *lp = &np->ldg[j];
 6081
 6082		free_irq(lp->irq, lp);
 6083	}
 6084	return err;
 6085}
 6086
 6087static void niu_free_irq(struct niu *np)
 6088{
 6089	int i;
 6090
 6091	for (i = 0; i < np->num_ldg; i++) {
 6092		struct niu_ldg *lp = &np->ldg[i];
 6093
 6094		free_irq(lp->irq, lp);
 6095	}
 6096}
 6097
 6098static void niu_enable_napi(struct niu *np)
 6099{
 6100	int i;
 6101
 6102	for (i = 0; i < np->num_ldg; i++)
 6103		napi_enable(&np->ldg[i].napi);
 6104}
 6105
 6106static void niu_disable_napi(struct niu *np)
 6107{
 6108	int i;
 6109
 6110	for (i = 0; i < np->num_ldg; i++)
 6111		napi_disable(&np->ldg[i].napi);
 6112}
 6113
 6114static int niu_open(struct net_device *dev)
 6115{
 6116	struct niu *np = netdev_priv(dev);
 6117	int err;
 6118
 6119	netif_carrier_off(dev);
 6120
 6121	err = niu_alloc_channels(np);
 6122	if (err)
 6123		goto out_err;
 6124
 6125	err = niu_enable_interrupts(np, 0);
 6126	if (err)
 6127		goto out_free_channels;
 6128
 6129	err = niu_request_irq(np);
 6130	if (err)
 6131		goto out_free_channels;
 6132
 6133	niu_enable_napi(np);
 6134
 6135	spin_lock_irq(&np->lock);
 6136
 6137	err = niu_init_hw(np);
 6138	if (!err) {
 6139		init_timer(&np->timer);
 6140		np->timer.expires = jiffies + HZ;
 6141		np->timer.data = (unsigned long) np;
 6142		np->timer.function = niu_timer;
 6143
 6144		err = niu_enable_interrupts(np, 1);
 6145		if (err)
 6146			niu_stop_hw(np);
 6147	}
 6148
 6149	spin_unlock_irq(&np->lock);
 6150
 6151	if (err) {
 6152		niu_disable_napi(np);
 6153		goto out_free_irq;
 6154	}
 6155
 6156	netif_tx_start_all_queues(dev);
 6157
 6158	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 6159		netif_carrier_on(dev);
 6160
 6161	add_timer(&np->timer);
 6162
 6163	return 0;
 6164
 6165out_free_irq:
 6166	niu_free_irq(np);
 6167
 6168out_free_channels:
 6169	niu_free_channels(np);
 6170
 6171out_err:
 6172	return err;
 6173}
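
/* Bring-up ordering worth noting: interrupts are explicitly masked
 * before request_irq(), so a shared-IRQ neighbour firing early can't
 * run the handler against uninitialized hardware.  Interrupts are
 * only enabled, and the one-second service timer armed, after
 * niu_init_hw() succeeds under np->lock.
 */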
 6174
 6175static void niu_full_shutdown(struct niu *np, struct net_device *dev)
 6176{
 6177	cancel_work_sync(&np->reset_task);
 6178
 6179	niu_disable_napi(np);
 6180	netif_tx_stop_all_queues(dev);
 6181
 6182	del_timer_sync(&np->timer);
 6183
 6184	spin_lock_irq(&np->lock);
 6185
 6186	niu_stop_hw(np);
 6187
 6188	spin_unlock_irq(&np->lock);
 6189}
 6190
 6191static int niu_close(struct net_device *dev)
 6192{
 6193	struct niu *np = netdev_priv(dev);
 6194
 6195	niu_full_shutdown(np, dev);
 6196
 6197	niu_free_irq(np);
 6198
 6199	niu_free_channels(np);
 6200
 6201	niu_handle_led(np, 0);
 6202
 6203	return 0;
 6204}
 6205
 6206static void niu_sync_xmac_stats(struct niu *np)
 6207{
 6208	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
 6209
 6210	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
 6211	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
 6212
 6213	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
 6214	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
 6215	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
 6216	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
 6217	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
 6218	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
 6219	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
 6220	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
 6221	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
 6222	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
 6223	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
 6224	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
 6225	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
 6226	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
 6227	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
 6228	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
 6229}
 6230
 6231static void niu_sync_bmac_stats(struct niu *np)
 6232{
 6233	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
 6234
 6235	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
 6236	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
 6237
 6238	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
 6239	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
  6240	mp->rx_crc_errors += nr64_mac(BRXMAC_CRC_ERR_CNT);
 6241	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
 6242}
 6243
 6244static void niu_sync_mac_stats(struct niu *np)
 6245{
 6246	if (np->flags & NIU_FLAGS_XMAC)
 6247		niu_sync_xmac_stats(np);
 6248	else
 6249		niu_sync_bmac_stats(np);
 6250}
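
/* Each sync adds the current hardware count into the software
 * totals; the counters are presumably clear-on-read, so "+=" keeps
 * the totals monotonic across wraps.  Note the BMAC CRC line above
 * was changed to read BRXMAC_CRC_ERR_CNT: the original read the
 * alignment-error counter twice, which looks like a copy-paste slip
 * (BRXMAC_CRC_ERR_CNT is assumed to be the matching define in niu.h).
 */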
 6251
 6252static void niu_get_rx_stats(struct niu *np,
 6253			     struct rtnl_link_stats64 *stats)
 6254{
 6255	u64 pkts, dropped, errors, bytes;
 6256	struct rx_ring_info *rx_rings;
 6257	int i;
 6258
 6259	pkts = dropped = errors = bytes = 0;
 6260
 6261	rx_rings = ACCESS_ONCE(np->rx_rings);
 6262	if (!rx_rings)
 6263		goto no_rings;
 6264
 6265	for (i = 0; i < np->num_rx_rings; i++) {
 6266		struct rx_ring_info *rp = &rx_rings[i];
 6267
 6268		niu_sync_rx_discard_stats(np, rp, 0);
 6269
 6270		pkts += rp->rx_packets;
 6271		bytes += rp->rx_bytes;
 6272		dropped += rp->rx_dropped;
 6273		errors += rp->rx_errors;
 6274	}
 6275
 6276no_rings:
 6277	stats->rx_packets = pkts;
 6278	stats->rx_bytes = bytes;
 6279	stats->rx_dropped = dropped;
 6280	stats->rx_errors = errors;
 6281}
 6282
 6283static void niu_get_tx_stats(struct niu *np,
 6284			     struct rtnl_link_stats64 *stats)
 6285{
 6286	u64 pkts, errors, bytes;
 6287	struct tx_ring_info *tx_rings;
 6288	int i;
 6289
 6290	pkts = errors = bytes = 0;
 6291
 6292	tx_rings = ACCESS_ONCE(np->tx_rings);
 6293	if (!tx_rings)
 6294		goto no_rings;
 6295
 6296	for (i = 0; i < np->num_tx_rings; i++) {
 6297		struct tx_ring_info *rp = &tx_rings[i];
 6298
 6299		pkts += rp->tx_packets;
 6300		bytes += rp->tx_bytes;
 6301		errors += rp->tx_errors;
 6302	}
 6303
 6304no_rings:
 6305	stats->tx_packets = pkts;
 6306	stats->tx_bytes = bytes;
 6307	stats->tx_errors = errors;
 6308}
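
/* Both stats helpers snapshot the ring-array pointer with
 * ACCESS_ONCE() rather than taking np->lock: niu_get_stats() can
 * race with close or an MTU change freeing the rings, and the single
 * volatile read ensures we act on either a valid array or NULL, not
 * a pointer re-fetched after it was cleared.
 */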
 6309
 6310static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
 6311					       struct rtnl_link_stats64 *stats)
 6312{
 6313	struct niu *np = netdev_priv(dev);
 6314
 6315	if (netif_running(dev)) {
 6316		niu_get_rx_stats(np, stats);
 6317		niu_get_tx_stats(np, stats);
 6318	}
 6319
 6320	return stats;
 6321}
 6322
 6323static void niu_load_hash_xmac(struct niu *np, u16 *hash)
 6324{
 6325	int i;
 6326
 6327	for (i = 0; i < 16; i++)
 6328		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
 6329}
 6330
 6331static void niu_load_hash_bmac(struct niu *np, u16 *hash)
 6332{
 6333	int i;
 6334
 6335	for (i = 0; i < 16; i++)
 6336		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
 6337}
 6338
 6339static void niu_load_hash(struct niu *np, u16 *hash)
 6340{
 6341	if (np->flags & NIU_FLAGS_XMAC)
 6342		niu_load_hash_xmac(np, hash);
 6343	else
 6344		niu_load_hash_bmac(np, hash);
 6345}
 6346
 6347static void niu_set_rx_mode(struct net_device *dev)
 6348{
 6349	struct niu *np = netdev_priv(dev);
 6350	int i, alt_cnt, err;
 6351	struct netdev_hw_addr *ha;
 6352	unsigned long flags;
 6353	u16 hash[16] = { 0, };
 6354
 6355	spin_lock_irqsave(&np->lock, flags);
 6356	niu_enable_rx_mac(np, 0);
 6357
 6358	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
 6359	if (dev->flags & IFF_PROMISC)
 6360		np->flags |= NIU_FLAGS_PROMISC;
 6361	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
 6362		np->flags |= NIU_FLAGS_MCAST;
 6363
 6364	alt_cnt = netdev_uc_count(dev);
 6365	if (alt_cnt > niu_num_alt_addr(np)) {
 6366		alt_cnt = 0;
 6367		np->flags |= NIU_FLAGS_PROMISC;
 6368	}
 6369
 6370	if (alt_cnt) {
 6371		int index = 0;
 6372
 6373		netdev_for_each_uc_addr(ha, dev) {
 6374			err = niu_set_alt_mac(np, index, ha->addr);
 6375			if (err)
 6376				netdev_warn(dev, "Error %d adding alt mac %d\n",
 6377					    err, index);
 6378			err = niu_enable_alt_mac(np, index, 1);
 6379			if (err)
 6380				netdev_warn(dev, "Error %d enabling alt mac %d\n",
 6381					    err, index);
 6382
 6383			index++;
 6384		}
 6385	} else {
 6386		int alt_start;
 6387		if (np->flags & NIU_FLAGS_XMAC)
 6388			alt_start = 0;
 6389		else
 6390			alt_start = 1;
 6391		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
 6392			err = niu_enable_alt_mac(np, i, 0);
 6393			if (err)
 6394				netdev_warn(dev, "Error %d disabling alt mac %d\n",
 6395					    err, i);
 6396		}
 6397	}
 6398	if (dev->flags & IFF_ALLMULTI) {
 6399		for (i = 0; i < 16; i++)
 6400			hash[i] = 0xffff;
 6401	} else if (!netdev_mc_empty(dev)) {
 6402		netdev_for_each_mc_addr(ha, dev) {
 6403			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
 6404
 6405			crc >>= 24;
 6406			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
 6407		}
 6408	}
 6409
 6410	if (np->flags & NIU_FLAGS_MCAST)
 6411		niu_load_hash(np, hash);
 6412
 6413	niu_enable_rx_mac(np, 1);
 6414	spin_unlock_irqrestore(&np->lock, flags);
 6415}
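
/* Multicast hash sketch: ether_crc_le() gives a 32-bit CRC of the
 * address, and the top 8 bits select one of 256 filter bits spread
 * across sixteen 16-bit registers.  E.g. a hypothetical CRC of
 * 0x4a000000 leaves crc = 0x4a after the shift, so hash[0x4a >> 4]
 * = hash[4] gets bit (15 - (0x4a & 0xf)) = 5 set, i.e.
 * hash[4] |= 0x0020.
 */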
 6416
 6417static int niu_set_mac_addr(struct net_device *dev, void *p)
 6418{
 6419	struct niu *np = netdev_priv(dev);
 6420	struct sockaddr *addr = p;
 6421	unsigned long flags;
 6422
 6423	if (!is_valid_ether_addr(addr->sa_data))
 6424		return -EINVAL;
 6425
 6426	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 6427
 6428	if (!netif_running(dev))
 6429		return 0;
 6430
 6431	spin_lock_irqsave(&np->lock, flags);
 6432	niu_enable_rx_mac(np, 0);
 6433	niu_set_primary_mac(np, dev->dev_addr);
 6434	niu_enable_rx_mac(np, 1);
 6435	spin_unlock_irqrestore(&np->lock, flags);
 6436
 6437	return 0;
 6438}
 6439
 6440static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 6441{
 6442	return -EOPNOTSUPP;
 6443}
 6444
 6445static void niu_netif_stop(struct niu *np)
 6446{
 6447	np->dev->trans_start = jiffies;	/* prevent tx timeout */
 6448
 6449	niu_disable_napi(np);
 6450
 6451	netif_tx_disable(np->dev);
 6452}
 6453
 6454static void niu_netif_start(struct niu *np)
 6455{
 6456	/* NOTE: unconditional netif_wake_queue is only appropriate
 6457	 * so long as all callers are assured to have free tx slots
 6458	 * (such as after niu_init_hw).
 6459	 */
 6460	netif_tx_wake_all_queues(np->dev);
 6461
 6462	niu_enable_napi(np);
 6463
 6464	niu_enable_interrupts(np, 1);
 6465}
 6466
 6467static void niu_reset_buffers(struct niu *np)
 6468{
 6469	int i, j, k, err;
 6470
 6471	if (np->rx_rings) {
 6472		for (i = 0; i < np->num_rx_rings; i++) {
 6473			struct rx_ring_info *rp = &np->rx_rings[i];
 6474
 6475			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
 6476				struct page *page;
 6477
 6478				page = rp->rxhash[j];
 6479				while (page) {
 6480					struct page *next =
 6481						(struct page *) page->mapping;
 6482					u64 base = page->index;
 6483					base = base >> RBR_DESCR_ADDR_SHIFT;
 6484					rp->rbr[k++] = cpu_to_le32(base);
 6485					page = next;
 6486				}
 6487			}
 6488			for (; k < MAX_RBR_RING_SIZE; k++) {
 6489				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
 6490				if (unlikely(err))
 6491					break;
 6492			}
 6493
 6494			rp->rbr_index = rp->rbr_table_size - 1;
 6495			rp->rcr_index = 0;
 6496			rp->rbr_pending = 0;
 6497			rp->rbr_refill_pending = 0;
 6498		}
 6499	}
 6500	if (np->tx_rings) {
 6501		for (i = 0; i < np->num_tx_rings; i++) {
 6502			struct tx_ring_info *rp = &np->tx_rings[i];
 6503
 6504			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
 6505				if (rp->tx_buffs[j].skb)
 6506					(void) release_tx_packet(np, rp, j);
 6507			}
 6508
 6509			rp->pending = MAX_TX_RING_SIZE;
 6510			rp->prod = 0;
 6511			rp->cons = 0;
 6512			rp->wrap_bit = 0;
 6513		}
 6514	}
 6515}
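
/* Recovery path: rather than freeing and reallocating RX buffers,
 * the pages still threaded through each ring's rxhash are re-linked
 * into the RBR (their DMA addresses re-derived from page->index),
 * and only the shortfall is topped up with fresh GFP_ATOMIC pages.
 * TX is simpler: any in-flight packets are released and the ring
 * indices rewound.
 */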
 6516
 6517static void niu_reset_task(struct work_struct *work)
 6518{
 6519	struct niu *np = container_of(work, struct niu, reset_task);
 6520	unsigned long flags;
 6521	int err;
 6522
 6523	spin_lock_irqsave(&np->lock, flags);
 6524	if (!netif_running(np->dev)) {
 6525		spin_unlock_irqrestore(&np->lock, flags);
 6526		return;
 6527	}
 6528
 6529	spin_unlock_irqrestore(&np->lock, flags);
 6530
 6531	del_timer_sync(&np->timer);
 6532
 6533	niu_netif_stop(np);
 6534
 6535	spin_lock_irqsave(&np->lock, flags);
 6536
 6537	niu_stop_hw(np);
 6538
 6539	spin_unlock_irqrestore(&np->lock, flags);
 6540
 6541	niu_reset_buffers(np);
 6542
 6543	spin_lock_irqsave(&np->lock, flags);
 6544
 6545	err = niu_init_hw(np);
 6546	if (!err) {
 6547		np->timer.expires = jiffies + HZ;
 6548		add_timer(&np->timer);
 6549		niu_netif_start(np);
 6550	}
 6551
 6552	spin_unlock_irqrestore(&np->lock, flags);
 6553}
 6554
 6555static void niu_tx_timeout(struct net_device *dev)
 6556{
 6557	struct niu *np = netdev_priv(dev);
 6558
 6559	dev_err(np->device, "%s: Transmit timed out, resetting\n",
 6560		dev->name);
 6561
 6562	schedule_work(&np->reset_task);
 6563}
 6564
 6565static void niu_set_txd(struct tx_ring_info *rp, int index,
 6566			u64 mapping, u64 len, u64 mark,
 6567			u64 n_frags)
 6568{
 6569	__le64 *desc = &rp->descr[index];
 6570
 6571	*desc = cpu_to_le64(mark |
 6572			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
 6573			    (len << TX_DESC_TR_LEN_SHIFT) |
 6574			    (mapping & TX_DESC_SAD));
 6575}
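
/* Descriptor packing sketch (field meaning inferred from the shift
 * and mask names): one little-endian 64-bit word carries the
 * SOP/MARK flags, the descriptor count for the packet, the transfer
 * length and the DMA address masked by TX_DESC_SAD.  Only the first
 * descriptor of a packet carries mark and n_frags; the continuation
 * descriptors written by niu_start_xmit() pass zero for both.
 */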
 6576
 6577static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
 6578				u64 pad_bytes, u64 len)
 6579{
 6580	u16 eth_proto, eth_proto_inner;
 6581	u64 csum_bits, l3off, ihl, ret;
 6582	u8 ip_proto;
 6583	int ipv6;
 6584
 6585	eth_proto = be16_to_cpu(ehdr->h_proto);
 6586	eth_proto_inner = eth_proto;
 6587	if (eth_proto == ETH_P_8021Q) {
 6588		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
 6589		__be16 val = vp->h_vlan_encapsulated_proto;
 6590
 6591		eth_proto_inner = be16_to_cpu(val);
 6592	}
 6593
 6594	ipv6 = ihl = 0;
 6595	switch (skb->protocol) {
 6596	case cpu_to_be16(ETH_P_IP):
 6597		ip_proto = ip_hdr(skb)->protocol;
 6598		ihl = ip_hdr(skb)->ihl;
 6599		break;
 6600	case cpu_to_be16(ETH_P_IPV6):
 6601		ip_proto = ipv6_hdr(skb)->nexthdr;
 6602		ihl = (40 >> 2);
 6603		ipv6 = 1;
 6604		break;
 6605	default:
 6606		ip_proto = ihl = 0;
 6607		break;
 6608	}
 6609
 6610	csum_bits = TXHDR_CSUM_NONE;
 6611	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 6612		u64 start, stuff;
 6613
 6614		csum_bits = (ip_proto == IPPROTO_TCP ?
 6615			     TXHDR_CSUM_TCP :
 6616			     (ip_proto == IPPROTO_UDP ?
 6617			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
 6618
 6619		start = skb_checksum_start_offset(skb) -
 6620			(pad_bytes + sizeof(struct tx_pkt_hdr));
 6621		stuff = start + skb->csum_offset;
 6622
 6623		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
 6624		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
 6625	}
 6626
 6627	l3off = skb_network_offset(skb) -
 6628		(pad_bytes + sizeof(struct tx_pkt_hdr));
 6629
 6630	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
 6631	       (len << TXHDR_LEN_SHIFT) |
 6632	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
 6633	       (ihl << TXHDR_IHL_SHIFT) |
 6634	       ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
 6635	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
 6636	       (ipv6 ? TXHDR_IP_VER : 0) |
 6637	       csum_bits);
 6638
 6639	return ret;
 6640}
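
/* The offsets programmed here are in 16-bit units relative to the
 * internal tx_pkt_hdr, hence the "/ 2" on the pad, L3 and L4 values.
 * A worked example (no VLAN, no pad, 20-byte IPv4 header, TCP): the
 * IP header sits 14 bytes past the internal header, so TXHDR_L3START
 * gets 14 / 2 = 7, and the TCP checksum field at transport offset 16
 * yields an L4STUFF of (14 + 20 + 16) / 2 = 25.
 */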
 6641
 6642static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 6643				  struct net_device *dev)
 6644{
 6645	struct niu *np = netdev_priv(dev);
 6646	unsigned long align, headroom;
 6647	struct netdev_queue *txq;
 6648	struct tx_ring_info *rp;
 6649	struct tx_pkt_hdr *tp;
 6650	unsigned int len, nfg;
 6651	struct ethhdr *ehdr;
 6652	int prod, i, tlen;
 6653	u64 mapping, mrk;
 6654
 6655	i = skb_get_queue_mapping(skb);
 6656	rp = &np->tx_rings[i];
 6657	txq = netdev_get_tx_queue(dev, i);
 6658
 6659	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 6660		netif_tx_stop_queue(txq);
 6661		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
 6662		rp->tx_errors++;
 6663		return NETDEV_TX_BUSY;
 6664	}
 6665
 6666	if (skb->len < ETH_ZLEN) {
 6667		unsigned int pad_bytes = ETH_ZLEN - skb->len;
 6668
 6669		if (skb_pad(skb, pad_bytes))
 6670			goto out;
 6671		skb_put(skb, pad_bytes);
 6672	}
 6673
 6674	len = sizeof(struct tx_pkt_hdr) + 15;
 6675	if (skb_headroom(skb) < len) {
 6676		struct sk_buff *skb_new;
 6677
 6678		skb_new = skb_realloc_headroom(skb, len);
 6679		if (!skb_new) {
 6680			rp->tx_errors++;
 6681			goto out_drop;
 6682		}
 6683		kfree_skb(skb);
 6684		skb = skb_new;
 6685	} else
 6686		skb_orphan(skb);
 6687
 6688	align = ((unsigned long) skb->data & (16 - 1));
 6689	headroom = align + sizeof(struct tx_pkt_hdr);
 6690
 6691	ehdr = (struct ethhdr *) skb->data;
 6692	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
 6693
 6694	len = skb->len - sizeof(struct tx_pkt_hdr);
 6695	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
 6696	tp->resv = 0;
 6697
 6698	len = skb_headlen(skb);
 6699	mapping = np->ops->map_single(np->device, skb->data,
 6700				      len, DMA_TO_DEVICE);
 6701
 6702	prod = rp->prod;
 6703
 6704	rp->tx_buffs[prod].skb = skb;
 6705	rp->tx_buffs[prod].mapping = mapping;
 6706
 6707	mrk = TX_DESC_SOP;
 6708	if (++rp->mark_counter == rp->mark_freq) {
 6709		rp->mark_counter = 0;
 6710		mrk |= TX_DESC_MARK;
 6711		rp->mark_pending++;
 6712	}
 6713
 6714	tlen = len;
 6715	nfg = skb_shinfo(skb)->nr_frags;
 6716	while (tlen > 0) {
 6717		tlen -= MAX_TX_DESC_LEN;
 6718		nfg++;
 6719	}
 6720
 6721	while (len > 0) {
 6722		unsigned int this_len = len;
 6723
 6724		if (this_len > MAX_TX_DESC_LEN)
 6725			this_len = MAX_TX_DESC_LEN;
 6726
 6727		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
 6728		mrk = nfg = 0;
 6729
 6730		prod = NEXT_TX(rp, prod);
 6731		mapping += this_len;
 6732		len -= this_len;
 6733	}
 6734
 6735	for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
 6736		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 6737
 6738		len = frag->size;
 6739		mapping = np->ops->map_page(np->device, frag->page,
 6740					    frag->page_offset, len,
 6741					    DMA_TO_DEVICE);
 6742
 6743		rp->tx_buffs[prod].skb = NULL;
 6744		rp->tx_buffs[prod].mapping = mapping;
 6745
 6746		niu_set_txd(rp, prod, mapping, len, 0, 0);
 6747
 6748		prod = NEXT_TX(rp, prod);
 6749	}
 6750
 6751	if (prod < rp->prod)
 6752		rp->wrap_bit ^= TX_RING_KICK_WRAP;
 6753	rp->prod = prod;
 6754
 6755	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
 6756
 6757	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
 6758		netif_tx_stop_queue(txq);
 6759		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
 6760			netif_tx_wake_queue(txq);
 6761	}
 6762
 6763out:
 6764	return NETDEV_TX_OK;
 6765
 6766out_drop:
 6767	rp->tx_errors++;
 6768	kfree_skb(skb);
 6769	goto out;
 6770}
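
/* Headroom math: "align" is how far skb->data sits past a 16-byte
 * boundary, so pushing align + sizeof(struct tx_pkt_hdr) both makes
 * room for the internal header and, presumably, restores the 16-byte
 * alignment the chip prefers for the DMA start.  The tlen loop
 * pre-counts how many descriptors the linear area will need (one per
 * MAX_TX_DESC_LEN chunk) so that the first descriptor can advertise
 * the full pointer count up front.
 */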
 6771
 6772static int niu_change_mtu(struct net_device *dev, int new_mtu)
 6773{
 6774	struct niu *np = netdev_priv(dev);
 6775	int err, orig_jumbo, new_jumbo;
 6776
 6777	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
 6778		return -EINVAL;
 6779
 6780	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
 6781	new_jumbo = (new_mtu > ETH_DATA_LEN);
 6782
 6783	dev->mtu = new_mtu;
 6784
 6785	if (!netif_running(dev) ||
 6786	    (orig_jumbo == new_jumbo))
 6787		return 0;
 6788
 6789	niu_full_shutdown(np, dev);
 6790
 6791	niu_free_channels(np);
 6792
 6793	niu_enable_napi(np);
 6794
 6795	err = niu_alloc_channels(np);
 6796	if (err)
 6797		return err;
 6798
 6799	spin_lock_irq(&np->lock);
 6800
 6801	err = niu_init_hw(np);
 6802	if (!err) {
 6803		init_timer(&np->timer);
 6804		np->timer.expires = jiffies + HZ;
 6805		np->timer.data = (unsigned long) np;
 6806		np->timer.function = niu_timer;
 6807
 6808		err = niu_enable_interrupts(np, 1);
 6809		if (err)
 6810			niu_stop_hw(np);
 6811	}
 6812
 6813	spin_unlock_irq(&np->lock);
 6814
 6815	if (!err) {
 6816		netif_tx_start_all_queues(dev);
 6817		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 6818			netif_carrier_on(dev);
 6819
 6820		add_timer(&np->timer);
 6821	}
 6822
 6823	return err;
 6824}
 6825
 6826static void niu_get_drvinfo(struct net_device *dev,
 6827			    struct ethtool_drvinfo *info)
 6828{
 6829	struct niu *np = netdev_priv(dev);
 6830	struct niu_vpd *vpd = &np->vpd;
 6831
 6832	strcpy(info->driver, DRV_MODULE_NAME);
 6833	strcpy(info->version, DRV_MODULE_VERSION);
 6834	sprintf(info->fw_version, "%d.%d",
 6835		vpd->fcode_major, vpd->fcode_minor);
 6836	if (np->parent->plat_type != PLAT_TYPE_NIU)
 6837		strcpy(info->bus_info, pci_name(np->pdev));
 6838}
 6839
 6840static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 6841{
 6842	struct niu *np = netdev_priv(dev);
 6843	struct niu_link_config *lp;
 6844
 6845	lp = &np->link_config;
 6846
 6847	memset(cmd, 0, sizeof(*cmd));
 6848	cmd->phy_address = np->phy_addr;
 6849	cmd->supported = lp->supported;
 6850	cmd->advertising = lp->active_advertising;
 6851	cmd->autoneg = lp->active_autoneg;
 6852	ethtool_cmd_speed_set(cmd, lp->active_speed);
 6853	cmd->duplex = lp->active_duplex;
 6854	cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
 6855	cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
 6856		XCVR_EXTERNAL : XCVR_INTERNAL;
 6857
 6858	return 0;
 6859}
 6860
 6861static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 6862{
 6863	struct niu *np = netdev_priv(dev);
 6864	struct niu_link_config *lp = &np->link_config;
 6865
 6866	lp->advertising = cmd->advertising;
 6867	lp->speed = ethtool_cmd_speed(cmd);
 6868	lp->duplex = cmd->duplex;
 6869	lp->autoneg = cmd->autoneg;
 6870	return niu_init_link(np);
 6871}
 6872
 6873static u32 niu_get_msglevel(struct net_device *dev)
 6874{
 6875	struct niu *np = netdev_priv(dev);
 6876	return np->msg_enable;
 6877}
 6878
 6879static void niu_set_msglevel(struct net_device *dev, u32 value)
 6880{
 6881	struct niu *np = netdev_priv(dev);
 6882	np->msg_enable = value;
 6883}
 6884
 6885static int niu_nway_reset(struct net_device *dev)
 6886{
 6887	struct niu *np = netdev_priv(dev);
 6888
 6889	if (np->link_config.autoneg)
 6890		return niu_init_link(np);
 6891
 6892	return 0;
 6893}
 6894
 6895static int niu_get_eeprom_len(struct net_device *dev)
 6896{
 6897	struct niu *np = netdev_priv(dev);
 6898
 6899	return np->eeprom_len;
 6900}
 6901
 6902static int niu_get_eeprom(struct net_device *dev,
 6903			  struct ethtool_eeprom *eeprom, u8 *data)
 6904{
 6905	struct niu *np = netdev_priv(dev);
 6906	u32 offset, len, val;
 6907
 6908	offset = eeprom->offset;
 6909	len = eeprom->len;
 6910
 6911	if (offset + len < offset)
 6912		return -EINVAL;
 6913	if (offset >= np->eeprom_len)
 6914		return -EINVAL;
 6915	if (offset + len > np->eeprom_len)
 6916		len = eeprom->len = np->eeprom_len - offset;
 6917
 6918	if (offset & 3) {
 6919		u32 b_offset, b_count;
 6920
 6921		b_offset = offset & 3;
 6922		b_count = 4 - b_offset;
 6923		if (b_count > len)
 6924			b_count = len;
 6925
 6926		val = nr64(ESPC_NCR((offset - b_offset) / 4));
 6927		memcpy(data, ((char *)&val) + b_offset, b_count);
 6928		data += b_count;
 6929		len -= b_count;
 6930		offset += b_count;
 6931	}
 6932	while (len >= 4) {
 6933		val = nr64(ESPC_NCR(offset / 4));
 6934		memcpy(data, &val, 4);
 6935		data += 4;
 6936		len -= 4;
 6937		offset += 4;
 6938	}
 6939	if (len) {
 6940		val = nr64(ESPC_NCR(offset / 4));
 6941		memcpy(data, &val, len);
 6942	}
 6943	return 0;
 6944}
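
/* Classic unaligned-read pattern over a word-addressed device: the
 * EEPROM is only readable as 32-bit words (ESPC_NCR), so the helper
 * peels off a misaligned head byte-by-byte, streams whole words, and
 * copies any short tail out of one final word.  E.g. offset 6,
 * len 9 becomes 2 head bytes of word 1, all of word 2, and 3 tail
 * bytes of word 3.
 */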
 6945
 6946static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
 6947{
 6948	switch (flow_type) {
 6949	case TCP_V4_FLOW:
 6950	case TCP_V6_FLOW:
 6951		*pid = IPPROTO_TCP;
 6952		break;
 6953	case UDP_V4_FLOW:
 6954	case UDP_V6_FLOW:
 6955		*pid = IPPROTO_UDP;
 6956		break;
 6957	case SCTP_V4_FLOW:
 6958	case SCTP_V6_FLOW:
 6959		*pid = IPPROTO_SCTP;
 6960		break;
 6961	case AH_V4_FLOW:
 6962	case AH_V6_FLOW:
 6963		*pid = IPPROTO_AH;
 6964		break;
 6965	case ESP_V4_FLOW:
 6966	case ESP_V6_FLOW:
 6967		*pid = IPPROTO_ESP;
 6968		break;
 6969	default:
 6970		*pid = 0;
 6971		break;
 6972	}
 6973}
 6974
 6975static int niu_class_to_ethflow(u64 class, int *flow_type)
 6976{
 6977	switch (class) {
 6978	case CLASS_CODE_TCP_IPV4:
 6979		*flow_type = TCP_V4_FLOW;
 6980		break;
 6981	case CLASS_CODE_UDP_IPV4:
 6982		*flow_type = UDP_V4_FLOW;
 6983		break;
 6984	case CLASS_CODE_AH_ESP_IPV4:
 6985		*flow_type = AH_V4_FLOW;
 6986		break;
 6987	case CLASS_CODE_SCTP_IPV4:
 6988		*flow_type = SCTP_V4_FLOW;
 6989		break;
 6990	case CLASS_CODE_TCP_IPV6:
 6991		*flow_type = TCP_V6_FLOW;
 6992		break;
 6993	case CLASS_CODE_UDP_IPV6:
 6994		*flow_type = UDP_V6_FLOW;
 6995		break;
 6996	case CLASS_CODE_AH_ESP_IPV6:
 6997		*flow_type = AH_V6_FLOW;
 6998		break;
 6999	case CLASS_CODE_SCTP_IPV6:
 7000		*flow_type = SCTP_V6_FLOW;
 7001		break;
 7002	case CLASS_CODE_USER_PROG1:
 7003	case CLASS_CODE_USER_PROG2:
 7004	case CLASS_CODE_USER_PROG3:
 7005	case CLASS_CODE_USER_PROG4:
 7006		*flow_type = IP_USER_FLOW;
 7007		break;
 7008	default:
 7009		return 0;
 7010	}
 7011
 7012	return 1;
 7013}
 7014
 7015static int niu_ethflow_to_class(int flow_type, u64 *class)
 7016{
 7017	switch (flow_type) {
 7018	case TCP_V4_FLOW:
 7019		*class = CLASS_CODE_TCP_IPV4;
 7020		break;
 7021	case UDP_V4_FLOW:
 7022		*class = CLASS_CODE_UDP_IPV4;
 7023		break;
 7024	case AH_ESP_V4_FLOW:
 7025	case AH_V4_FLOW:
 7026	case ESP_V4_FLOW:
 7027		*class = CLASS_CODE_AH_ESP_IPV4;
 7028		break;
 7029	case SCTP_V4_FLOW:
 7030		*class = CLASS_CODE_SCTP_IPV4;
 7031		break;
 7032	case TCP_V6_FLOW:
 7033		*class = CLASS_CODE_TCP_IPV6;
 7034		break;
 7035	case UDP_V6_FLOW:
 7036		*class = CLASS_CODE_UDP_IPV6;
 7037		break;
 7038	case AH_ESP_V6_FLOW:
 7039	case AH_V6_FLOW:
 7040	case ESP_V6_FLOW:
 7041		*class = CLASS_CODE_AH_ESP_IPV6;
 7042		break;
 7043	case SCTP_V6_FLOW:
 7044		*class = CLASS_CODE_SCTP_IPV6;
 7045		break;
 7046	default:
 7047		return 0;
 7048	}
 7049
 7050	return 1;
 7051}
 7052
 7053static u64 niu_flowkey_to_ethflow(u64 flow_key)
 7054{
 7055	u64 ethflow = 0;
 7056
 7057	if (flow_key & FLOW_KEY_L2DA)
 7058		ethflow |= RXH_L2DA;
 7059	if (flow_key & FLOW_KEY_VLAN)
 7060		ethflow |= RXH_VLAN;
 7061	if (flow_key & FLOW_KEY_IPSA)
 7062		ethflow |= RXH_IP_SRC;
 7063	if (flow_key & FLOW_KEY_IPDA)
 7064		ethflow |= RXH_IP_DST;
 7065	if (flow_key & FLOW_KEY_PROTO)
 7066		ethflow |= RXH_L3_PROTO;
 7067	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
 7068		ethflow |= RXH_L4_B_0_1;
 7069	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
 7070		ethflow |= RXH_L4_B_2_3;
 7071
 7072	return ethflow;
  7074}
 7075
 7076static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
 7077{
 7078	u64 key = 0;
 7079
 7080	if (ethflow & RXH_L2DA)
 7081		key |= FLOW_KEY_L2DA;
 7082	if (ethflow & RXH_VLAN)
 7083		key |= FLOW_KEY_VLAN;
 7084	if (ethflow & RXH_IP_SRC)
 7085		key |= FLOW_KEY_IPSA;
 7086	if (ethflow & RXH_IP_DST)
 7087		key |= FLOW_KEY_IPDA;
 7088	if (ethflow & RXH_L3_PROTO)
 7089		key |= FLOW_KEY_PROTO;
 7090	if (ethflow & RXH_L4_B_0_1)
 7091		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
 7092	if (ethflow & RXH_L4_B_2_3)
 7093		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
 7094
 7095	*flow_key = key;
 7096
 7097	return 1;
  7099}
 7100
 7101static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 7102{
 7103	u64 class;
 7104
 7105	nfc->data = 0;
 7106
 7107	if (!niu_ethflow_to_class(nfc->flow_type, &class))
 7108		return -EINVAL;
 7109
 7110	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
 7111	    TCAM_KEY_DISC)
 7112		nfc->data = RXH_DISCARD;
 7113	else
 7114		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
 7115						      CLASS_CODE_USER_PROG1]);
 7116	return 0;
 7117}
 7118
 7119static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
 7120					struct ethtool_rx_flow_spec *fsp)
 7121{
 7122	u32 tmp;
 7123	u16 prt;
 7124
 7125	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
 7126	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
 7127
 7128	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
 7129	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 7130
 7131	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
 7132	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
 7133
 7134	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
 7135	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 7136
 7137	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
 7138		TCAM_V4KEY2_TOS_SHIFT;
 7139	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
 7140		TCAM_V4KEY2_TOS_SHIFT;
 7141
 7142	switch (fsp->flow_type) {
 7143	case TCP_V4_FLOW:
 7144	case UDP_V4_FLOW:
 7145	case SCTP_V4_FLOW:
 7146		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7147			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
 7148		fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
 7149
 7150		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7151			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
 7152		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 7153
 7154		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7155			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
 7156		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
 7157
 7158		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7159			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
 7160		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 7161		break;
 7162	case AH_V4_FLOW:
 7163	case ESP_V4_FLOW:
 7164		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7165			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7166		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 7167
 7168		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7169			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7170		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 7171		break;
 7172	case IP_USER_FLOW:
 7173		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7174			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7175		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 7176
 7177		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7178			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7179		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 7180
 7181		fsp->h_u.usr_ip4_spec.proto =
 7182			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
 7183			TCAM_V4KEY2_PROTO_SHIFT;
 7184		fsp->m_u.usr_ip4_spec.proto =
 7185			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
 7186			TCAM_V4KEY2_PROTO_SHIFT;
 7187
 7188		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
 7189		break;
 7190	default:
 7191		break;
 7192	}
 7193}
 7194
 7195static int niu_get_ethtool_tcam_entry(struct niu *np,
 7196				      struct ethtool_rxnfc *nfc)
 7197{
 7198	struct niu_parent *parent = np->parent;
 7199	struct niu_tcam_entry *tp;
 7200	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
 7201	u16 idx;
 7202	u64 class;
 7203	int ret = 0;
 7204
 7205	idx = tcam_get_index(np, (u16)nfc->fs.location);
 7206
 7207	tp = &parent->tcam[idx];
 7208	if (!tp->valid) {
 7209		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
 7210			    parent->index, (u16)nfc->fs.location, idx);
 7211		return -EINVAL;
 7212	}
 7213
 7214	/* fill the flow spec entry */
 7215	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 7216		TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7217	ret = niu_class_to_ethflow(class, &fsp->flow_type);
 7218
  7219	if (!ret) {	/* niu_class_to_ethflow() returns 0 on unknown class */
 7220		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
 7221			    parent->index);
 7222		ret = -EINVAL;
 7223		goto out;
 7224	}
 7225
 7226	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
 7227		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
 7228			TCAM_V4KEY2_PROTO_SHIFT;
 7229		if (proto == IPPROTO_ESP) {
 7230			if (fsp->flow_type == AH_V4_FLOW)
 7231				fsp->flow_type = ESP_V4_FLOW;
 7232			else
 7233				fsp->flow_type = ESP_V6_FLOW;
 7234		}
 7235	}
 7236
 7237	switch (fsp->flow_type) {
 7238	case TCP_V4_FLOW:
 7239	case UDP_V4_FLOW:
 7240	case SCTP_V4_FLOW:
 7241	case AH_V4_FLOW:
 7242	case ESP_V4_FLOW:
 7243		niu_get_ip4fs_from_tcam_key(tp, fsp);
 7244		break;
 7245	case TCP_V6_FLOW:
 7246	case UDP_V6_FLOW:
 7247	case SCTP_V6_FLOW:
 7248	case AH_V6_FLOW:
 7249	case ESP_V6_FLOW:
 7250		/* Not yet implemented */
 7251		ret = -EINVAL;
 7252		break;
 7253	case IP_USER_FLOW:
 7254		niu_get_ip4fs_from_tcam_key(tp, fsp);
 7255		break;
 7256	default:
 7257		ret = -EINVAL;
 7258		break;
 7259	}
 7260
 7261	if (ret < 0)
 7262		goto out;
 7263
 7264	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
 7265		fsp->ring_cookie = RX_CLS_FLOW_DISC;
 7266	else
 7267		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
 7268			TCAM_ASSOCDATA_OFFSET_SHIFT;
 7269
 7270	/* put the tcam size here */
 7271	nfc->data = tcam_get_size(np);
 7272out:
 7273	return ret;
 7274}
 7275
 7276static int niu_get_ethtool_tcam_all(struct niu *np,
 7277				    struct ethtool_rxnfc *nfc,
 7278				    u32 *rule_locs)
 7279{
 7280	struct niu_parent *parent = np->parent;
 7281	struct niu_tcam_entry *tp;
 7282	int i, idx, cnt;
 7283	unsigned long flags;
 7284	int ret = 0;
 7285
 7286	/* put the tcam size here */
 7287	nfc->data = tcam_get_size(np);
 7288
 7289	niu_lock_parent(np, flags);
 7290	for (cnt = 0, i = 0; i < nfc->data; i++) {
 7291		idx = tcam_get_index(np, i);
 7292		tp = &parent->tcam[idx];
 7293		if (!tp->valid)
 7294			continue;
 7295		if (cnt == nfc->rule_cnt) {
 7296			ret = -EMSGSIZE;
 7297			break;
 7298		}
 7299		rule_locs[cnt] = i;
 7300		cnt++;
 7301	}
 7302	niu_unlock_parent(np, flags);
 7303
 7304	return ret;
 7305}
 7306
 7307static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 7308		       void *rule_locs)
 7309{
 7310	struct niu *np = netdev_priv(dev);
 7311	int ret = 0;
 7312
 7313	switch (cmd->cmd) {
 7314	case ETHTOOL_GRXFH:
 7315		ret = niu_get_hash_opts(np, cmd);
 7316		break;
 7317	case ETHTOOL_GRXRINGS:
 7318		cmd->data = np->num_rx_rings;
 7319		break;
 7320	case ETHTOOL_GRXCLSRLCNT:
 7321		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
 7322		break;
 7323	case ETHTOOL_GRXCLSRULE:
 7324		ret = niu_get_ethtool_tcam_entry(np, cmd);
 7325		break;
 7326	case ETHTOOL_GRXCLSRLALL:
 7327		ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs);
 7328		break;
 7329	default:
 7330		ret = -EINVAL;
 7331		break;
 7332	}
 7333
 7334	return ret;
 7335}
 7336
 7337static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 7338{
 7339	u64 class;
 7340	u64 flow_key = 0;
 7341	unsigned long flags;
 7342
 7343	if (!niu_ethflow_to_class(nfc->flow_type, &class))
 7344		return -EINVAL;
 7345
 7346	if (class < CLASS_CODE_USER_PROG1 ||
 7347	    class > CLASS_CODE_SCTP_IPV6)
 7348		return -EINVAL;
 7349
 7350	if (nfc->data & RXH_DISCARD) {
 7351		niu_lock_parent(np, flags);
 7352		flow_key = np->parent->tcam_key[class -
 7353					       CLASS_CODE_USER_PROG1];
 7354		flow_key |= TCAM_KEY_DISC;
 7355		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
 7356		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
 7357		niu_unlock_parent(np, flags);
 7358		return 0;
 7359	} else {
 7360		/* Discard was set before, but is not set now */
 7361		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
 7362		    TCAM_KEY_DISC) {
 7363			niu_lock_parent(np, flags);
 7364			flow_key = np->parent->tcam_key[class -
 7365					       CLASS_CODE_USER_PROG1];
 7366			flow_key &= ~TCAM_KEY_DISC;
 7367			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
 7368			     flow_key);
 7369			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
 7370				flow_key;
 7371			niu_unlock_parent(np, flags);
 7372		}
 7373	}
 7374
 7375	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
 7376		return -EINVAL;
 7377
 7378	niu_lock_parent(np, flags);
 7379	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
 7380	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
 7381	niu_unlock_parent(np, flags);
 7382
 7383	return 0;
 7384}
 7385
 7386static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
 7387				       struct niu_tcam_entry *tp,
 7388				       int l2_rdc_tab, u64 class)
 7389{
 7390	u8 pid = 0;
 7391	u32 sip, dip, sipm, dipm, spi, spim;
 7392	u16 sport, dport, spm, dpm;
 7393
 7394	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
 7395	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
 7396	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
 7397	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
 7398
 7399	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7400	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
 7401	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
 7402	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
 7403
 7404	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
 7405	tp->key[3] |= dip;
 7406
 7407	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
 7408	tp->key_mask[3] |= dipm;
 7409
 7410	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
 7411		       TCAM_V4KEY2_TOS_SHIFT);
 7412	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
 7413			    TCAM_V4KEY2_TOS_SHIFT);
 7414	switch (fsp->flow_type) {
 7415	case TCP_V4_FLOW:
 7416	case UDP_V4_FLOW:
 7417	case SCTP_V4_FLOW:
 7418		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
 7419		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
 7420		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
 7421		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
 7422
 7423		tp->key[2] |= (((u64)sport << 16) | dport);
 7424		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
 7425		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
 7426		break;
 7427	case AH_V4_FLOW:
 7428	case ESP_V4_FLOW:
 7429		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
 7430		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
 7431
 7432		tp->key[2] |= spi;
 7433		tp->key_mask[2] |= spim;
 7434		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
 7435		break;
 7436	case IP_USER_FLOW:
 7437		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
 7438		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
 7439
 7440		tp->key[2] |= spi;
 7441		tp->key_mask[2] |= spim;
 7442		pid = fsp->h_u.usr_ip4_spec.proto;
 7443		break;
 7444	default:
 7445		break;
 7446	}
 7447
 7448	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
 7449	if (pid) {
 7450		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
 7451	}
 7452}
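
/* IPv4 TCAM key layout as used above (inferred from the shift and
 * mask names): key[0] holds the class code, key[1] the L2 RDC table
 * number, key[2] packs TOS, protocol and the 32-bit port-pair or SPI
 * (source port in the upper 16 bits, destination in the lower), and
 * key[3] holds the source address in its upper half and the
 * destination address in its lower half.  key_mask mirrors that
 * layout; a field only participates in matching where its mask bits
 * are set.
 */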
 7453
 7454static int niu_add_ethtool_tcam_entry(struct niu *np,
 7455				      struct ethtool_rxnfc *nfc)
 7456{
 7457	struct niu_parent *parent = np->parent;
 7458	struct niu_tcam_entry *tp;
 7459	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
 7460	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
 7461	int l2_rdc_table = rdc_table->first_table_num;
 7462	u16 idx;
 7463	u64 class;
 7464	unsigned long flags;
 7465	int err, ret;
 7466
 7467	ret = 0;
 7468
 7469	idx = nfc->fs.location;
 7470	if (idx >= tcam_get_size(np))
 7471		return -EINVAL;
 7472
 7473	if (fsp->flow_type == IP_USER_FLOW) {
 7474		int i;
 7475		int add_usr_cls = 0;
 7476		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
 7477		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
 7478
 7479		if (uspec->ip_ver != ETH_RX_NFC_IP4)
 7480			return -EINVAL;
 7481
 7482		niu_lock_parent(np, flags);
 7483
 7484		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
 7485			if (parent->l3_cls[i]) {
 7486				if (uspec->proto == parent->l3_cls_pid[i]) {
 7487					class = parent->l3_cls[i];
 7488					parent->l3_cls_refcnt[i]++;
 7489					add_usr_cls = 1;
 7490					break;
 7491				}
 7492			} else {
 7493				/* Program new user IP class */
 7494				switch (i) {
 7495				case 0:
 7496					class = CLASS_CODE_USER_PROG1;
 7497					break;
 7498				case 1:
 7499					class = CLASS_CODE_USER_PROG2;
 7500					break;
 7501				case 2:
 7502					class = CLASS_CODE_USER_PROG3;
 7503					break;
 7504				case 3:
 7505					class = CLASS_CODE_USER_PROG4;
 7506					break;
 7507				default:
 7508					break;
 7509				}
 7510				ret = tcam_user_ip_class_set(np, class, 0,
 7511							     uspec->proto,
 7512							     uspec->tos,
 7513							     umask->tos);
 7514				if (ret)
 7515					goto out;
 7516
 7517				ret = tcam_user_ip_class_enable(np, class, 1);
 7518				if (ret)
 7519					goto out;
 7520				parent->l3_cls[i] = class;
 7521				parent->l3_cls_pid[i] = uspec->proto;
 7522				parent->l3_cls_refcnt[i]++;
 7523				add_usr_cls = 1;
 7524				break;
 7525			}
 7526		}
 7527		if (!add_usr_cls) {
 7528			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
 7529				    parent->index, __func__, uspec->proto);
 7530			ret = -EINVAL;
 7531			goto out;
 7532		}
 7533		niu_unlock_parent(np, flags);
 7534	} else {
 7535		if (!niu_ethflow_to_class(fsp->flow_type, &class)) {
 7536			return -EINVAL;
 7537		}
 7538	}
 7539
 7540	niu_lock_parent(np, flags);
 7541
 7542	idx = tcam_get_index(np, idx);
 7543	tp = &parent->tcam[idx];
 7544
 7545	memset(tp, 0, sizeof(*tp));
 7546
 7547	/* fill in the tcam key and mask */
 7548	switch (fsp->flow_type) {
 7549	case TCP_V4_FLOW:
 7550	case UDP_V4_FLOW:
 7551	case SCTP_V4_FLOW:
 7552	case AH_V4_FLOW:
 7553	case ESP_V4_FLOW:
 7554		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
 7555		break;
 7556	case TCP_V6_FLOW:
 7557	case UDP_V6_FLOW:
 7558	case SCTP_V6_FLOW:
 7559	case AH_V6_FLOW:
 7560	case ESP_V6_FLOW:
 7561		/* Not yet implemented */
 7562		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
 7563			    parent->index, __func__, fsp->flow_type);
 7564		ret = -EINVAL;
 7565		goto out;
 7566	case IP_USER_FLOW:
 7567		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
 7568		break;
 7569	default:
 7570		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
 7571			    parent->index, __func__, fsp->flow_type);
 7572		ret = -EINVAL;
 7573		goto out;
 7574	}
 7575
 7576	/* fill in the assoc data */
 7577	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
 7578		tp->assoc_data = TCAM_ASSOCDATA_DISC;
 7579	} else {
 7580		if (fsp->ring_cookie >= np->num_rx_rings) {
 7581			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
 7582				    parent->index, __func__,
 7583				    (long long)fsp->ring_cookie);
 7584			ret = -EINVAL;
 7585			goto out;
 7586		}
 7587		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
 7588				  (fsp->ring_cookie <<
 7589				   TCAM_ASSOCDATA_OFFSET_SHIFT));
 7590	}
 7591
 7592	err = tcam_write(np, idx, tp->key, tp->key_mask);
 7593	if (err) {
 7594		ret = -EINVAL;
 7595		goto out;
 7596	}
 7597	err = tcam_assoc_write(np, idx, tp->assoc_data);
 7598	if (err) {
 7599		ret = -EINVAL;
 7600		goto out;
 7601	}
 7602
 7603	/* validate the entry */
 7604	tp->valid = 1;
 7605	np->clas.tcam_valid_entries++;
 7606out:
 7607	niu_unlock_parent(np, flags);
 7608
 7609	return ret;
 7610}
 7611
 7612static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
 7613{
 7614	struct niu_parent *parent = np->parent;
 7615	struct niu_tcam_entry *tp;
 7616	u16 idx;
 7617	unsigned long flags;
 7618	u64 class;
 7619	int ret = 0;
 7620
 7621	if (loc >= tcam_get_size(np))
 7622		return -EINVAL;
 7623
 7624	niu_lock_parent(np, flags);
 7625
 7626	idx = tcam_get_index(np, loc);
 7627	tp = &parent->tcam[idx];
 7628
  7629	/* If the entry is of a user-defined class, update the class usage too */
 7630	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 7631		TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7632
 7633	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
 7634		int i;
 7635		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
 7636			if (parent->l3_cls[i] == class) {
 7637				parent->l3_cls_refcnt[i]--;
 7638				if (!parent->l3_cls_refcnt[i]) {
 7639					/* disable class */
 7640					ret = tcam_user_ip_class_enable(np,
 7641									class,
 7642									0);
 7643					if (ret)
 7644						goto out;
 7645					parent->l3_cls[i] = 0;
 7646					parent->l3_cls_pid[i] = 0;
 7647				}
 7648				break;
 7649			}
 7650		}
 7651		if (i == NIU_L3_PROG_CLS) {
 7652			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
 7653				    parent->index, __func__,
 7654				    (unsigned long long)class);
 7655			ret = -EINVAL;
 7656			goto out;
 7657		}
 7658	}
 7659
 7660	ret = tcam_flush(np, idx);
 7661	if (ret)
 7662		goto out;
 7663
 7664	/* invalidate the entry */
 7665	tp->valid = 0;
 7666	np->clas.tcam_valid_entries--;
 7667out:
 7668	niu_unlock_parent(np, flags);
 7669
 7670	return ret;
 7671}
 7672
 7673static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 7674{
 7675	struct niu *np = netdev_priv(dev);
 7676	int ret = 0;
 7677
 7678	switch (cmd->cmd) {
 7679	case ETHTOOL_SRXFH:
 7680		ret = niu_set_hash_opts(np, cmd);
 7681		break;
 7682	case ETHTOOL_SRXCLSRLINS:
 7683		ret = niu_add_ethtool_tcam_entry(np, cmd);
 7684		break;
 7685	case ETHTOOL_SRXCLSRLDEL:
 7686		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
 7687		break;
 7688	default:
 7689		ret = -EINVAL;
 7690		break;
 7691	}
 7692
 7693	return ret;
 7694}
 7695
 7696static const struct {
 7697	const char string[ETH_GSTRING_LEN];
 7698} niu_xmac_stat_keys[] = {
 7699	{ "tx_frames" },
 7700	{ "tx_bytes" },
 7701	{ "tx_fifo_errors" },
 7702	{ "tx_overflow_errors" },
 7703	{ "tx_max_pkt_size_errors" },
 7704	{ "tx_underflow_errors" },
 7705	{ "rx_local_faults" },
 7706	{ "rx_remote_faults" },
 7707	{ "rx_link_faults" },
 7708	{ "rx_align_errors" },
 7709	{ "rx_frags" },
 7710	{ "rx_mcasts" },
 7711	{ "rx_bcasts" },
 7712	{ "rx_hist_cnt1" },
 7713	{ "rx_hist_cnt2" },
 7714	{ "rx_hist_cnt3" },
 7715	{ "rx_hist_cnt4" },
 7716	{ "rx_hist_cnt5" },
 7717	{ "rx_hist_cnt6" },
 7718	{ "rx_hist_cnt7" },
 7719	{ "rx_octets" },
 7720	{ "rx_code_violations" },
 7721	{ "rx_len_errors" },
 7722	{ "rx_crc_errors" },
 7723	{ "rx_underflows" },
 7724	{ "rx_overflows" },
 7725	{ "pause_off_state" },
 7726	{ "pause_on_state" },
 7727	{ "pause_received" },
 7728};
 7729
 7730#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
 7731
 7732static const struct {
 7733	const char string[ETH_GSTRING_LEN];
 7734} niu_bmac_stat_keys[] = {
 7735	{ "tx_underflow_errors" },
 7736	{ "tx_max_pkt_size_errors" },
 7737	{ "tx_bytes" },
 7738	{ "tx_frames" },
 7739	{ "rx_overflows" },
 7740	{ "rx_frames" },
 7741	{ "rx_align_errors" },
 7742	{ "rx_crc_errors" },
 7743	{ "rx_len_errors" },
 7744	{ "pause_off_state" },
 7745	{ "pause_on_state" },
 7746	{ "pause_received" },
 7747};
 7748
 7749#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
 7750
 7751static const struct {
 7752	const char string[ETH_GSTRING_LEN];
 7753} niu_rxchan_stat_keys[] = {
 7754	{ "rx_channel" },
 7755	{ "rx_packets" },
 7756	{ "rx_bytes" },
 7757	{ "rx_dropped" },
 7758	{ "rx_errors" },
 7759};
 7760
 7761#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
 7762
 7763static const struct {
 7764	const char string[ETH_GSTRING_LEN];
 7765} niu_txchan_stat_keys[] = {
 7766	{ "tx_channel" },
 7767	{ "tx_packets" },
 7768	{ "tx_bytes" },
 7769	{ "tx_errors" },
 7770};
 7771
 7772#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
 7773
 7774static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 7775{
 7776	struct niu *np = netdev_priv(dev);
 7777	int i;
 7778
 7779	if (stringset != ETH_SS_STATS)
 7780		return;
 7781
 7782	if (np->flags & NIU_FLAGS_XMAC) {
 7783		memcpy(data, niu_xmac_stat_keys,
 7784		       sizeof(niu_xmac_stat_keys));
 7785		data += sizeof(niu_xmac_stat_keys);
 7786	} else {
 7787		memcpy(data, niu_bmac_stat_keys,
 7788		       sizeof(niu_bmac_stat_keys));
 7789		data += sizeof(niu_bmac_stat_keys);
 7790	}
 7791	for (i = 0; i < np->num_rx_rings; i++) {
 7792		memcpy(data, niu_rxchan_stat_keys,
 7793		       sizeof(niu_rxchan_stat_keys));
 7794		data += sizeof(niu_rxchan_stat_keys);
 7795	}
 7796	for (i = 0; i < np->num_tx_rings; i++) {
 7797		memcpy(data, niu_txchan_stat_keys,
 7798		       sizeof(niu_txchan_stat_keys));
 7799		data += sizeof(niu_txchan_stat_keys);
 7800	}
 7801}
 7802
 7803static int niu_get_sset_count(struct net_device *dev, int stringset)
 7804{
 7805	struct niu *np = netdev_priv(dev);
 7806
 7807	if (stringset != ETH_SS_STATS)
 7808		return -EINVAL;
 7809
 7810	return (np->flags & NIU_FLAGS_XMAC ?
 7811		 NUM_XMAC_STAT_KEYS :
 7812		 NUM_BMAC_STAT_KEYS) +
 7813		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
 7814		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
 7815}
 7816
 7817static void niu_get_ethtool_stats(struct net_device *dev,
 7818				  struct ethtool_stats *stats, u64 *data)
 7819{
 7820	struct niu *np = netdev_priv(dev);
 7821	int i;
 7822
 7823	niu_sync_mac_stats(np);
 7824	if (np->flags & NIU_FLAGS_XMAC) {
 7825		memcpy(data, &np->mac_stats.xmac,
 7826		       sizeof(struct niu_xmac_stats));
 7827		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
 7828	} else {
 7829		memcpy(data, &np->mac_stats.bmac,
 7830		       sizeof(struct niu_bmac_stats));
 7831		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
 7832	}
 7833	for (i = 0; i < np->num_rx_rings; i++) {
 7834		struct rx_ring_info *rp = &np->rx_rings[i];
 7835
 7836		niu_sync_rx_discard_stats(np, rp, 0);
 7837
 7838		data[0] = rp->rx_channel;
 7839		data[1] = rp->rx_packets;
 7840		data[2] = rp->rx_bytes;
 7841		data[3] = rp->rx_dropped;
 7842		data[4] = rp->rx_errors;
 7843		data += 5;
 7844	}
 7845	for (i = 0; i < np->num_tx_rings; i++) {
 7846		struct tx_ring_info *rp = &np->tx_rings[i];
 7847
 7848		data[0] = rp->tx_channel;
 7849		data[1] = rp->tx_packets;
 7850		data[2] = rp->tx_bytes;
 7851		data[3] = rp->tx_errors;
 7852		data += 4;
 7853	}
 7854}
 7855
 7856static u64 niu_led_state_save(struct niu *np)
 7857{
 7858	if (np->flags & NIU_FLAGS_XMAC)
 7859		return nr64_mac(XMAC_CONFIG);
 7860	else
 7861		return nr64_mac(BMAC_XIF_CONFIG);
 7862}
 7863
 7864static void niu_led_state_restore(struct niu *np, u64 val)
 7865{
 7866	if (np->flags & NIU_FLAGS_XMAC)
 7867		nw64_mac(XMAC_CONFIG, val);
 7868	else
 7869		nw64_mac(BMAC_XIF_CONFIG, val);
 7870}
 7871
 7872static void niu_force_led(struct niu *np, int on)
 7873{
 7874	u64 val, reg, bit;
 7875
 7876	if (np->flags & NIU_FLAGS_XMAC) {
 7877		reg = XMAC_CONFIG;
 7878		bit = XMAC_CONFIG_FORCE_LED_ON;
 7879	} else {
 7880		reg = BMAC_XIF_CONFIG;
 7881		bit = BMAC_XIF_CONFIG_LINK_LED;
 7882	}
 7883
 7884	val = nr64_mac(reg);
 7885	if (on)
 7886		val |= bit;
 7887	else
 7888		val &= ~bit;
 7889	nw64_mac(reg, val);
 7890}
 7891
 7892static int niu_set_phys_id(struct net_device *dev,
 7893			   enum ethtool_phys_id_state state)
  7895{
 7896	struct niu *np = netdev_priv(dev);
 7897
 7898	if (!netif_running(dev))
 7899		return -EAGAIN;
 7900
 7901	switch (state) {
 7902	case ETHTOOL_ID_ACTIVE:
 7903		np->orig_led_state = niu_led_state_save(np);
 7904		return 1;	/* cycle on/off once per second */
 7905
 7906	case ETHTOOL_ID_ON:
 7907		niu_force_led(np, 1);
 7908		break;
 7909
 7910	case ETHTOOL_ID_OFF:
 7911		niu_force_led(np, 0);
 7912		break;
 7913
 7914	case ETHTOOL_ID_INACTIVE:
 7915		niu_led_state_restore(np, np->orig_led_state);
 7916	}
 7917
 7918	return 0;
 7919}
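
/* ethtool LED-blink protocol: returning 1 from ETHTOOL_ID_ACTIVE
 * asks the core to call back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF once
 * per second, and ETHTOOL_ID_INACTIVE restores whatever XMAC/BMAC
 * config the port had before the blinking started.
 */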
 7920
 7921static const struct ethtool_ops niu_ethtool_ops = {
 7922	.get_drvinfo		= niu_get_drvinfo,
 7923	.get_link		= ethtool_op_get_link,
 7924	.get_msglevel		= niu_get_msglevel,
 7925	.set_msglevel		= niu_set_msglevel,
 7926	.nway_reset		= niu_nway_reset,
 7927	.get_eeprom_len		= niu_get_eeprom_len,
 7928	.get_eeprom		= niu_get_eeprom,
 7929	.get_settings		= niu_get_settings,
 7930	.set_settings		= niu_set_settings,
 7931	.get_strings		= niu_get_strings,
 7932	.get_sset_count		= niu_get_sset_count,
 7933	.get_ethtool_stats	= niu_get_ethtool_stats,
 7934	.set_phys_id		= niu_set_phys_id,
 7935	.get_rxnfc		= niu_get_nfc,
 7936	.set_rxnfc		= niu_set_nfc,
 7937};
 7938
 7939static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
 7940			      int ldg, int ldn)
 7941{
 7942	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
 7943		return -EINVAL;
 7944	if (ldn < 0 || ldn > LDN_MAX)
 7945		return -EINVAL;
 7946
 7947	parent->ldg_map[ldn] = ldg;
 7948
 7949	if (np->parent->plat_type == PLAT_TYPE_NIU) {
  7950		/* On N2 NIU, the ldn-->ldg assignments are set up and fixed by
 7951		 * the firmware, and we're not supposed to change them.
 7952		 * Validate the mapping, because if it's wrong we probably
 7953		 * won't get any interrupts and that's painful to debug.
 7954		 */
 7955		if (nr64(LDG_NUM(ldn)) != ldg) {
  7956			dev_err(np->device, "Port %u, mismatched LDG assignment for ldn %d, should be %d, is %llu\n",
 7957				np->port, ldn, ldg,
 7958				(unsigned long long) nr64(LDG_NUM(ldn)));
 7959			return -EINVAL;
 7960		}
 7961	} else
 7962		nw64(LDG_NUM(ldn), ldg);
 7963
 7964	return 0;
 7965}
 7966
 7967static int niu_set_ldg_timer_res(struct niu *np, int res)
 7968{
 7969	if (res < 0 || res > LDG_TIMER_RES_VAL)
 7970		return -EINVAL;
  7971
 7973	nw64(LDG_TIMER_RES, res);
 7974
 7975	return 0;
 7976}
 7977
 7978static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
 7979{
 7980	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
 7981	    (func < 0 || func > 3) ||
 7982	    (vector < 0 || vector > 0x1f))
 7983		return -EINVAL;
 7984
 7985	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
 7986
 7987	return 0;
 7988}
 7989
 7990static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
 7991{
 7992	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
 7993				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
 7994	int limit;
 7995
 7996	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
 7997		return -EINVAL;
 7998
 7999	frame = frame_base;
 8000	nw64(ESPC_PIO_STAT, frame);
 8001	limit = 64;
 8002	do {
 8003		udelay(5);
 8004		frame = nr64(ESPC_PIO_STAT);
 8005		if (frame & ESPC_PIO_STAT_READ_END)
 8006			break;
 8007	} while (limit--);
 8008	if (!(frame & ESPC_PIO_STAT_READ_END)) {
 8009		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
 8010			(unsigned long long) frame);
 8011		return -ENODEV;
 8012	}
 8013
 8014	frame = frame_base;
 8015	nw64(ESPC_PIO_STAT, frame);
 8016	limit = 64;
 8017	do {
 8018		udelay(5);
 8019		frame = nr64(ESPC_PIO_STAT);
 8020		if (frame & ESPC_PIO_STAT_READ_END)
 8021			break;
 8022	} while (limit--);
 8023	if (!(frame & ESPC_PIO_STAT_READ_END)) {
 8024		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
 8025			(unsigned long long) frame);
 8026		return -ENODEV;
 8027	}
 8028
 8029	frame = nr64(ESPC_PIO_STAT);
 8030	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
 8031}
 8032
 8033static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
 8034{
 8035	int err = niu_pci_eeprom_read(np, off);
 8036	u16 val;
 8037
 8038	if (err < 0)
 8039		return err;
 8040	val = (err << 8);
 8041	err = niu_pci_eeprom_read(np, off + 1);
 8042	if (err < 0)
 8043		return err;
 8044	val |= (err & 0xff);
 8045
 8046	return val;
 8047}
 8048
 8049static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
 8050{
 8051	int err = niu_pci_eeprom_read(np, off);
 8052	u16 val;
 8053
 8054	if (err < 0)
 8055		return err;
 8056
 8057	val = (err & 0xff);
 8058	err = niu_pci_eeprom_read(np, off + 1);
 8059	if (err < 0)
 8060		return err;
 8061
 8062	val |= (err & 0xff) << 8;
 8063
 8064	return val;
 8065}
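
/* Two byte orders for the same 16-bit read: _read16() takes the
 * first byte as most significant (used below for the big-endian
 * 0x55aa / "PC" / "IR" signature checks), while _read16_swp() takes
 * it as least significant (used for the little-endian PCI data
 * structure fields).  Bytes 0x12, 0x34 read back as 0x1234 and
 * 0x3412 respectively.
 */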
 8066
 8067static int __devinit niu_pci_vpd_get_propname(struct niu *np,
 8068					      u32 off,
 8069					      char *namebuf,
 8070					      int namebuf_len)
 8071{
 8072	int i;
 8073
 8074	for (i = 0; i < namebuf_len; i++) {
 8075		int err = niu_pci_eeprom_read(np, off + i);
 8076		if (err < 0)
 8077			return err;
 8078		*namebuf++ = err;
 8079		if (!err)
 8080			break;
 8081	}
 8082	if (i >= namebuf_len)
 8083		return -EINVAL;
 8084
 8085	return i + 1;
 8086}
 8087
 8088static void __devinit niu_vpd_parse_version(struct niu *np)
 8089{
 8090	struct niu_vpd *vpd = &np->vpd;
 8091	int len = strlen(vpd->version) + 1;
 8092	const char *s = vpd->version;
 8093	int i;
 8094
 8095	for (i = 0; i < len - 5; i++) {
 8096		if (!strncmp(s + i, "FCode ", 6))
 8097			break;
 8098	}
 8099	if (i >= len - 5)
 8100		return;
 8101
 8102	s += i + 5;
 8103	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
 8104
 8105	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8106		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
 8107		     vpd->fcode_major, vpd->fcode_minor);
 8108	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
 8109	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
 8110	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
 8111		np->flags |= NIU_FLAGS_VPD_VALID;
 8112}
 8113
 8114/* ESPC_PIO_EN_ENABLE must be set */
 8115static int __devinit niu_pci_vpd_scan_props(struct niu *np,
 8116					    u32 start, u32 end)
 8117{
 8118	unsigned int found_mask = 0;
 8119#define FOUND_MASK_MODEL	0x00000001
 8120#define FOUND_MASK_BMODEL	0x00000002
 8121#define FOUND_MASK_VERS		0x00000004
 8122#define FOUND_MASK_MAC		0x00000008
 8123#define FOUND_MASK_NMAC		0x00000010
 8124#define FOUND_MASK_PHY		0x00000020
 8125#define FOUND_MASK_ALL		0x0000003f
 8126
 8127	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8128		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
 8129	while (start < end) {
 8130		int len, err, prop_len;
 8131		char namebuf[64];
 8132		u8 *prop_buf;
 8133		int max_len;
 8134
 8135		if (found_mask == FOUND_MASK_ALL) {
 8136			niu_vpd_parse_version(np);
 8137			return 1;
 8138		}
 8139
 8140		err = niu_pci_eeprom_read(np, start + 2);
 8141		if (err < 0)
 8142			return err;
 8143		len = err;
 8144		start += 3;
 8145
  8146		prop_len = niu_pci_eeprom_read(np, start + 4);
		if (prop_len < 0)
			return prop_len;
 8147		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
 8148		if (err < 0)
 8149			return err;
 8150
 8151		prop_buf = NULL;
 8152		max_len = 0;
 8153		if (!strcmp(namebuf, "model")) {
 8154			prop_buf = np->vpd.model;
 8155			max_len = NIU_VPD_MODEL_MAX;
 8156			found_mask |= FOUND_MASK_MODEL;
 8157		} else if (!strcmp(namebuf, "board-model")) {
 8158			prop_buf = np->vpd.board_model;
 8159			max_len = NIU_VPD_BD_MODEL_MAX;
 8160			found_mask |= FOUND_MASK_BMODEL;
 8161		} else if (!strcmp(namebuf, "version")) {
 8162			prop_buf = np->vpd.version;
 8163			max_len = NIU_VPD_VERSION_MAX;
 8164			found_mask |= FOUND_MASK_VERS;
 8165		} else if (!strcmp(namebuf, "local-mac-address")) {
 8166			prop_buf = np->vpd.local_mac;
 8167			max_len = ETH_ALEN;
 8168			found_mask |= FOUND_MASK_MAC;
 8169		} else if (!strcmp(namebuf, "num-mac-addresses")) {
 8170			prop_buf = &np->vpd.mac_num;
 8171			max_len = 1;
 8172			found_mask |= FOUND_MASK_NMAC;
 8173		} else if (!strcmp(namebuf, "phy-type")) {
 8174			prop_buf = np->vpd.phy_type;
 8175			max_len = NIU_VPD_PHY_TYPE_MAX;
 8176			found_mask |= FOUND_MASK_PHY;
 8177		}
 8178
 8179		if (max_len && prop_len > max_len) {
 8180			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
 8181			return -EINVAL;
 8182		}
 8183
 8184		if (prop_buf) {
 8185			u32 off = start + 5 + err;
 8186			int i;
 8187
 8188			netif_printk(np, probe, KERN_DEBUG, np->dev,
 8189				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
 8190				     namebuf, prop_len);
 8191			for (i = 0; i < prop_len; i++) {
     				err = niu_pci_eeprom_read(np, off + i);
     				if (err < 0)
     					return err;
     				*prop_buf++ = err;
     			}
 8193		}
 8194
 8195		start += len;
 8196	}
 8197
 8198	return 0;
 8199}
 8200
 8201/* ESPC_PIO_EN_ENABLE must be set */
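     /* Given the offset of the read-only VPD area in the EEPROM, walk
      * its resources and scan the properties of every VPD-R (tag 0x90)
      * block found there.
      */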
 8202static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
 8203{
 8204	u32 offset;
 8205	int err;
 8206
 8207	err = niu_pci_eeprom_read16_swp(np, start + 1);
 8208	if (err < 0)
 8209		return;
 8210
 8211	offset = err + 3;
 8212
 8213	while (start + offset < ESPC_EEPROM_SIZE) {
 8214		u32 here = start + offset;
 8215		u32 end;
 8216
 8217		err = niu_pci_eeprom_read(np, here);
 8218		if (err != 0x90)
 8219			return;
 8220
 8221		err = niu_pci_eeprom_read16_swp(np, here + 1);
 8222		if (err < 0)
 8223			return;
 8224
 8225		here = start + offset + 3;
 8226		end = start + offset + err;
 8227
 8228		offset += err;
 8229
 8230		err = niu_pci_vpd_scan_props(np, here, end);
 8231		if (err < 0 || err == 1)
 8232			return;
 8233	}
 8234}
 8235
 8236/* ESPC_PIO_EN_ENABLE must be set */
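     /* Walk the expansion ROM images (0x55aa signature, "PCIR" data
      * structure) looking for the OBP image, then follow its pointer
      * to the VPD area, which must begin with an identifier-string
      * tag (0x82).  Returns the VPD offset, or 0 if none is found.
      */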
 8237static u32 __devinit niu_pci_vpd_offset(struct niu *np)
 8238{
 8239	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
 8240	int err;
 8241
 8242	while (start < end) {
 8243		ret = start;
 8244
 8245		/* ROM header signature?  */
 8246		err = niu_pci_eeprom_read16(np, start +  0);
 8247		if (err != 0x55aa)
 8248			return 0;
 8249
 8250		/* Apply offset to PCI data structure.  */
 8251		err = niu_pci_eeprom_read16(np, start + 23);
 8252		if (err < 0)
 8253			return 0;
 8254		start += err;
 8255
 8256		/* Check for "PCIR" signature.  */
 8257		err = niu_pci_eeprom_read16(np, start +  0);
 8258		if (err != 0x5043)
 8259			return 0;
 8260		err = niu_pci_eeprom_read16(np, start +  2);
 8261		if (err != 0x4952)
 8262			return 0;
 8263
 8264		/* Check for OBP image type.  */
 8265		err = niu_pci_eeprom_read(np, start + 20);
 8266		if (err < 0)
 8267			return 0;
 8268		if (err != 0x01) {
 8269			err = niu_pci_eeprom_read(np, ret + 2);
 8270			if (err < 0)
 8271				return 0;
 8272
 8273			start = ret + (err * 512);
 8274			continue;
 8275		}
 8276
 8277		err = niu_pci_eeprom_read16_swp(np, start + 8);
 8278		if (err < 0)
 8279			return 0;
 8280		ret += err;
 8281
 8282		err = niu_pci_eeprom_read(np, ret + 0);
 8283		if (err != 0x82)
 8284			return 0;
 8285
 8286		return ret;
 8287	}
 8288
 8289	return 0;
 8290}
 8291
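     /* Translate the "phy-type" property string into the NIU_FLAGS_*
      * bits and MAC transceiver type.  Returns -EINVAL for an unknown
      * string.
      */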
 8292static int __devinit niu_phy_type_prop_decode(struct niu *np,
 8293					      const char *phy_prop)
 8294{
 8295	if (!strcmp(phy_prop, "mif")) {
 8296		/* 1G copper, MII */
 8297		np->flags &= ~(NIU_FLAGS_FIBER |
 8298			       NIU_FLAGS_10G);
 8299		np->mac_xcvr = MAC_XCVR_MII;
 8300	} else if (!strcmp(phy_prop, "xgf")) {
 8301		/* 10G fiber, XPCS */
 8302		np->flags |= (NIU_FLAGS_10G |
 8303			      NIU_FLAGS_FIBER);
 8304		np->mac_xcvr = MAC_XCVR_XPCS;
 8305	} else if (!strcmp(phy_prop, "pcs")) {
 8306		/* 1G fiber, PCS */
 8307		np->flags &= ~NIU_FLAGS_10G;
 8308		np->flags |= NIU_FLAGS_FIBER;
 8309		np->mac_xcvr = MAC_XCVR_PCS;
 8310	} else if (!strcmp(phy_prop, "xgc")) {
 8311		/* 10G copper, XPCS */
 8312		np->flags |= NIU_FLAGS_10G;
 8313		np->flags &= ~NIU_FLAGS_FIBER;
 8314		np->mac_xcvr = MAC_XCVR_XPCS;
 8315	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
 8316		/* 10G Serdes or 1G Serdes, default to 10G */
 8317		np->flags |= NIU_FLAGS_10G;
 8318		np->flags &= ~NIU_FLAGS_FIBER;
 8319		np->flags |= NIU_FLAGS_XCVR_SERDES;
 8320		np->mac_xcvr = MAC_XCVR_XPCS;
 8321	} else {
 8322		return -EINVAL;
 8323	}
 8324	return 0;
 8325}
 8326
 8327static int niu_pci_vpd_get_nports(struct niu *np)
 8328{
 8329	int ports = 0;
 8330
 8331	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
 8332	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
 8333	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
 8334	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
 8335	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
 8336		ports = 4;
 8337	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
 8338		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
 8339		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
 8340		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
 8341		ports = 2;
 8342	}
 8343
 8344	return ports;
 8345}
 8346
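     /* Sanity-check the fetched VPD and derive per-port flags from the
      * model string, clearing NIU_FLAGS_VPD_VALID (so that we fall back
      * to the SPROM) if the MAC address or phy-type string is unusable.
      */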
 8347static void __devinit niu_pci_vpd_validate(struct niu *np)
 8348{
 8349	struct net_device *dev = np->dev;
 8350	struct niu_vpd *vpd = &np->vpd;
 8351	u8 val8;
 8352
 8353	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
 8354		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
 8355
 8356		np->flags &= ~NIU_FLAGS_VPD_VALID;
 8357		return;
 8358	}
 8359
 8360	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
 8361	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 8362		np->flags |= NIU_FLAGS_10G;
 8363		np->flags &= ~NIU_FLAGS_FIBER;
 8364		np->flags |= NIU_FLAGS_XCVR_SERDES;
 8365		np->mac_xcvr = MAC_XCVR_PCS;
 8366		if (np->port > 1) {
 8367			np->flags |= NIU_FLAGS_FIBER;
 8368			np->flags &= ~NIU_FLAGS_10G;
 8369		}
 8370		if (np->flags & NIU_FLAGS_10G)
 8371			np->mac_xcvr = MAC_XCVR_XPCS;
 8372	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 8373		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 8374			      NIU_FLAGS_HOTPLUG_PHY);
 8375	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
 8376		dev_err(np->device, "Illegal phy string [%s]\n",
 8377			np->vpd.phy_type);
 8378		dev_err(np->device, "Falling back to SPROM\n");
 8379		np->flags &= ~NIU_FLAGS_VPD_VALID;
 8380		return;
 8381	}
 8382
 8383	memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);
 8384
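     	/* Each port gets its own MAC address: add the port number to
     	 * the low byte, carrying into byte 4 on overflow.
     	 */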
 8385	val8 = dev->perm_addr[5];
 8386	dev->perm_addr[5] += np->port;
 8387	if (dev->perm_addr[5] < val8)
 8388		dev->perm_addr[4]++;
 8389
 8390	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
 8391}
 8392
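     /* Read the port invariants (phy type, MAC address, model strings)
      * from the SPROM when no usable VPD was found.  The image is
      * protected by a simple byte-sum checksum that must equal 0xab.
      */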
 8393static int __devinit niu_pci_probe_sprom(struct niu *np)
 8394{
 8395	struct net_device *dev = np->dev;
 8396	int len, i;
 8397	u64 val, sum;
 8398	u8 val8;
 8399
 8400	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
 8401	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
 8402	len = val / 4;
 8403
 8404	np->eeprom_len = len;
 8405
 8406	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8407		     "SPROM: Image size %llu\n", (unsigned long long)val);
 8408
 8409	sum = 0;
 8410	for (i = 0; i < len; i++) {
 8411		val = nr64(ESPC_NCR(i));
 8412		sum += (val >>  0) & 0xff;
 8413		sum += (val >>  8) & 0xff;
 8414		sum += (val >> 16) & 0xff;
 8415		sum += (val >> 24) & 0xff;
 8416	}
 8417	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8418		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
 8419	if ((sum & 0xff) != 0xab) {
 8420		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
 8421		return -EINVAL;
 8422	}
 8423
 8424	val = nr64(ESPC_PHY_TYPE);
 8425	switch (np->port) {
 8426	case 0:
 8427		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
 8428			ESPC_PHY_TYPE_PORT0_SHIFT;
 8429		break;
 8430	case 1:
 8431		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
 8432			ESPC_PHY_TYPE_PORT1_SHIFT;
 8433		break;
 8434	case 2:
 8435		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
 8436			ESPC_PHY_TYPE_PORT2_SHIFT;
 8437		break;
 8438	case 3:
 8439		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
 8440			ESPC_PHY_TYPE_PORT3_SHIFT;
 8441		break;
 8442	default:
 8443		dev_err(np->device, "Bogus port number %u\n",
 8444			np->port);
 8445		return -EINVAL;
 8446	}
 8447	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8448		     "SPROM: PHY type %x\n", val8);
 8449
 8450	switch (val8) {
 8451	case ESPC_PHY_TYPE_1G_COPPER:
 8452		/* 1G copper, MII */
 8453		np->flags &= ~(NIU_FLAGS_FIBER |
 8454			       NIU_FLAGS_10G);
 8455		np->mac_xcvr = MAC_XCVR_MII;
 8456		break;
 8457
 8458	case ESPC_PHY_TYPE_1G_FIBER:
 8459		/* 1G fiber, PCS */
 8460		np->flags &= ~NIU_FLAGS_10G;
 8461		np->flags |= NIU_FLAGS_FIBER;
 8462		np->mac_xcvr = MAC_XCVR_PCS;
 8463		break;
 8464
 8465	case ESPC_PHY_TYPE_10G_COPPER:
 8466		/* 10G copper, XPCS */
 8467		np->flags |= NIU_FLAGS_10G;
 8468		np->flags &= ~NIU_FLAGS_FIBER;
 8469		np->mac_xcvr = MAC_XCVR_XPCS;
 8470		break;
 8471
 8472	case ESPC_PHY_TYPE_10G_FIBER:
 8473		/* 10G fiber, XPCS */
 8474		np->flags |= (NIU_FLAGS_10G |
 8475			      NIU_FLAGS_FIBER);
 8476		np->mac_xcvr = MAC_XCVR_XPCS;
 8477		break;
 8478
 8479	default:
 8480		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
 8481		return -EINVAL;
 8482	}
 8483
 8484	val = nr64(ESPC_MAC_ADDR0);
 8485	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8486		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
 8487	dev->perm_addr[0] = (val >>  0) & 0xff;
 8488	dev->perm_addr[1] = (val >>  8) & 0xff;
 8489	dev->perm_addr[2] = (val >> 16) & 0xff;
 8490	dev->perm_addr[3] = (val >> 24) & 0xff;
 8491
 8492	val = nr64(ESPC_MAC_ADDR1);
 8493	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8494		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
 8495	dev->perm_addr[4] = (val >>  0) & 0xff;
 8496	dev->perm_addr[5] = (val >>  8) & 0xff;
 8497
 8498	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
 8499		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
 8500			dev->perm_addr);
 8501		return -EINVAL;
 8502	}
 8503
 8504	val8 = dev->perm_addr[5];
 8505	dev->perm_addr[5] += np->port;
 8506	if (dev->perm_addr[5] < val8)
 8507		dev->perm_addr[4]++;
 8508
 8509	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
 8510
 8511	val = nr64(ESPC_MOD_STR_LEN);
 8512	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8513		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
 8514	if (val >= 8 * 4)
 8515		return -EINVAL;
 8516
 8517	for (i = 0; i < val; i += 4) {
 8518		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
 8519
 8520		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
 8521		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
 8522		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
 8523		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
 8524	}
 8525	np->vpd.model[val] = '\0';
 8526
 8527	val = nr64(ESPC_BD_MOD_STR_LEN);
 8528	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8529		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
 8530	if (val >= 4 * 4)
 8531		return -EINVAL;
 8532
 8533	for (i = 0; i < val; i += 4) {
 8534		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
 8535
 8536		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
 8537		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
 8538		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
 8539		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
 8540	}
 8541	np->vpd.board_model[val] = '\0';
 8542
 8543	np->vpd.mac_num =
 8544		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
 8545	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8546		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
 8547
 8548	return 0;
 8549}
 8550
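     /* Establish parent->num_ports (from the platform type, the VPD
      * model string, or as a last resort the SPROM) and reject ports
      * beyond that count.
      */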
 8551static int __devinit niu_get_and_validate_port(struct niu *np)
 8552{
 8553	struct niu_parent *parent = np->parent;
 8554
 8555	if (np->port <= 1)
 8556		np->flags |= NIU_FLAGS_XMAC;
 8557
 8558	if (!parent->num_ports) {
 8559		if (parent->plat_type == PLAT_TYPE_NIU) {
 8560			parent->num_ports = 2;
 8561		} else {
 8562			parent->num_ports = niu_pci_vpd_get_nports(np);
 8563			if (!parent->num_ports) {
 8564				/* Fall back to SPROM as last resort.
 8565				 * This will fail on most cards.
 8566				 */
 8567				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
 8568					ESPC_NUM_PORTS_MACS_VAL;
 8569
 8570				/* All of the current probing methods fail on
 8571				 * Maramba on-board parts.
 8572				 */
 8573				if (!parent->num_ports)
 8574					parent->num_ports = 4;
 8575			}
 8576		}
 8577	}
 8578
 8579	if (np->port >= parent->num_ports)
 8580		return -ENODEV;
 8581
 8582	return 0;
 8583}
 8584
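     /* Record one discovered PHY in the probe table, filtering out
      * IDs we do not recognize for the given transceiver class.
      */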
 8585static int __devinit phy_record(struct niu_parent *parent,
 8586				struct phy_probe_info *p,
 8587				int dev_id_1, int dev_id_2, u8 phy_port,
 8588				int type)
 8589{
 8590	u32 id = (dev_id_1 << 16) | dev_id_2;
 8591	u8 idx;
 8592
 8593	if (dev_id_1 < 0 || dev_id_2 < 0)
 8594		return 0;
 8595	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
 8596		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
 8597		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
 8598		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
 8599			return 0;
 8600	} else {
 8601		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
 8602			return 0;
 8603	}
 8604
 8605	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
 8606		parent->index, id,
 8607		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
 8608		type == PHY_TYPE_PCS ? "PCS" : "MII",
 8609		phy_port);
 8610
 8611	if (p->cur[type] >= NIU_MAX_PORTS) {
 8612		pr_err("Too many PHY ports\n");
 8613		return -EINVAL;
 8614	}
 8615	idx = p->cur[type];
 8616	p->phy_id[type][idx] = id;
 8617	p->phy_port[type][idx] = phy_port;
 8618	p->cur[type] = idx + 1;
 8619	return 0;
 8620}
 8621
 8622static int __devinit port_has_10g(struct phy_probe_info *p, int port)
 8623{
 8624	int i;
 8625
 8626	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
 8627		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
 8628			return 1;
 8629	}
 8630	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
 8631		if (p->phy_port[PHY_TYPE_PCS][i] == port)
 8632			return 1;
 8633	}
 8634
 8635	return 0;
 8636}
 8637
 8638static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
 8639{
 8640	int port, cnt;
 8641
 8642	cnt = 0;
 8643	*lowest = 32;
 8644	for (port = 8; port < 32; port++) {
 8645		if (port_has_10g(p, port)) {
 8646			if (!cnt)
 8647				*lowest = port;
 8648			cnt++;
 8649		}
 8650	}
 8651
 8652	return cnt;
 8653}
 8654
 8655static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
 8656{
 8657	*lowest = 32;
 8658	if (p->cur[PHY_TYPE_MII])
 8659		*lowest = p->phy_port[PHY_TYPE_MII][0];
 8660
 8661	return p->cur[PHY_TYPE_MII];
 8662}
 8663
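     /* On N2 (NIU) the 16 RX and 16 TX DMA channels are simply split
      * evenly across the ports.
      */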
 8664static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
 8665{
 8666	int num_ports = parent->num_ports;
 8667	int i;
 8668
 8669	for (i = 0; i < num_ports; i++) {
 8670		parent->rxchan_per_port[i] = (16 / num_ports);
 8671		parent->txchan_per_port[i] = (16 / num_ports);
 8672
 8673		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
 8674			parent->index, i,
 8675			parent->rxchan_per_port[i],
 8676			parent->txchan_per_port[i]);
 8677	}
 8678}
 8679
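     /* Split the RX/TX DMA channels between ports.  When 10G and 1G
      * ports are mixed, each 1G port gets NIU_NUM_RXCHAN / 8 RX and
      * NIU_NUM_TXCHAN / 6 TX channels and the 10G ports divide the
      * remainder; e.g. with 16 RX channels and two 10G plus two 1G
      * ports, each 1G port gets 2 RX channels and each 10G port 6.
      * With a uniform port layout, channels are divided evenly.
      */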
 8680static void __devinit niu_divide_channels(struct niu_parent *parent,
 8681					  int num_10g, int num_1g)
 8682{
 8683	int num_ports = parent->num_ports;
 8684	int rx_chans_per_10g, rx_chans_per_1g;
 8685	int tx_chans_per_10g, tx_chans_per_1g;
 8686	int i, tot_rx, tot_tx;
 8687
 8688	if (!num_10g || !num_1g) {
 8689		rx_chans_per_10g = rx_chans_per_1g =
 8690			(NIU_NUM_RXCHAN / num_ports);
 8691		tx_chans_per_10g = tx_chans_per_1g =
 8692			(NIU_NUM_TXCHAN / num_ports);
 8693	} else {
 8694		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
 8695		rx_chans_per_10g = (NIU_NUM_RXCHAN -
 8696				    (rx_chans_per_1g * num_1g)) /
 8697			num_10g;
 8698
 8699		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
 8700		tx_chans_per_10g = (NIU_NUM_TXCHAN -
 8701				    (tx_chans_per_1g * num_1g)) /
 8702			num_10g;
 8703	}
 8704
 8705	tot_rx = tot_tx = 0;
 8706	for (i = 0; i < num_ports; i++) {
 8707		int type = phy_decode(parent->port_phy, i);
 8708
 8709		if (type == PORT_TYPE_10G) {
 8710			parent->rxchan_per_port[i] = rx_chans_per_10g;
 8711			parent->txchan_per_port[i] = tx_chans_per_10g;
 8712		} else {
 8713			parent->rxchan_per_port[i] = rx_chans_per_1g;
 8714			parent->txchan_per_port[i] = tx_chans_per_1g;
 8715		}
 8716		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
 8717			parent->index, i,
 8718			parent->rxchan_per_port[i],
 8719			parent->txchan_per_port[i]);
 8720		tot_rx += parent->rxchan_per_port[i];
 8721		tot_tx += parent->txchan_per_port[i];
 8722	}
 8723
 8724	if (tot_rx > NIU_NUM_RXCHAN) {
 8725		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
 8726		       parent->index, tot_rx);
 8727		for (i = 0; i < num_ports; i++)
 8728			parent->rxchan_per_port[i] = 1;
 8729	}
 8730	if (tot_tx > NIU_NUM_TXCHAN) {
 8731		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
 8732		       parent->index, tot_tx);
 8733		for (i = 0; i < num_ports; i++)
 8734			parent->txchan_per_port[i] = 1;
 8735	}
 8736	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
 8737		pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
 8738			   parent->index, tot_rx, tot_tx);
 8739	}
 8740}
 8741
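     /* Give each port an equal share of the RDC tables and populate
      * every table slot by cycling through that port's RX channels;
      * the port's first channel becomes its default RDC.
      */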
 8742static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
 8743					    int num_10g, int num_1g)
 8744{
 8745	int i, num_ports = parent->num_ports;
 8746	int rdc_group, rdc_groups_per_port;
 8747	int rdc_channel_base;
 8748
 8749	rdc_group = 0;
 8750	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
 8751
 8752	rdc_channel_base = 0;
 8753
 8754	for (i = 0; i < num_ports; i++) {
 8755		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
 8756		int grp, num_channels = parent->rxchan_per_port[i];
 8757		int this_channel_offset;
 8758
 8759		tp->first_table_num = rdc_group;
 8760		tp->num_tables = rdc_groups_per_port;
 8761		this_channel_offset = 0;
 8762		for (grp = 0; grp < tp->num_tables; grp++) {
 8763			struct rdc_table *rt = &tp->tables[grp];
 8764			int slot;
 8765
 8766			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
 8767				parent->index, i, tp->first_table_num + grp);
 8768			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
 8769				rt->rxdma_channel[slot] =
 8770					rdc_channel_base + this_channel_offset;
 8771
 8772				pr_cont("%d ", rt->rxdma_channel[slot]);
 8773
 8774				if (++this_channel_offset == num_channels)
 8775					this_channel_offset = 0;
 8776			}
 8777			pr_cont("]\n");
 8778		}
 8779
 8780		parent->rdc_default[i] = rdc_channel_base;
 8781
 8782		rdc_channel_base += num_channels;
 8783		rdc_group += rdc_groups_per_port;
 8784	}
 8785}
 8786
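     /* Probe MDIO addresses 8-31 for PMA/PMD, PCS, and MII PHYs
      * (0-7 are reserved for the on-board SerDes).
      */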
 8787static int __devinit fill_phy_probe_info(struct niu *np,
 8788					 struct niu_parent *parent,
 8789					 struct phy_probe_info *info)
 8790{
 8791	unsigned long flags;
 8792	int port, err;
 8793
 8794	memset(info, 0, sizeof(*info));
 8795
 8796	/* Port 0 to 7 are reserved for onboard Serdes, probe the rest.  */
 8797	niu_lock_parent(np, flags);
 8798	err = 0;
 8799	for (port = 8; port < 32; port++) {
 8800		int dev_id_1, dev_id_2;
 8801
 8802		dev_id_1 = mdio_read(np, port,
 8803				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
 8804		dev_id_2 = mdio_read(np, port,
 8805				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
 8806		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8807				 PHY_TYPE_PMA_PMD);
 8808		if (err)
 8809			break;
 8810		dev_id_1 = mdio_read(np, port,
 8811				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
 8812		dev_id_2 = mdio_read(np, port,
 8813				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
 8814		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8815				 PHY_TYPE_PCS);
 8816		if (err)
 8817			break;
 8818		dev_id_1 = mii_read(np, port, MII_PHYSID1);
 8819		dev_id_2 = mii_read(np, port, MII_PHYSID2);
 8820		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8821				 PHY_TYPE_MII);
 8822		if (err)
 8823			break;
 8824	}
 8825	niu_unlock_parent(np, flags);
 8826
 8827	return err;
 8828}
 8829
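     /* Determine the 10G/1G layout of the ports, either from known
      * model strings or by probing the MDIO bus, then divide the DMA
      * channels and RDC groups accordingly.
      */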
 8830static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
 8831{
 8832	struct phy_probe_info *info = &parent->phy_probe_info;
 8833	int lowest_10g, lowest_1g;
 8834	int num_10g, num_1g;
 8835	u32 val;
 8836	int err;
 8837
 8838	num_10g = num_1g = 0;
 8839
 8840	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
 8841	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 8842		num_10g = 0;
 8843		num_1g = 2;
 8844		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
 8845		parent->num_ports = 4;
 8846		val = (phy_encode(PORT_TYPE_1G, 0) |
 8847		       phy_encode(PORT_TYPE_1G, 1) |
 8848		       phy_encode(PORT_TYPE_1G, 2) |
 8849		       phy_encode(PORT_TYPE_1G, 3));
 8850	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 8851		num_10g = 2;
 8852		num_1g = 0;
 8853		parent->num_ports = 2;
 8854		val = (phy_encode(PORT_TYPE_10G, 0) |
 8855		       phy_encode(PORT_TYPE_10G, 1));
 8856	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
 8857		   (parent->plat_type == PLAT_TYPE_NIU)) {
 8858		/* this is the Monza case */
 8859		if (np->flags & NIU_FLAGS_10G) {
 8860			val = (phy_encode(PORT_TYPE_10G, 0) |
 8861			       phy_encode(PORT_TYPE_10G, 1));
 8862		} else {
 8863			val = (phy_encode(PORT_TYPE_1G, 0) |
 8864			       phy_encode(PORT_TYPE_1G, 1));
 8865		}
 8866	} else {
 8867		err = fill_phy_probe_info(np, parent, info);
 8868		if (err)
 8869			return err;
 8870
 8871		num_10g = count_10g_ports(info, &lowest_10g);
 8872		num_1g = count_1g_ports(info, &lowest_1g);
 8873
 8874		switch ((num_10g << 4) | num_1g) {
 8875		case 0x24:
 8876			if (lowest_1g == 10)
 8877				parent->plat_type = PLAT_TYPE_VF_P0;
 8878			else if (lowest_1g == 26)
 8879				parent->plat_type = PLAT_TYPE_VF_P1;
 8880			else
 8881				goto unknown_vg_1g_port;
 8882
 8883			/* fallthru */
 8884		case 0x22:
 8885			val = (phy_encode(PORT_TYPE_10G, 0) |
 8886			       phy_encode(PORT_TYPE_10G, 1) |
 8887			       phy_encode(PORT_TYPE_1G, 2) |
 8888			       phy_encode(PORT_TYPE_1G, 3));
 8889			break;
 8890
 8891		case 0x20:
 8892			val = (phy_encode(PORT_TYPE_10G, 0) |
 8893			       phy_encode(PORT_TYPE_10G, 1));
 8894			break;
 8895
 8896		case 0x10:
 8897			val = phy_encode(PORT_TYPE_10G, np->port);
 8898			break;
 8899
 8900		case 0x14:
 8901			if (lowest_1g == 10)
 8902				parent->plat_type = PLAT_TYPE_VF_P0;
 8903			else if (lowest_1g == 26)
 8904				parent->plat_type = PLAT_TYPE_VF_P1;
 8905			else
 8906				goto unknown_vg_1g_port;
 8907
 8908			/* fallthru */
 8909		case 0x13:
 8910			if ((lowest_10g & 0x7) == 0)
 8911				val = (phy_encode(PORT_TYPE_10G, 0) |
 8912				       phy_encode(PORT_TYPE_1G, 1) |
 8913				       phy_encode(PORT_TYPE_1G, 2) |
 8914				       phy_encode(PORT_TYPE_1G, 3));
 8915			else
 8916				val = (phy_encode(PORT_TYPE_1G, 0) |
 8917				       phy_encode(PORT_TYPE_10G, 1) |
 8918				       phy_encode(PORT_TYPE_1G, 2) |
 8919				       phy_encode(PORT_TYPE_1G, 3));
 8920			break;
 8921
 8922		case 0x04:
 8923			if (lowest_1g == 10)
 8924				parent->plat_type = PLAT_TYPE_VF_P0;
 8925			else if (lowest_1g == 26)
 8926				parent->plat_type = PLAT_TYPE_VF_P1;
 8927			else
 8928				goto unknown_vg_1g_port;
 8929
 8930			val = (phy_encode(PORT_TYPE_1G, 0) |
 8931			       phy_encode(PORT_TYPE_1G, 1) |
 8932			       phy_encode(PORT_TYPE_1G, 2) |
 8933			       phy_encode(PORT_TYPE_1G, 3));
 8934			break;
 8935
 8936		default:
 8937			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
 8938			       num_10g, num_1g);
 8939			return -EINVAL;
 8940		}
 8941	}
 8942
 8943	parent->port_phy = val;
 8944
 8945	if (parent->plat_type == PLAT_TYPE_NIU)
 8946		niu_n2_divide_channels(parent);
 8947	else
 8948		niu_divide_channels(parent, num_10g, num_1g);
 8949
 8950	niu_divide_rdc_groups(parent, num_10g, num_1g);
 8951
 8952	return 0;
 8953
 8954unknown_vg_1g_port:
 8955	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
 8956	return -EINVAL;
 8957}
 8958
 8959static int __devinit niu_probe_ports(struct niu *np)
 8960{
 8961	struct niu_parent *parent = np->parent;
 8962	int err, i;
 8963
 8964	if (parent->port_phy == PORT_PHY_UNKNOWN) {
 8965		err = walk_phys(np, parent);
 8966		if (err)
 8967			return err;
 8968
 8969		niu_set_ldg_timer_res(np, 2);
 8970		for (i = 0; i <= LDN_MAX; i++)
 8971			niu_ldn_irq_enable(np, i, 0);
 8972	}
 8973
 8974	if (parent->port_phy == PORT_PHY_INVALID)
 8975		return -EINVAL;
 8976
 8977	return 0;
 8978}
 8979
 8980static int __devinit niu_classifier_swstate_init(struct niu *np)
 8981{
 8982	struct niu_classifier *cp = &np->clas;
 8983
 8984	cp->tcam_top = (u16) np->port;
 8985	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
 8986	cp->h1_init = 0xffffffff;
 8987	cp->h2_init = 0xffff;
 8988
 8989	return fflp_early_init(np);
 8990}
 8991
 8992static void __devinit niu_link_config_init(struct niu *np)
 8993{
 8994	struct niu_link_config *lp = &np->link_config;
 8995
 8996	lp->advertising = (ADVERTISED_10baseT_Half |
 8997			   ADVERTISED_10baseT_Full |
 8998			   ADVERTISED_100baseT_Half |
 8999			   ADVERTISED_100baseT_Full |
 9000			   ADVERTISED_1000baseT_Half |
 9001			   ADVERTISED_1000baseT_Full |
 9002			   ADVERTISED_10000baseT_Full |
 9003			   ADVERTISED_Autoneg);
 9004	lp->speed = lp->active_speed = SPEED_INVALID;
 9005	lp->duplex = DUPLEX_FULL;
 9006	lp->active_duplex = DUPLEX_INVALID;
 9007	lp->autoneg = 1;
 9008#if 0
 9009	lp->loopback_mode = LOOPBACK_MAC;
 9010	lp->active_speed = SPEED_10000;
 9011	lp->active_duplex = DUPLEX_FULL;
 9012#else
 9013	lp->loopback_mode = LOOPBACK_DISABLED;
 9014#endif
 9015}
 9016
 9017static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
 9018{
 9019	switch (np->port) {
 9020	case 0:
 9021		np->mac_regs = np->regs + XMAC_PORT0_OFF;
 9022		np->ipp_off  = 0x00000;
 9023		np->pcs_off  = 0x04000;
 9024		np->xpcs_off = 0x02000;
 9025		break;
 9026
 9027	case 1:
 9028		np->mac_regs = np->regs + XMAC_PORT1_OFF;
 9029		np->ipp_off  = 0x08000;
 9030		np->pcs_off  = 0x0a000;
 9031		np->xpcs_off = 0x08000;
 9032		break;
 9033
 9034	case 2:
 9035		np->mac_regs = np->regs + BMAC_PORT2_OFF;
 9036		np->ipp_off  = 0x04000;
 9037		np->pcs_off  = 0x0e000;
 9038		np->xpcs_off = ~0UL;
 9039		break;
 9040
 9041	case 3:
 9042		np->mac_regs = np->regs + BMAC_PORT3_OFF;
 9043		np->ipp_off  = 0x0c000;
 9044		np->pcs_off  = 0x12000;
 9045		np->xpcs_off = ~0UL;
 9046		break;
 9047
 9048	default:
 9049		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
 9050		return -EINVAL;
 9051	}
 9052
 9053	return 0;
 9054}
 9055
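     /* Try to enable MSI-X, retrying with the smaller vector count the
      * PCI core reports until the allocation succeeds; on failure we
      * fall back to the single legacy interrupt set up by the caller.
      */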
 9056static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
 9057{
 9058	struct msix_entry msi_vec[NIU_NUM_LDG];
 9059	struct niu_parent *parent = np->parent;
 9060	struct pci_dev *pdev = np->pdev;
 9061	int i, num_irqs, err;
 9062	u8 first_ldg;
 9063
 9064	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
 9065	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
 9066		ldg_num_map[i] = first_ldg + i;
 9067
 9068	num_irqs = (parent->rxchan_per_port[np->port] +
 9069		    parent->txchan_per_port[np->port] +
 9070		    (np->port == 0 ? 3 : 1));
 9071	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
 9072
 9073retry:
 9074	for (i = 0; i < num_irqs; i++) {
 9075		msi_vec[i].vector = 0;
 9076		msi_vec[i].entry = i;
 9077	}
 9078
 9079	err = pci_enable_msix(pdev, msi_vec, num_irqs);
 9080	if (err < 0) {
 9081		np->flags &= ~NIU_FLAGS_MSIX;
 9082		return;
 9083	}
 9084	if (err > 0) {
 9085		num_irqs = err;
 9086		goto retry;
 9087	}
 9088
 9089	np->flags |= NIU_FLAGS_MSIX;
 9090	for (i = 0; i < num_irqs; i++)
 9091		np->ldg[i].irq = msi_vec[i].vector;
 9092	np->num_ldg = num_irqs;
 9093}
 9094
 9095static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
 9096{
 9097#ifdef CONFIG_SPARC64
 9098	struct platform_device *op = np->op;
 9099	const u32 *int_prop;
 9100	int i;
 9101
 9102	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
 9103	if (!int_prop)
 9104		return -ENODEV;
 9105
 9106	for (i = 0; i < op->archdata.num_irqs; i++) {
 9107		ldg_num_map[i] = int_prop[i];
 9108		np->ldg[i].irq = op->archdata.irqs[i];
 9109	}
 9110
 9111	np->num_ldg = op->archdata.num_irqs;
 9112
 9113	return 0;
 9114#else
 9115	return -EINVAL;
 9116#endif
 9117}
 9118
 9119static int __devinit niu_ldg_init(struct niu *np)
 9120{
 9121	struct niu_parent *parent = np->parent;
 9122	u8 ldg_num_map[NIU_NUM_LDG];
 9123	int first_chan, num_chan;
 9124	int i, err, ldg_rotor;
 9125	u8 port;
 9126
 9127	np->num_ldg = 1;
 9128	np->ldg[0].irq = np->dev->irq;
 9129	if (parent->plat_type == PLAT_TYPE_NIU) {
 9130		err = niu_n2_irq_init(np, ldg_num_map);
 9131		if (err)
 9132			return err;
 9133	} else
 9134		niu_try_msix(np, ldg_num_map);
 9135
 9136	port = np->port;
 9137	for (i = 0; i < np->num_ldg; i++) {
 9138		struct niu_ldg *lp = &np->ldg[i];
 9139
 9140		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
 9141
 9142		lp->np = np;
 9143		lp->ldg_num = ldg_num_map[i];
 9144		lp->timer = 2; /* XXX */
 9145
 9146		/* On N2 NIU the firmware has setup the SID mappings so they go
 9147		 * to the correct values that will route the LDG to the proper
 9148		 * interrupt in the NCU interrupt table.
 9149		 */
 9150		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 9151			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
 9152			if (err)
 9153				return err;
 9154		}
 9155	}
 9156
 9157	/* We adopt the LDG assignment ordering used by the N2 NIU
 9158	 * 'interrupt' properties because that simplifies a lot of
 9159	 * things.  This ordering is:
 9160	 *
 9161	 *	MAC
 9162	 *	MIF	(if port zero)
 9163	 *	SYSERR	(if port zero)
 9164	 *	RX channels
 9165	 *	TX channels
 9166	 */
 9167
 9168	ldg_rotor = 0;
 9169
 9170	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
 9171				  LDN_MAC(port));
 9172	if (err)
 9173		return err;
 9174
 9175	ldg_rotor++;
 9176	if (ldg_rotor == np->num_ldg)
 9177		ldg_rotor = 0;
 9178
 9179	if (port == 0) {
 9180		err = niu_ldg_assign_ldn(np, parent,
 9181					 ldg_num_map[ldg_rotor],
 9182					 LDN_MIF);
 9183		if (err)
 9184			return err;
 9185
 9186		ldg_rotor++;
 9187		if (ldg_rotor == np->num_ldg)
 9188			ldg_rotor = 0;
 9189
 9190		err = niu_ldg_assign_ldn(np, parent,
 9191					 ldg_num_map[ldg_rotor],
 9192					 LDN_DEVICE_ERROR);
 9193		if (err)
 9194			return err;
 9195
 9196		ldg_rotor++;
 9197		if (ldg_rotor == np->num_ldg)
 9198			ldg_rotor = 0;
 9199
 9200	}
 9201
 9202	first_chan = 0;
 9203	for (i = 0; i < port; i++)
 9204		first_chan += parent->rxchan_per_port[i];
 9205	num_chan = parent->rxchan_per_port[port];
 9206
 9207	for (i = first_chan; i < (first_chan + num_chan); i++) {
 9208		err = niu_ldg_assign_ldn(np, parent,
 9209					 ldg_num_map[ldg_rotor],
 9210					 LDN_RXDMA(i));
 9211		if (err)
 9212			return err;
 9213		ldg_rotor++;
 9214		if (ldg_rotor == np->num_ldg)
 9215			ldg_rotor = 0;
 9216	}
 9217
 9218	first_chan = 0;
 9219	for (i = 0; i < port; i++)
 9220		first_chan += parent->txchan_per_port[i];
 9221	num_chan = parent->txchan_per_port[port];
 9222	for (i = first_chan; i < (first_chan + num_chan); i++) {
 9223		err = niu_ldg_assign_ldn(np, parent,
 9224					 ldg_num_map[ldg_rotor],
 9225					 LDN_TXDMA(i));
 9226		if (err)
 9227			return err;
 9228		ldg_rotor++;
 9229		if (ldg_rotor == np->num_ldg)
 9230			ldg_rotor = 0;
 9231	}
 9232
 9233	return 0;
 9234}
 9235
 9236static void __devexit niu_ldg_free(struct niu *np)
 9237{
 9238	if (np->flags & NIU_FLAGS_MSIX)
 9239		pci_disable_msix(np->pdev);
 9240}
 9241
 9242static int __devinit niu_get_of_props(struct niu *np)
 9243{
 9244#ifdef CONFIG_SPARC64
 9245	struct net_device *dev = np->dev;
 9246	struct device_node *dp;
 9247	const char *phy_type;
 9248	const u8 *mac_addr;
 9249	const char *model;
 9250	int prop_len;
 9251
 9252	if (np->parent->plat_type == PLAT_TYPE_NIU)
 9253		dp = np->op->dev.of_node;
 9254	else
 9255		dp = pci_device_to_OF_node(np->pdev);
 9256
 9257	phy_type = of_get_property(dp, "phy-type", &prop_len);
 9258	if (!phy_type) {
 9259		netdev_err(dev, "%s: OF node lacks phy-type property\n",
 9260			   dp->full_name);
 9261		return -EINVAL;
 9262	}
 9263
 9264	if (!strcmp(phy_type, "none"))
 9265		return -ENODEV;
 9266
 9267	strcpy(np->vpd.phy_type, phy_type);
 9268
 9269	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
 9270		netdev_err(dev, "%s: Illegal phy string [%s]\n",
 9271			   dp->full_name, np->vpd.phy_type);
 9272		return -EINVAL;
 9273	}
 9274
 9275	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
 9276	if (!mac_addr) {
 9277		netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
 9278			   dp->full_name);
 9279		return -EINVAL;
 9280	}
 9281	if (prop_len != dev->addr_len) {
 9282		netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
 9283			   dp->full_name, prop_len);
 9284	}
 9285	memcpy(dev->perm_addr, mac_addr, dev->addr_len);
 9286	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
 9287		netdev_err(dev, "%s: OF MAC address is invalid\n",
 9288			   dp->full_name);
 9289		netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
 9290		return -EINVAL;
 9291	}
 9292
 9293	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
 9294
 9295	model = of_get_property(dp, "model", &prop_len);
 9296
 9297	if (model)
 9298		strcpy(np->vpd.model, model);
 9299
 9300	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
 9301		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 9302			NIU_FLAGS_HOTPLUG_PHY);
 9303	}
 9304
 9305	return 0;
 9306#else
 9307	return -EINVAL;
 9308#endif
 9309}
 9310
 9311static int __devinit niu_get_invariants(struct niu *np)
 9312{
 9313	int err, have_props;
 9314	u32 offset;
 9315
 9316	err = niu_get_of_props(np);
 9317	if (err == -ENODEV)
 9318		return err;
 9319
 9320	have_props = !err;
 9321
 9322	err = niu_init_mac_ipp_pcs_base(np);
 9323	if (err)
 9324		return err;
 9325
 9326	if (have_props) {
 9327		err = niu_get_and_validate_port(np);
 9328		if (err)
 9329			return err;
 9330
 9331	} else  {
 9332		if (np->parent->plat_type == PLAT_TYPE_NIU)
 9333			return -EINVAL;
 9334
 9335		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
 9336		offset = niu_pci_vpd_offset(np);
 9337		netif_printk(np, probe, KERN_DEBUG, np->dev,
 9338			     "%s() VPD offset [%08x]\n", __func__, offset);
 9339		if (offset)
 9340			niu_pci_vpd_fetch(np, offset);
 9341		nw64(ESPC_PIO_EN, 0);
 9342
 9343		if (np->flags & NIU_FLAGS_VPD_VALID) {
 9344			niu_pci_vpd_validate(np);
 9345			err = niu_get_and_validate_port(np);
 9346			if (err)
 9347				return err;
 9348		}
 9349
 9350		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
 9351			err = niu_get_and_validate_port(np);
 9352			if (err)
 9353				return err;
 9354			err = niu_pci_probe_sprom(np);
 9355			if (err)
 9356				return err;
 9357		}
 9358	}
 9359
 9360	err = niu_probe_ports(np);
 9361	if (err)
 9362		return err;
 9363
 9364	niu_ldg_init(np);
 9365
 9366	niu_classifier_swstate_init(np);
 9367	niu_link_config_init(np);
 9368
 9369	err = niu_determine_phy_disposition(np);
 9370	if (!err)
 9371		err = niu_init_link(np);
 9372
 9373	return err;
 9374}
 9375
 9376static LIST_HEAD(niu_parent_list);
 9377static DEFINE_MUTEX(niu_parent_lock);
 9378static int niu_parent_index;
 9379
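     /* sysfs attributes exported by the "niu-board" parent platform
      * device, describing the probed port and channel layout.
      */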
 9380static ssize_t show_port_phy(struct device *dev,
 9381			     struct device_attribute *attr, char *buf)
 9382{
 9383	struct platform_device *plat_dev = to_platform_device(dev);
 9384	struct niu_parent *p = plat_dev->dev.platform_data;
 9385	u32 port_phy = p->port_phy;
 9386	char *orig_buf = buf;
 9387	int i;
 9388
 9389	if (port_phy == PORT_PHY_UNKNOWN ||
 9390	    port_phy == PORT_PHY_INVALID)
 9391		return 0;
 9392
 9393	for (i = 0; i < p->num_ports; i++) {
 9394		const char *type_str;
 9395		int type;
 9396
 9397		type = phy_decode(port_phy, i);
 9398		if (type == PORT_TYPE_10G)
 9399			type_str = "10G";
 9400		else
 9401			type_str = "1G";
 9402		buf += sprintf(buf,
 9403			       (i == 0) ? "%s" : " %s",
 9404			       type_str);
 9405	}
 9406	buf += sprintf(buf, "\n");
 9407	return buf - orig_buf;
 9408}
 9409
 9410static ssize_t show_plat_type(struct device *dev,
 9411			      struct device_attribute *attr, char *buf)
 9412{
 9413	struct platform_device *plat_dev = to_platform_device(dev);
 9414	struct niu_parent *p = plat_dev->dev.platform_data;
 9415	const char *type_str;
 9416
 9417	switch (p->plat_type) {
 9418	case PLAT_TYPE_ATLAS:
 9419		type_str = "atlas";
 9420		break;
 9421	case PLAT_TYPE_NIU:
 9422		type_str = "niu";
 9423		break;
 9424	case PLAT_TYPE_VF_P0:
 9425		type_str = "vf_p0";
 9426		break;
 9427	case PLAT_TYPE_VF_P1:
 9428		type_str = "vf_p1";
 9429		break;
 9430	default:
 9431		type_str = "unknown";
 9432		break;
 9433	}
 9434
 9435	return sprintf(buf, "%s\n", type_str);
 9436}
 9437
 9438static ssize_t __show_chan_per_port(struct device *dev,
 9439				    struct device_attribute *attr, char *buf,
 9440				    int rx)
 9441{
 9442	struct platform_device *plat_dev = to_platform_device(dev);
 9443	struct niu_parent *p = plat_dev->dev.platform_data;
 9444	char *orig_buf = buf;
 9445	u8 *arr;
 9446	int i;
 9447
 9448	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
 9449
 9450	for (i = 0; i < p->num_ports; i++) {
 9451		buf += sprintf(buf,
 9452			       (i == 0) ? "%d" : " %d",
 9453			       arr[i]);
 9454	}
 9455	buf += sprintf(buf, "\n");
 9456
 9457	return buf - orig_buf;
 9458}
 9459
 9460static ssize_t show_rxchan_per_port(struct device *dev,
 9461				    struct device_attribute *attr, char *buf)
 9462{
 9463	return __show_chan_per_port(dev, attr, buf, 1);
 9464}
 9465
 9466static ssize_t show_txchan_per_port(struct device *dev,
 9467				    struct device_attribute *attr, char *buf)
 9468{
 9469		return __show_chan_per_port(dev, attr, buf, 0);
 9470}
 9471
 9472static ssize_t show_num_ports(struct device *dev,
 9473			      struct device_attribute *attr, char *buf)
 9474{
 9475	struct platform_device *plat_dev = to_platform_device(dev);
 9476	struct niu_parent *p = plat_dev->dev.platform_data;
 9477
 9478	return sprintf(buf, "%d\n", p->num_ports);
 9479}
 9480
 9481static struct device_attribute niu_parent_attributes[] = {
 9482	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
 9483	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
 9484	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
 9485	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
 9486	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
 9487	{}
 9488};
 9489
 9490static struct niu_parent * __devinit niu_new_parent(struct niu *np,
 9491						    union niu_parent_id *id,
 9492						    u8 ptype)
 9493{
 9494	struct platform_device *plat_dev;
 9495	struct niu_parent *p;
 9496	int i;
 9497
 9498	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
 9499						   NULL, 0);
 9500	if (IS_ERR(plat_dev))
 9501		return NULL;
 9502
 9503	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
 9504		int err = device_create_file(&plat_dev->dev,
 9505					     &niu_parent_attributes[i]);
 9506		if (err)
 9507			goto fail_unregister;
 9508	}
 9509
 9510	p = kzalloc(sizeof(*p), GFP_KERNEL);
 9511	if (!p)
 9512		goto fail_unregister;
 9513
 9514	p->index = niu_parent_index++;
 9515
 9516	plat_dev->dev.platform_data = p;
 9517	p->plat_dev = plat_dev;
 9518
 9519	memcpy(&p->id, id, sizeof(*id));
 9520	p->plat_type = ptype;
 9521	INIT_LIST_HEAD(&p->list);
 9522	atomic_set(&p->refcnt, 0);
 9523	list_add(&p->list, &niu_parent_list);
 9524	spin_lock_init(&p->lock);
 9525
 9526	p->rxdma_clock_divider = 7500;
 9527
 9528	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
 9529	if (p->plat_type == PLAT_TYPE_NIU)
 9530		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
 9531
 9532	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
 9533		int index = i - CLASS_CODE_USER_PROG1;
 9534
 9535		p->tcam_key[index] = TCAM_KEY_TSEL;
 9536		p->flow_key[index] = (FLOW_KEY_IPSA |
 9537				      FLOW_KEY_IPDA |
 9538				      FLOW_KEY_PROTO |
 9539				      (FLOW_KEY_L4_BYTE12 <<
 9540				       FLOW_KEY_L4_0_SHIFT) |
 9541				      (FLOW_KEY_L4_BYTE12 <<
 9542				       FLOW_KEY_L4_1_SHIFT));
 9543	}
 9544
 9545	for (i = 0; i < LDN_MAX + 1; i++)
 9546		p->ldg_map[i] = LDG_INVALID;
 9547
 9548	return p;
 9549
 9550fail_unregister:
 9551	platform_device_unregister(plat_dev);
 9552	return NULL;
 9553}
 9554
 9555static struct niu_parent * __devinit niu_get_parent(struct niu *np,
 9556						    union niu_parent_id *id,
 9557						    u8 ptype)
 9558{
 9559	struct niu_parent *p, *tmp;
 9560	int port = np->port;
 9561
 9562	mutex_lock(&niu_parent_lock);
 9563	p = NULL;
 9564	list_for_each_entry(tmp, &niu_parent_list, list) {
 9565		if (!memcmp(id, &tmp->id, sizeof(*id))) {
 9566			p = tmp;
 9567			break;
 9568		}
 9569	}
 9570	if (!p)
 9571		p = niu_new_parent(np, id, ptype);
 9572
 9573	if (p) {
 9574		char port_name[8];
 9575		int err;
 9576
 9577		sprintf(port_name, "port%d", port);
 9578		err = sysfs_create_link(&p->plat_dev->dev.kobj,
 9579					&np->device->kobj,
 9580					port_name);
 9581		if (!err) {
 9582			p->ports[port] = np;
 9583			atomic_inc(&p->refcnt);
 9584		}
 9585	}
 9586	mutex_unlock(&niu_parent_lock);
 9587
 9588	return p;
 9589}
 9590
 9591static void niu_put_parent(struct niu *np)
 9592{
 9593	struct niu_parent *p = np->parent;
 9594	u8 port = np->port;
 9595	char port_name[8];
 9596
 9597	BUG_ON(!p || p->ports[port] != np);
 9598
 9599	netif_printk(np, probe, KERN_DEBUG, np->dev,
 9600		     "%s() port[%u]\n", __func__, port);
 9601
 9602	sprintf(port_name, "port%d", port);
 9603
 9604	mutex_lock(&niu_parent_lock);
 9605
 9606	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
 9607
 9608	p->ports[port] = NULL;
 9609	np->parent = NULL;
 9610
 9611	if (atomic_dec_and_test(&p->refcnt)) {
 9612		list_del(&p->list);
 9613		platform_device_unregister(p->plat_dev);
 9614	}
 9615
 9616	mutex_unlock(&niu_parent_lock);
 9617}
 9618
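     /* niu_ops for PCI devices: thin wrappers around the generic
      * streaming/coherent DMA API.
      */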
 9619static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
 9620				    u64 *handle, gfp_t flag)
 9621{
 9622	dma_addr_t dh;
 9623	void *ret;
 9624
 9625	ret = dma_alloc_coherent(dev, size, &dh, flag);
 9626	if (ret)
 9627		*handle = dh;
 9628	return ret;
 9629}
 9630
 9631static void niu_pci_free_coherent(struct device *dev, size_t size,
 9632				  void *cpu_addr, u64 handle)
 9633{
 9634	dma_free_coherent(dev, size, cpu_addr, handle);
 9635}
 9636
 9637static u64 niu_pci_map_page(struct device *dev, struct page *page,
 9638			    unsigned long offset, size_t size,
 9639			    enum dma_data_direction direction)
 9640{
 9641	return dma_map_page(dev, page, offset, size, direction);
 9642}
 9643
 9644static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
 9645			       size_t size, enum dma_data_direction direction)
 9646{
 9647	dma_unmap_page(dev, dma_address, size, direction);
 9648}
 9649
 9650static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
 9651			      size_t size,
 9652			      enum dma_data_direction direction)
 9653{
 9654	return dma_map_single(dev, cpu_addr, size, direction);
 9655}
 9656
 9657static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
 9658				 size_t size,
 9659				 enum dma_data_direction direction)
 9660{
 9661	dma_unmap_single(dev, dma_address, size, direction);
 9662}
 9663
 9664static const struct niu_ops niu_pci_ops = {
 9665	.alloc_coherent	= niu_pci_alloc_coherent,
 9666	.free_coherent	= niu_pci_free_coherent,
 9667	.map_page	= niu_pci_map_page,
 9668	.unmap_page	= niu_pci_unmap_page,
 9669	.map_single	= niu_pci_map_single,
 9670	.unmap_single	= niu_pci_unmap_single,
 9671};
 9672
 9673static void __devinit niu_driver_version(void)
 9674{
 9675	static int niu_version_printed;
 9676
 9677	if (niu_version_printed++ == 0)
 9678		pr_info("%s", version);
 9679}
 9680
 9681static struct net_device * __devinit niu_alloc_and_init(
 9682	struct device *gen_dev, struct pci_dev *pdev,
 9683	struct platform_device *op, const struct niu_ops *ops,
 9684	u8 port)
 9685{
 9686	struct net_device *dev;
 9687	struct niu *np;
 9688
 9689	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
 9690	if (!dev) {
 9691		dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
 9692		return NULL;
 9693	}
 9694
 9695	SET_NETDEV_DEV(dev, gen_dev);
 9696
 9697	np = netdev_priv(dev);
 9698	np->dev = dev;
 9699	np->pdev = pdev;
 9700	np->op = op;
 9701	np->device = gen_dev;
 9702	np->ops = ops;
 9703
 9704	np->msg_enable = niu_debug;
 9705
 9706	spin_lock_init(&np->lock);
 9707	INIT_WORK(&np->reset_task, niu_reset_task);
 9708
 9709	np->port = port;
 9710
 9711	return dev;
 9712}
 9713
 9714static const struct net_device_ops niu_netdev_ops = {
 9715	.ndo_open		= niu_open,
 9716	.ndo_stop		= niu_close,
 9717	.ndo_start_xmit		= niu_start_xmit,
 9718	.ndo_get_stats64	= niu_get_stats,
 9719	.ndo_set_multicast_list	= niu_set_rx_mode,
 9720	.ndo_validate_addr	= eth_validate_addr,
 9721	.ndo_set_mac_address	= niu_set_mac_addr,
 9722	.ndo_do_ioctl		= niu_ioctl,
 9723	.ndo_tx_timeout		= niu_tx_timeout,
 9724	.ndo_change_mtu		= niu_change_mtu,
 9725};
 9726
 9727static void __devinit niu_assign_netdev_ops(struct net_device *dev)
 9728{
 9729	dev->netdev_ops = &niu_netdev_ops;
 9730	dev->ethtool_ops = &niu_ethtool_ops;
 9731	dev->watchdog_timeo = NIU_TX_TIMEOUT;
 9732}
 9733
 9734static void __devinit niu_device_announce(struct niu *np)
 9735{
 9736	struct net_device *dev = np->dev;
 9737
 9738	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
 9739
 9740	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
 9741		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
 9742				dev->name,
 9743				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
 9744				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
 9745				(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
 9746				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
 9747				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
 9748				np->vpd.phy_type);
 9749	} else {
 9750		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
 9751				dev->name,
 9752				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
 9753				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
 9754				(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
 9755				 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
 9756				  "COPPER")),
 9757				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
 9758				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
 9759				np->vpd.phy_type);
 9760	}
 9761}
 9762
 9763static void __devinit niu_set_basic_features(struct net_device *dev)
 9764{
 9765	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
 9766	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
 9767}
 9768
 9769static int __devinit niu_pci_init_one(struct pci_dev *pdev,
 9770				      const struct pci_device_id *ent)
 9771{
 9772	union niu_parent_id parent_id;
 9773	struct net_device *dev;
 9774	struct niu *np;
 9775	int err, pos;
 9776	u64 dma_mask;
 9777	u16 val16;
 9778
 9779	niu_driver_version();
 9780
 9781	err = pci_enable_device(pdev);
 9782	if (err) {
 9783		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 9784		return err;
 9785	}
 9786
 9787	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
 9788	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 9789		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
 9790		err = -ENODEV;
 9791		goto err_out_disable_pdev;
 9792	}
 9793
 9794	err = pci_request_regions(pdev, DRV_MODULE_NAME);
 9795	if (err) {
 9796		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
 9797		goto err_out_disable_pdev;
 9798	}
 9799
 9800	pos = pci_pcie_cap(pdev);
 9801	if (pos <= 0) {
 9802		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
     		err = -ENODEV;
 9803		goto err_out_free_res;
 9804	}
 9805
 9806	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
 9807				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
 9808	if (!dev) {
 9809		err = -ENOMEM;
 9810		goto err_out_free_res;
 9811	}
 9812	np = netdev_priv(dev);
 9813
 9814	memset(&parent_id, 0, sizeof(parent_id));
 9815	parent_id.pci.domain = pci_domain_nr(pdev->bus);
 9816	parent_id.pci.bus = pdev->bus->number;
 9817	parent_id.pci.device = PCI_SLOT(pdev->devfn);
 9818
 9819	np->parent = niu_get_parent(np, &parent_id,
 9820				    PLAT_TYPE_ATLAS);
 9821	if (!np->parent) {
 9822		err = -ENOMEM;
 9823		goto err_out_free_dev;
 9824	}
 9825
 9826	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
 9827	val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
 9828	val16 |= (PCI_EXP_DEVCTL_CERE |
 9829		  PCI_EXP_DEVCTL_NFERE |
 9830		  PCI_EXP_DEVCTL_FERE |
 9831		  PCI_EXP_DEVCTL_URRE |
 9832		  PCI_EXP_DEVCTL_RELAX_EN);
 9833	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
 9834
 9835	dma_mask = DMA_BIT_MASK(44);
 9836	err = pci_set_dma_mask(pdev, dma_mask);
 9837	if (!err) {
 9838		dev->features |= NETIF_F_HIGHDMA;
 9839		err = pci_set_consistent_dma_mask(pdev, dma_mask);
 9840		if (err) {
 9841			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
 9842			goto err_out_release_parent;
 9843		}
 9844	}
 9845	if (err || dma_mask == DMA_BIT_MASK(32)) {
 9846		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 9847		if (err) {
 9848			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
 9849			goto err_out_release_parent;
 9850		}
 9851	}
 9852
 9853	niu_set_basic_features(dev);
 9854
 9855	np->regs = pci_ioremap_bar(pdev, 0);
 9856	if (!np->regs) {
 9857		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
 9858		err = -ENOMEM;
 9859		goto err_out_release_parent;
 9860	}
 9861
 9862	pci_set_master(pdev);
 9863	pci_save_state(pdev);
 9864
 9865	dev->irq = pdev->irq;
 9866
 9867	niu_assign_netdev_ops(dev);
 9868
 9869	err = niu_get_invariants(np);
 9870	if (err) {
 9871		if (err != -ENODEV)
 9872			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
 9873		goto err_out_iounmap;
 9874	}
 9875
 9876	err = register_netdev(dev);
 9877	if (err) {
 9878		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
 9879		goto err_out_iounmap;
 9880	}
 9881
 9882	pci_set_drvdata(pdev, dev);
 9883
 9884	niu_device_announce(np);
 9885
 9886	return 0;
 9887
 9888err_out_iounmap:
 9889	if (np->regs) {
 9890		iounmap(np->regs);
 9891		np->regs = NULL;
 9892	}
 9893
 9894err_out_release_parent:
 9895	niu_put_parent(np);
 9896
 9897err_out_free_dev:
 9898	free_netdev(dev);
 9899
 9900err_out_free_res:
 9901	pci_release_regions(pdev);
 9902
 9903err_out_disable_pdev:
 9904	pci_disable_device(pdev);
 9905	pci_set_drvdata(pdev, NULL);
 9906
 9907	return err;
 9908}
 9909
 9910static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
 9911{
 9912	struct net_device *dev = pci_get_drvdata(pdev);
 9913
 9914	if (dev) {
 9915		struct niu *np = netdev_priv(dev);
 9916
 9917		unregister_netdev(dev);
 9918		if (np->regs) {
 9919			iounmap(np->regs);
 9920			np->regs = NULL;
 9921		}
 9922
 9923		niu_ldg_free(np);
 9924
 9925		niu_put_parent(np);
 9926
 9927		free_netdev(dev);
 9928		pci_release_regions(pdev);
 9929		pci_disable_device(pdev);
 9930		pci_set_drvdata(pdev, NULL);
 9931	}
 9932}
 9933
 9934static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
 9935{
 9936	struct net_device *dev = pci_get_drvdata(pdev);
 9937	struct niu *np = netdev_priv(dev);
 9938	unsigned long flags;
 9939
 9940	if (!netif_running(dev))
 9941		return 0;
 9942
 9943	flush_work_sync(&np->reset_task);
 9944	niu_netif_stop(np);
 9945
 9946	del_timer_sync(&np->timer);
 9947
 9948	spin_lock_irqsave(&np->lock, flags);
 9949	niu_enable_interrupts(np, 0);
 9950	spin_unlock_irqrestore(&np->lock, flags);
 9951
 9952	netif_device_detach(dev);
 9953
 9954	spin_lock_irqsave(&np->lock, flags);
 9955	niu_stop_hw(np);
 9956	spin_unlock_irqrestore(&np->lock, flags);
 9957
 9958	pci_save_state(pdev);
 9959
 9960	return 0;
 9961}
 9962
 9963static int niu_resume(struct pci_dev *pdev)
 9964{
 9965	struct net_device *dev = pci_get_drvdata(pdev);
 9966	struct niu *np = netdev_priv(dev);
 9967	unsigned long flags;
 9968	int err;
 9969
 9970	if (!netif_running(dev))
 9971		return 0;
 9972
 9973	pci_restore_state(pdev);
 9974
 9975	netif_device_attach(dev);
 9976
 9977	spin_lock_irqsave(&np->lock, flags);
 9978
 9979	err = niu_init_hw(np);
 9980	if (!err) {
 9981		np->timer.expires = jiffies + HZ;
 9982		add_timer(&np->timer);
 9983		niu_netif_start(np);
 9984	}
 9985
 9986	spin_unlock_irqrestore(&np->lock, flags);
 9987
 9988	return err;
 9989}
 9990
 9991static struct pci_driver niu_pci_driver = {
 9992	.name		= DRV_MODULE_NAME,
 9993	.id_table	= niu_pci_tbl,
 9994	.probe		= niu_pci_init_one,
 9995	.remove		= __devexit_p(niu_pci_remove_one),
 9996	.suspend	= niu_suspend,
 9997	.resume		= niu_resume,
 9998};
 9999
10000#ifdef CONFIG_SPARC64
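     /* niu_ops for the N2 NIU: the device uses physical addresses
      * directly, so mapping reduces to __pa()/page_to_phys() and
      * unmapping is a no-op.
      */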
10001static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
10002				     u64 *dma_addr, gfp_t flag)
10003{
10004	unsigned long order = get_order(size);
10005	unsigned long page = __get_free_pages(flag, order);
10006
10007	if (page == 0UL)
10008		return NULL;
10009	memset((char *)page, 0, PAGE_SIZE << order);
10010	*dma_addr = __pa(page);
10011
10012	return (void *) page;
10013}
10014
10015static void niu_phys_free_coherent(struct device *dev, size_t size,
10016				   void *cpu_addr, u64 handle)
10017{
10018	unsigned long order = get_order(size);
10019
10020	free_pages((unsigned long) cpu_addr, order);
10021}
10022
10023static u64 niu_phys_map_page(struct device *dev, struct page *page,
10024			     unsigned long offset, size_t size,
10025			     enum dma_data_direction direction)
10026{
10027	return page_to_phys(page) + offset;
10028}
10029
10030static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
10031				size_t size, enum dma_data_direction direction)
10032{
10033	/* Nothing to do.  */
10034}
10035
10036static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
10037			       size_t size,
10038			       enum dma_data_direction direction)
10039{
10040	return __pa(cpu_addr);
10041}
10042
10043static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
10044				  size_t size,
10045				  enum dma_data_direction direction)
10046{
10047	/* Nothing to do.  */
10048}
10049
10050static const struct niu_ops niu_phys_ops = {
10051	.alloc_coherent	= niu_phys_alloc_coherent,
10052	.free_coherent	= niu_phys_free_coherent,
10053	.map_page	= niu_phys_map_page,
10054	.unmap_page	= niu_phys_unmap_page,
10055	.map_single	= niu_phys_map_single,
10056	.unmap_single	= niu_phys_unmap_single,
10057};
10058
10059static int __devinit niu_of_probe(struct platform_device *op)
10060{
10061	union niu_parent_id parent_id;
10062	struct net_device *dev;
10063	struct niu *np;
10064	const u32 *reg;
10065	int err;
10066
10067	niu_driver_version();
10068
10069	reg = of_get_property(op->dev.of_node, "reg", NULL);
10070	if (!reg) {
10071		dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
10072			op->dev.of_node->full_name);
10073		return -ENODEV;
10074	}
10075
10076	dev = niu_alloc_and_init(&op->dev, NULL, op,
10077				 &niu_phys_ops, reg[0] & 0x1);
10078	if (!dev) {
10079		err = -ENOMEM;
10080		goto err_out;
10081	}
10082	np = netdev_priv(dev);
10083
10084	memset(&parent_id, 0, sizeof(parent_id));
10085	parent_id.of = of_get_parent(op->dev.of_node);
10086
10087	np->parent = niu_get_parent(np, &parent_id,
10088				    PLAT_TYPE_NIU);
10089	if (!np->parent) {
10090		err = -ENOMEM;
10091		goto err_out_free_dev;
10092	}
10093
10094	niu_set_basic_features(dev);
10095
10096	np->regs = of_ioremap(&op->resource[1], 0,
10097			      resource_size(&op->resource[1]),
10098			      "niu regs");
10099	if (!np->regs) {
10100		dev_err(&op->dev, "Cannot map device registers, aborting\n");
10101		err = -ENOMEM;
10102		goto err_out_release_parent;
10103	}
10104
10105	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
10106				    resource_size(&op->resource[2]),
10107				    "niu vregs-1");
10108	if (!np->vir_regs_1) {
10109		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
10110		err = -ENOMEM;
10111		goto err_out_iounmap;
10112	}
10113
10114	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
10115				    resource_size(&op->resource[3]),
10116				    "niu vregs-2");
10117	if (!np->vir_regs_2) {
10118		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
10119		err = -ENOMEM;
10120		goto err_out_iounmap;
10121	}
10122
10123	niu_assign_netdev_ops(dev);
10124
10125	err = niu_get_invariants(np);
10126	if (err) {
10127		if (err != -ENODEV)
10128			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
10129		goto err_out_iounmap;
10130	}
10131
10132	err = register_netdev(dev);
10133	if (err) {
10134		dev_err(&op->dev, "Cannot register net device, aborting\n");
10135		goto err_out_iounmap;
10136	}
10137
10138	dev_set_drvdata(&op->dev, dev);
10139
10140	niu_device_announce(np);
10141
10142	return 0;
10143
10144err_out_iounmap:
10145	if (np->vir_regs_1) {
10146		of_iounmap(&op->resource[2], np->vir_regs_1,
10147			   resource_size(&op->resource[2]));
10148		np->vir_regs_1 = NULL;
10149	}
10150
10151	if (np->vir_regs_2) {
10152		of_iounmap(&op->resource[3], np->vir_regs_2,
10153			   resource_size(&op->resource[3]));
10154		np->vir_regs_2 = NULL;
10155	}
10156
10157	if (np->regs) {
10158		of_iounmap(&op->resource[1], np->regs,
10159			   resource_size(&op->resource[1]));
10160		np->regs = NULL;
10161	}
10162
10163err_out_release_parent:
10164	niu_put_parent(np);
10165
10166err_out_free_dev:
10167	free_netdev(dev);
10168
10169err_out:
10170	return err;
10171}
10172
10173static int __devexit niu_of_remove(struct platform_device *op)
10174{
10175	struct net_device *dev = dev_get_drvdata(&op->dev);
10176
10177	if (dev) {
10178		struct niu *np = netdev_priv(dev);
10179
10180		unregister_netdev(dev);
10181
10182		if (np->vir_regs_1) {
10183			of_iounmap(&op->resource[2], np->vir_regs_1,
10184				   resource_size(&op->resource[2]));
10185			np->vir_regs_1 = NULL;
10186		}
10187
10188		if (np->vir_regs_2) {
10189			of_iounmap(&op->resource[3], np->vir_regs_2,
10190				   resource_size(&op->resource[3]));
10191			np->vir_regs_2 = NULL;
10192		}
10193
10194		if (np->regs) {
10195			of_iounmap(&op->resource[1], np->regs,
10196				   resource_size(&op->resource[1]));
10197			np->regs = NULL;
10198		}
10199
10200		niu_ldg_free(np);
10201
10202		niu_put_parent(np);
10203
10204		free_netdev(dev);
10205		dev_set_drvdata(&op->dev, NULL);
10206	}
10207	return 0;
10208}
10209
10210static const struct of_device_id niu_match[] = {
10211	{
10212		.name = "network",
10213		.compatible = "SUNW,niusl",
10214	},
10215	{},
10216};
10217MODULE_DEVICE_TABLE(of, niu_match);
10218
10219static struct platform_driver niu_of_driver = {
10220	.driver = {
10221		.name = "niu",
10222		.owner = THIS_MODULE,
10223		.of_match_table = niu_match,
10224	},
10225	.probe		= niu_of_probe,
10226	.remove		= __devexit_p(niu_of_remove),
10227};
10228
10229#endif /* CONFIG_SPARC64 */
10230
10231static int __init niu_init(void)
10232{
10233	int err = 0;
10234
10235	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
10236
10237	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
10238
10239#ifdef CONFIG_SPARC64
10240	err = platform_driver_register(&niu_of_driver);
10241#endif
10242
10243	if (!err) {
10244		err = pci_register_driver(&niu_pci_driver);
10245#ifdef CONFIG_SPARC64
10246		if (err)
10247			platform_driver_unregister(&niu_of_driver);
10248#endif
10249	}
10250
10251	return err;
10252}
10253
10254static void __exit niu_exit(void)
10255{
10256	pci_unregister_driver(&niu_pci_driver);
10257#ifdef CONFIG_SPARC64
10258	platform_driver_unregister(&niu_of_driver);
10259#endif
10260}
10261
10262module_init(niu_init);
10263module_exit(niu_exit);