/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/io.h>
#include <linux/of_device.h>

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif
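
/* Note (illustrative, not from the original sources): on 32-bit
 * platforms without native 64-bit MMIO accessors, the fallbacks above
 * split each access into two 32-bit operations, low word first, so a
 * 64-bit read or write is not atomic with respect to the hardware.
 * Callers are expected to serialize register access (e.g. under
 * np->lock) where that matters.
 */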

static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))
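
/* Usage sketch: these accessors pick up a local "struct niu *np" from
 * the calling scope, so a typical read-modify-write looks like
 *
 *	u64 val = nr64_mac(XMAC_CONFIG);
 *	nw64_mac(XMAC_CONFIG, val | XMAC_CONFIG_FORCE_LED_ON);
 *
 * (both identifiers are used later in this file).
 */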

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
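
/* The macro wrapper exists so that LIMIT and DELAY, which must be
 * compile-time constants, are sanity-checked with BUILD_BUG_ON() before
 * the polling helper runs.  A hypothetical call site (register names
 * here are placeholders, not identifiers from niu.h) would look like
 *
 *	niu_set_and_wait_clear_mac(np, MAC_RESET_REG, MAC_RESET_BIT,
 *				   1000, 100, "MAC_RESET_REG");
 */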

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}
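
/* Note: logical device numbers 0-63 are masked through the LD_IM0
 * register bank and 64..LDN_MAX through LD_IM1; clearing a device's
 * mask bit enables its interrupt, setting it disables it.  After
 * masking, niu_ldg_rearm() re-arms the owning logical device group
 * with its timer value.
 */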

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

static u32 phy_encode(u32 type, int port)
{
	return type << (port * 2);
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}
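
/* Worked example (values are illustrative): each port's PHY type
 * occupies two bits of the packed map, so
 *
 *	u32 map = phy_encode(3, 0) | phy_encode(1, 1);
 *
 * stores type 3 for port 0 in bits 1:0 and type 1 for port 1 in bits
 * 3:2, and phy_decode(map, 1) recovers the 1 by shifting down and
 * masking with PORT_TYPE_MASK.
 */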

static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}
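
/* Note: these helpers implement the two-frame (clause 45 style) MDIO
 * sequence, an address frame (MDIO_ADDR_OP) followed by a read or
 * write frame, with mdio_wait() polling MIF_FRAME_OUTPUT for the
 * turnaround bit after each frame.  mdio_read() thus returns either a
 * 16-bit register value or a negative errno, which is why callers test
 * "err < 0" rather than "err != 0".
 */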

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}
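
/* Note: the ESR2 PLL configuration registers are 32 bits wide but each
 * MDIO transaction moves only 16 bits, so a value is written as a
 * low/high register pair: ..._CFG_L(channel) takes val & 0xffff and
 * ..._CFG_H(channel) takes val >> 16.
 */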

/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}

static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}
	return 0;
}

static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0)
			*val |= ((err & 0xffff) << 16);
		err = 0;
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}

static int esr_reset(struct niu *np)
{
	u32 uninitialized_var(reset);
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
			   np->port, reset);
		return -ENODEV;
	}

	return 0;
}
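
/* Note (a reading of the code, not vendor documentation): esr_reset()
 * pulses the per-lane reset bits through the ESR_RXTX_RESET_CTRL_L/_H
 * pair with ~200us settle delays; the bits are expected to self-clear,
 * so any nonzero value read back via esr_read_reset() means the SERDES
 * never came out of reset.
 */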

static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}

static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}

static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}

static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}
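
/* Note: a 10G serdes link is reported up only when bit 0x1000 of
 * XPCS_STATUS(0) is set and the fault bit 0x01000000 of XMAC_INTER2 is
 * clear; when up it is by definition 10000/full, so no negotiated
 * speed or duplex is read back.
 */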

static int link_status_mii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int err;
	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
	int supported, advertising, active_speed, active_duplex;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (unlikely(err < 0))
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (unlikely(err < 0))
		return err;
	bmsr = err;

	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
	if (unlikely(err < 0))
		return err;
	advert = err;

	err = mii_read(np, np->phy_addr, MII_LPA);
	if (unlikely(err < 0))
		return err;
	lpa = err;

	if (likely(bmsr & BMSR_ESTATEN)) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (unlikely(err < 0))
			return err;
		estatus = err;

		err = mii_read(np, np->phy_addr, MII_CTRL1000);
		if (unlikely(err < 0))
			return err;
		ctrl1000 = err;

		err = mii_read(np, np->phy_addr, MII_STAT1000);
		if (unlikely(err < 0))
			return err;
		stat1000 = err;
	} else
		estatus = ctrl1000 = stat1000 = 0;

	supported = 0;
	if (bmsr & BMSR_ANEGCAPABLE)
		supported |= SUPPORTED_Autoneg;
	if (bmsr & BMSR_10HALF)
		supported |= SUPPORTED_10baseT_Half;
	if (bmsr & BMSR_10FULL)
		supported |= SUPPORTED_10baseT_Full;
	if (bmsr & BMSR_100HALF)
		supported |= SUPPORTED_100baseT_Half;
	if (bmsr & BMSR_100FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (estatus & ESTATUS_1000_THALF)
		supported |= SUPPORTED_1000baseT_Half;
	if (estatus & ESTATUS_1000_TFULL)
		supported |= SUPPORTED_1000baseT_Full;
	lp->supported = supported;

	advertising = mii_adv_to_ethtool_adv_t(advert);
	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);

	if (bmcr & BMCR_ANENABLE) {
		int neg, neg1000;

		lp->active_autoneg = 1;
		advertising |= ADVERTISED_Autoneg;

		neg = advert & lpa;
		neg1000 = (ctrl1000 << 2) & stat1000;

		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
			active_speed = SPEED_1000;
		else if (neg & LPA_100)
			active_speed = SPEED_100;
		else if (neg & (LPA_10HALF | LPA_10FULL))
			active_speed = SPEED_10;
		else
			active_speed = SPEED_INVALID;

		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
			active_duplex = DUPLEX_FULL;
		else if (active_speed != SPEED_INVALID)
			active_duplex = DUPLEX_HALF;
		else
			active_duplex = DUPLEX_INVALID;
	} else {
		lp->active_autoneg = 0;

		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
			active_speed = SPEED_1000;
		else if (bmcr & BMCR_SPEED100)
			active_speed = SPEED_100;
		else
			active_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			active_duplex = DUPLEX_FULL;
		else
			active_duplex = DUPLEX_HALF;
	}

	lp->active_advertising = advertising;
	lp->active_speed = active_speed;
	lp->active_duplex = active_duplex;
	*link_up_p = !!(bmsr & BMSR_LSTATUS);

	return 0;
}
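
/* Note on the autoneg resolution above: MII_CTRL1000 advertises
 * 1000BASE-T ability in bits 8/9 while MII_STAT1000 reports the link
 * partner's ability in bits 10/11, so "(ctrl1000 << 2) & stat1000"
 * lines the two registers up and intersects them in the LPA_1000FULL /
 * LPA_1000HALF bit positions.
 */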

static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

 1221	err = mii_read(np, np->phy_addr, MII_BMSR);
 1222	if (err < 0)
 1223		goto out;
 1224
 1225	bmsr = err;
 1226	if (bmsr & BMSR_LSTATUS) {
 1227		u16 adv, lpa;
 1228
 1229		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
 1230		if (err < 0)
 1231			goto out;
 1232		adv = err;
 1233
 1234		err = mii_read(np, np->phy_addr, MII_LPA);
 1235		if (err < 0)
 1236			goto out;
 1237		lpa = err;
 1238
 1239		err = mii_read(np, np->phy_addr, MII_ESTATUS);
 1240		if (err < 0)
 1241			goto out;
 1242		link_up = 1;
 1243		current_speed = SPEED_1000;
 1244		current_duplex = DUPLEX_FULL;
 1245
 1246	}
 1247	lp->active_speed = current_speed;
 1248	lp->active_duplex = current_duplex;
 1249	err = 0;
 1250
 1251out:
 1252	spin_unlock_irqrestore(&np->lock, flags);
 1253
 1254	*link_up_p = link_up;
 1255	return err;
 1256}
 1257
 1258static int link_status_1g(struct niu *np, int *link_up_p)
 1259{
 1260	struct niu_link_config *lp = &np->link_config;
 1261	unsigned long flags;
 1262	int err;
 1263
 1264	spin_lock_irqsave(&np->lock, flags);
 1265
 1266	err = link_status_mii(np, link_up_p);
 1267	lp->supported |= SUPPORTED_TP;
 1268	lp->active_advertising |= ADVERTISED_TP;
 1269
 1270	spin_unlock_irqrestore(&np->lock, flags);
 1271	return err;
 1272}
 1273
 1274static int bcm8704_reset(struct niu *np)
 1275{
 1276	int err, limit;
 1277
 1278	err = mdio_read(np, np->phy_addr,
 1279			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
 1280	if (err < 0 || err == 0xffff)
 1281		return err;
 1282	err |= BMCR_RESET;
 1283	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 1284			 MII_BMCR, err);
 1285	if (err)
 1286		return err;
 1287
 1288	limit = 1000;
 1289	while (--limit >= 0) {
 1290		err = mdio_read(np, np->phy_addr,
 1291				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
 1292		if (err < 0)
 1293			return err;
 1294		if (!(err & BMCR_RESET))
 1295			break;
 1296	}
 1297	if (limit < 0) {
 1298		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
 1299			   np->port, (err & 0xffff));
 1300		return -ENODEV;
 1301	}
 1302	return 0;
 1303}
 1304
 1305/* When written, certain PHY registers need to be read back twice
 1306 * in order for the bits to settle properly.
 1307 */
 1308static int bcm8704_user_dev3_readback(struct niu *np, int reg)
 1309{
 1310	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
 1311	if (err < 0)
 1312		return err;
 1313	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
 1314	if (err < 0)
 1315		return err;
 1316	return 0;
 1317}
 1318
 1319static int bcm8706_init_user_dev3(struct niu *np)
 1320{
 1321	int err;
 1322
 1323
 1324	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1325			BCM8704_USER_OPT_DIGITAL_CTRL);
 1326	if (err < 0)
 1327		return err;
 1328	err &= ~USER_ODIG_CTRL_GPIOS;
 1329	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
 1330	err |=  USER_ODIG_CTRL_RESV2;
 1331	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1332			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
 1333	if (err)
 1334		return err;
 1335
 1336	mdelay(1000);
 1337
 1338	return 0;
 1339}
 1340
 1341static int bcm8704_init_user_dev3(struct niu *np)
 1342{
 1343	int err;
 1344
 1345	err = mdio_write(np, np->phy_addr,
 1346			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
 1347			 (USER_CONTROL_OPTXRST_LVL |
 1348			  USER_CONTROL_OPBIASFLT_LVL |
 1349			  USER_CONTROL_OBTMPFLT_LVL |
 1350			  USER_CONTROL_OPPRFLT_LVL |
 1351			  USER_CONTROL_OPTXFLT_LVL |
 1352			  USER_CONTROL_OPRXLOS_LVL |
 1353			  USER_CONTROL_OPRXFLT_LVL |
 1354			  USER_CONTROL_OPTXON_LVL |
 1355			  (0x3f << USER_CONTROL_RES1_SHIFT)));
 1356	if (err)
 1357		return err;
 1358
 1359	err = mdio_write(np, np->phy_addr,
 1360			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
 1361			 (USER_PMD_TX_CTL_XFP_CLKEN |
 1362			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
 1363			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
 1364			  USER_PMD_TX_CTL_TSCK_LPWREN));
 1365	if (err)
 1366		return err;
 1367
 1368	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
 1369	if (err)
 1370		return err;
 1371	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
 1372	if (err)
 1373		return err;
 1374
 1375	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1376			BCM8704_USER_OPT_DIGITAL_CTRL);
 1377	if (err < 0)
 1378		return err;
 1379	err &= ~USER_ODIG_CTRL_GPIOS;
 1380	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
 1381	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1382			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
 1383	if (err)
 1384		return err;
 1385
 1386	mdelay(1000);
 1387
 1388	return 0;
 1389}
 1390
 1391static int mrvl88x2011_act_led(struct niu *np, int val)
 1392{
 1393	int	err;
 1394
 1395	err  = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
 1396		MRVL88X2011_LED_8_TO_11_CTL);
 1397	if (err < 0)
 1398		return err;
 1399
 1400	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
 1401	err |=  MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);
 1402
 1403	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
 1404			  MRVL88X2011_LED_8_TO_11_CTL, err);
 1405}
 1406
 1407static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
 1408{
 1409	int	err;
 1410
 1411	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
 1412			MRVL88X2011_LED_BLINK_CTL);
 1413	if (err >= 0) {
 1414		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
 1415		err |= (rate << 4);
 1416
 1417		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
 1418				 MRVL88X2011_LED_BLINK_CTL, err);
 1419	}
 1420
 1421	return err;
 1422}
 1423
 1424static int xcvr_init_10g_mrvl88x2011(struct niu *np)
 1425{
 1426	int	err;
 1427
 1428	/* Set LED functions */
 1429	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
 1430	if (err)
 1431		return err;
 1432
 1433	/* led activity */
 1434	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
 1435	if (err)
 1436		return err;
 1437
 1438	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1439			MRVL88X2011_GENERAL_CTL);
 1440	if (err < 0)
 1441		return err;
 1442
 1443	err |= MRVL88X2011_ENA_XFPREFCLK;
 1444
 1445	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1446			 MRVL88X2011_GENERAL_CTL, err);
 1447	if (err < 0)
 1448		return err;
 1449
 1450	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1451			MRVL88X2011_PMA_PMD_CTL_1);
 1452	if (err < 0)
 1453		return err;
 1454
 1455	if (np->link_config.loopback_mode == LOOPBACK_MAC)
 1456		err |= MRVL88X2011_LOOPBACK;
 1457	else
 1458		err &= ~MRVL88X2011_LOOPBACK;
 1459
 1460	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1461			 MRVL88X2011_PMA_PMD_CTL_1, err);
 1462	if (err < 0)
 1463		return err;
 1464
 1465	/* Enable PMD  */
 1466	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1467			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
 1468}
 1469
 1470
 1471static int xcvr_diag_bcm870x(struct niu *np)
 1472{
 1473	u16 analog_stat0, tx_alarm_status;
 1474	int err = 0;
 1475
 1476#if 1
 1477	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 1478			MII_STAT1000);
 1479	if (err < 0)
 1480		return err;
 1481	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
 1482
 1483	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
 1484	if (err < 0)
 1485		return err;
 1486	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
 1487
 1488	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 1489			MII_NWAYTEST);
 1490	if (err < 0)
 1491		return err;
 1492	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
 1493#endif
 1494
 1495	/* XXX dig this out it might not be so useful XXX */
 1496	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1497			BCM8704_USER_ANALOG_STATUS0);
 1498	if (err < 0)
 1499		return err;
 1500	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1501			BCM8704_USER_ANALOG_STATUS0);
 1502	if (err < 0)
 1503		return err;
 1504	analog_stat0 = err;
 1505
 1506	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1507			BCM8704_USER_TX_ALARM_STATUS);
 1508	if (err < 0)
 1509		return err;
 1510	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
 1511			BCM8704_USER_TX_ALARM_STATUS);
 1512	if (err < 0)
 1513		return err;
 1514	tx_alarm_status = err;
 1515
 1516	if (analog_stat0 != 0x03fc) {
 1517		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
 1518			pr_info("Port %u cable not connected or bad cable\n",
 1519				np->port);
 1520		} else if (analog_stat0 == 0x639c) {
 1521			pr_info("Port %u optical module is bad or missing\n",
 1522				np->port);
 1523		}
 1524	}
 1525
 1526	return 0;
 1527}
 1528
 1529static int xcvr_10g_set_lb_bcm870x(struct niu *np)
 1530{
 1531	struct niu_link_config *lp = &np->link_config;
 1532	int err;
 1533
 1534	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 1535			MII_BMCR);
 1536	if (err < 0)
 1537		return err;
 1538
 1539	err &= ~BMCR_LOOPBACK;
 1540
 1541	if (lp->loopback_mode == LOOPBACK_MAC)
 1542		err |= BMCR_LOOPBACK;
 1543
 1544	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 1545			 MII_BMCR, err);
 1546	if (err)
 1547		return err;
 1548
 1549	return 0;
 1550}
 1551
 1552static int xcvr_init_10g_bcm8706(struct niu *np)
 1553{
 1554	int err = 0;
 1555	u64 val;
 1556
 1557	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
 1558	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
 1559			return err;
 1560
 1561	val = nr64_mac(XMAC_CONFIG);
 1562	val &= ~XMAC_CONFIG_LED_POLARITY;
 1563	val |= XMAC_CONFIG_FORCE_LED_ON;
 1564	nw64_mac(XMAC_CONFIG, val);
 1565
 1566	val = nr64(MIF_CONFIG);
 1567	val |= MIF_CONFIG_INDIRECT_MODE;
 1568	nw64(MIF_CONFIG, val);
 1569
 1570	err = bcm8704_reset(np);
 1571	if (err)
 1572		return err;
 1573
 1574	err = xcvr_10g_set_lb_bcm870x(np);
 1575	if (err)
 1576		return err;
 1577
 1578	err = bcm8706_init_user_dev3(np);
 1579	if (err)
 1580		return err;
 1581
 1582	err = xcvr_diag_bcm870x(np);
 1583	if (err)
 1584		return err;
 1585
 1586	return 0;
 1587}
 1588
 1589static int xcvr_init_10g_bcm8704(struct niu *np)
 1590{
 1591	int err;
 1592
 1593	err = bcm8704_reset(np);
 1594	if (err)
 1595		return err;
 1596
 1597	err = bcm8704_init_user_dev3(np);
 1598	if (err)
 1599		return err;
 1600
 1601	err = xcvr_10g_set_lb_bcm870x(np);
 1602	if (err)
 1603		return err;
 1604
 1605	err =  xcvr_diag_bcm870x(np);
 1606	if (err)
 1607		return err;
 1608
 1609	return 0;
 1610}
 1611
 1612static int xcvr_init_10g(struct niu *np)
 1613{
 1614	int phy_id, err;
 1615	u64 val;
 1616
 1617	val = nr64_mac(XMAC_CONFIG);
 1618	val &= ~XMAC_CONFIG_LED_POLARITY;
 1619	val |= XMAC_CONFIG_FORCE_LED_ON;
 1620	nw64_mac(XMAC_CONFIG, val);
 1621
 1622	/* XXX shared resource, lock parent XXX */
 1623	val = nr64(MIF_CONFIG);
 1624	val |= MIF_CONFIG_INDIRECT_MODE;
 1625	nw64(MIF_CONFIG, val);
 1626
 1627	phy_id = phy_decode(np->parent->port_phy, np->port);
 1628	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
 1629
 1630	/* handle different phy types */
 1631	switch (phy_id & NIU_PHY_ID_MASK) {
 1632	case NIU_PHY_ID_MRVL88X2011:
 1633		err = xcvr_init_10g_mrvl88x2011(np);
 1634		break;
 1635
 1636	default: /* bcom 8704 */
 1637		err = xcvr_init_10g_bcm8704(np);
 1638		break;
 1639	}
 1640
 1641	return err;
 1642}
 1643
 1644static int mii_reset(struct niu *np)
 1645{
 1646	int limit, err;
 1647
 1648	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
 1649	if (err)
 1650		return err;
 1651
 1652	limit = 1000;
 1653	while (--limit >= 0) {
 1654		udelay(500);
 1655		err = mii_read(np, np->phy_addr, MII_BMCR);
 1656		if (err < 0)
 1657			return err;
 1658		if (!(err & BMCR_RESET))
 1659			break;
 1660	}
 1661	if (limit < 0) {
 1662		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
 1663			   np->port, err);
 1664		return -ENODEV;
 1665	}
 1666
 1667	return 0;
 1668}
 1669
 1670static int xcvr_init_1g_rgmii(struct niu *np)
 1671{
 1672	int err;
 1673	u64 val;
 1674	u16 bmcr, bmsr, estat;
 1675
 1676	val = nr64(MIF_CONFIG);
 1677	val &= ~MIF_CONFIG_INDIRECT_MODE;
 1678	nw64(MIF_CONFIG, val);
 1679
 1680	err = mii_reset(np);
 1681	if (err)
 1682		return err;
 1683
 1684	err = mii_read(np, np->phy_addr, MII_BMSR);
 1685	if (err < 0)
 1686		return err;
 1687	bmsr = err;
 1688
 1689	estat = 0;
 1690	if (bmsr & BMSR_ESTATEN) {
 1691		err = mii_read(np, np->phy_addr, MII_ESTATUS);
 1692		if (err < 0)
 1693			return err;
 1694		estat = err;
 1695	}
 1696
 1697	bmcr = 0;
 1698	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
 1699	if (err)
 1700		return err;
 1701
 1702	if (bmsr & BMSR_ESTATEN) {
 1703		u16 ctrl1000 = 0;
 1704
 1705		if (estat & ESTATUS_1000_TFULL)
 1706			ctrl1000 |= ADVERTISE_1000FULL;
 1707		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
 1708		if (err)
 1709			return err;
 1710	}
 1711
 1712	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
 1713
 1714	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
 1715	if (err)
 1716		return err;
 1717
 1718	err = mii_read(np, np->phy_addr, MII_BMCR);
 1719	if (err < 0)
 1720		return err;
 1721	bmcr = mii_read(np, np->phy_addr, MII_BMCR);
 1722
 1723	err = mii_read(np, np->phy_addr, MII_BMSR);
 1724	if (err < 0)
 1725		return err;
 1726
 1727	return 0;
 1728}
 1729
 1730static int mii_init_common(struct niu *np)
 1731{
 1732	struct niu_link_config *lp = &np->link_config;
 1733	u16 bmcr, bmsr, adv, estat;
 1734	int err;
 1735
 1736	err = mii_reset(np);
 1737	if (err)
 1738		return err;
 1739
 1740	err = mii_read(np, np->phy_addr, MII_BMSR);
 1741	if (err < 0)
 1742		return err;
 1743	bmsr = err;
 1744
 1745	estat = 0;
 1746	if (bmsr & BMSR_ESTATEN) {
 1747		err = mii_read(np, np->phy_addr, MII_ESTATUS);
 1748		if (err < 0)
 1749			return err;
 1750		estat = err;
 1751	}
 1752
 1753	bmcr = 0;
 1754	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
 1755	if (err)
 1756		return err;
 1757
 1758	if (lp->loopback_mode == LOOPBACK_MAC) {
 1759		bmcr |= BMCR_LOOPBACK;
 1760		if (lp->active_speed == SPEED_1000)
 1761			bmcr |= BMCR_SPEED1000;
 1762		if (lp->active_duplex == DUPLEX_FULL)
 1763			bmcr |= BMCR_FULLDPLX;
 1764	}
 1765
 1766	if (lp->loopback_mode == LOOPBACK_PHY) {
 1767		u16 aux;
 1768
 1769		aux = (BCM5464R_AUX_CTL_EXT_LB |
 1770		       BCM5464R_AUX_CTL_WRITE_1);
 1771		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
 1772		if (err)
 1773			return err;
 1774	}
 1775
 1776	if (lp->autoneg) {
 1777		u16 ctrl1000;
 1778
 1779		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
 1780		if ((bmsr & BMSR_10HALF) &&
 1781			(lp->advertising & ADVERTISED_10baseT_Half))
 1782			adv |= ADVERTISE_10HALF;
 1783		if ((bmsr & BMSR_10FULL) &&
 1784			(lp->advertising & ADVERTISED_10baseT_Full))
 1785			adv |= ADVERTISE_10FULL;
 1786		if ((bmsr & BMSR_100HALF) &&
 1787			(lp->advertising & ADVERTISED_100baseT_Half))
 1788			adv |= ADVERTISE_100HALF;
 1789		if ((bmsr & BMSR_100FULL) &&
 1790			(lp->advertising & ADVERTISED_100baseT_Full))
 1791			adv |= ADVERTISE_100FULL;
 1792		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
 1793		if (err)
 1794			return err;
 1795
 1796		if (likely(bmsr & BMSR_ESTATEN)) {
 1797			ctrl1000 = 0;
 1798			if ((estat & ESTATUS_1000_THALF) &&
 1799				(lp->advertising & ADVERTISED_1000baseT_Half))
 1800				ctrl1000 |= ADVERTISE_1000HALF;
 1801			if ((estat & ESTATUS_1000_TFULL) &&
 1802				(lp->advertising & ADVERTISED_1000baseT_Full))
 1803				ctrl1000 |= ADVERTISE_1000FULL;
 1804			err = mii_write(np, np->phy_addr,
 1805					MII_CTRL1000, ctrl1000);
 1806			if (err)
 1807				return err;
 1808		}
 1809
 1810		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
 1811	} else {
 1812		/* !lp->autoneg */
 1813		int fulldpx;
 1814
 1815		if (lp->duplex == DUPLEX_FULL) {
 1816			bmcr |= BMCR_FULLDPLX;
 1817			fulldpx = 1;
 1818		} else if (lp->duplex == DUPLEX_HALF)
 1819			fulldpx = 0;
 1820		else
 1821			return -EINVAL;
 1822
 1823		if (lp->speed == SPEED_1000) {
 1824			/* if X-full requested while not supported, or
 1825			   X-half requested while not supported... */
 1826			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
 1827				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
 1828				return -EINVAL;
 1829			bmcr |= BMCR_SPEED1000;
 1830		} else if (lp->speed == SPEED_100) {
 1831			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
 1832				(!fulldpx && !(bmsr & BMSR_100HALF)))
 1833				return -EINVAL;
 1834			bmcr |= BMCR_SPEED100;
 1835		} else if (lp->speed == SPEED_10) {
 1836			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
 1837				(!fulldpx && !(bmsr & BMSR_10HALF)))
 1838				return -EINVAL;
 1839		} else
 1840			return -EINVAL;
 1841	}
 1842
 1843	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
 1844	if (err)
 1845		return err;
 1846
 1847#if 0
 1848	err = mii_read(np, np->phy_addr, MII_BMCR);
 1849	if (err < 0)
 1850		return err;
 1851	bmcr = err;
 1852
 1853	err = mii_read(np, np->phy_addr, MII_BMSR);
 1854	if (err < 0)
 1855		return err;
 1856	bmsr = err;
 1857
 1858	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
 1859		np->port, bmcr, bmsr);
 1860#endif
 1861
 1862	return 0;
 1863}
 1864
 1865static int xcvr_init_1g(struct niu *np)
 1866{
 1867	u64 val;
 1868
 1869	/* XXX shared resource, lock parent XXX */
 1870	val = nr64(MIF_CONFIG);
 1871	val &= ~MIF_CONFIG_INDIRECT_MODE;
 1872	nw64(MIF_CONFIG, val);
 1873
 1874	return mii_init_common(np);
 1875}
 1876
 1877static int niu_xcvr_init(struct niu *np)
 1878{
 1879	const struct niu_phy_ops *ops = np->phy_ops;
 1880	int err;
 1881
 1882	err = 0;
 1883	if (ops->xcvr_init)
 1884		err = ops->xcvr_init(np);
 1885
 1886	return err;
 1887}
 1888
 1889static int niu_serdes_init(struct niu *np)
 1890{
 1891	const struct niu_phy_ops *ops = np->phy_ops;
 1892	int err;
 1893
 1894	err = 0;
 1895	if (ops->serdes_init)
 1896		err = ops->serdes_init(np);
 1897
 1898	return err;
 1899}
 1900
 1901static void niu_init_xif(struct niu *);
 1902static void niu_handle_led(struct niu *, int status);
 1903
 1904static int niu_link_status_common(struct niu *np, int link_up)
 1905{
 1906	struct niu_link_config *lp = &np->link_config;
 1907	struct net_device *dev = np->dev;
 1908	unsigned long flags;
 1909
 1910	if (!netif_carrier_ok(dev) && link_up) {
 1911		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
 1912			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
 1913			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
 1914			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
 1915			   "10Mbit/sec",
 1916			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");
 1917
 1918		spin_lock_irqsave(&np->lock, flags);
 1919		niu_init_xif(np);
 1920		niu_handle_led(np, 1);
 1921		spin_unlock_irqrestore(&np->lock, flags);
 1922
 1923		netif_carrier_on(dev);
 1924	} else if (netif_carrier_ok(dev) && !link_up) {
 1925		netif_warn(np, link, dev, "Link is down\n");
 1926		spin_lock_irqsave(&np->lock, flags);
 1927		niu_handle_led(np, 0);
 1928		spin_unlock_irqrestore(&np->lock, flags);
 1929		netif_carrier_off(dev);
 1930	}
 1931
 1932	return 0;
 1933}
 1934
 1935static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
 1936{
 1937	int err, link_up, pma_status, pcs_status;
 1938
 1939	link_up = 0;
 1940
 1941	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1942			MRVL88X2011_10G_PMD_STATUS_2);
 1943	if (err < 0)
 1944		goto out;
 1945
 1946	/* Check PMA/PMD Register: 1.0001.2 == 1 */
 1947	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
 1948			MRVL88X2011_PMA_PMD_STATUS_1);
 1949	if (err < 0)
 1950		goto out;
 1951
 1952	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
 1953
 1954        /* Check PMC Register : 3.0001.2 == 1: read twice */
 1955	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1956			MRVL88X2011_PMA_PMD_STATUS_1);
 1957	if (err < 0)
 1958		goto out;
 1959
 1960	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1961			MRVL88X2011_PMA_PMD_STATUS_1);
 1962	if (err < 0)
 1963		goto out;
 1964
 1965	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
 1966
 1967        /* Check XGXS Register : 4.0018.[0-3,12] */
 1968	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
 1969			MRVL88X2011_10G_XGXS_LANE_STAT);
 1970	if (err < 0)
 1971		goto out;
 1972
 1973	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
 1974		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
 1975		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
 1976		    0x800))
 1977		link_up = (pma_status && pcs_status) ? 1 : 0;
 1978
 1979	np->link_config.active_speed = SPEED_10000;
 1980	np->link_config.active_duplex = DUPLEX_FULL;
 1981	err = 0;
 1982out:
 1983	mrvl88x2011_act_led(np, (link_up ?
 1984				 MRVL88X2011_LED_CTL_PCS_ACT :
 1985				 MRVL88X2011_LED_CTL_OFF));
 1986
 1987	*link_up_p = link_up;
 1988	return err;
 1989}
 1990
 1991static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
 1992{
 1993	int err, link_up;
 1994	link_up = 0;
 1995
 1996	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 1997			BCM8704_PMD_RCV_SIGDET);
 1998	if (err < 0 || err == 0xffff)
 1999		goto out;
 2000	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
 2001		err = 0;
 2002		goto out;
 2003	}
 2004
 2005	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 2006			BCM8704_PCS_10G_R_STATUS);
 2007	if (err < 0)
 2008		goto out;
 2009
 2010	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
 2011		err = 0;
 2012		goto out;
 2013	}
 2014
 2015	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 2016			BCM8704_PHYXS_XGXS_LANE_STAT);
 2017	if (err < 0)
 2018		goto out;
 2019	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
 2020		    PHYXS_XGXS_LANE_STAT_MAGIC |
 2021		    PHYXS_XGXS_LANE_STAT_PATTEST |
 2022		    PHYXS_XGXS_LANE_STAT_LANE3 |
 2023		    PHYXS_XGXS_LANE_STAT_LANE2 |
 2024		    PHYXS_XGXS_LANE_STAT_LANE1 |
 2025		    PHYXS_XGXS_LANE_STAT_LANE0)) {
 2026		err = 0;
 2027		np->link_config.active_speed = SPEED_INVALID;
 2028		np->link_config.active_duplex = DUPLEX_INVALID;
 2029		goto out;
 2030	}
 2031
 2032	link_up = 1;
 2033	np->link_config.active_speed = SPEED_10000;
 2034	np->link_config.active_duplex = DUPLEX_FULL;
 2035	err = 0;
 2036
 2037out:
 2038	*link_up_p = link_up;
 2039	return err;
 2040}
 2041
 2042static int link_status_10g_bcom(struct niu *np, int *link_up_p)
 2043{
 2044	int err, link_up;
 2045
 2046	link_up = 0;
 2047
 2048	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 2049			BCM8704_PMD_RCV_SIGDET);
 2050	if (err < 0)
 2051		goto out;
 2052	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
 2053		err = 0;
 2054		goto out;
 2055	}
 2056
 2057	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 2058			BCM8704_PCS_10G_R_STATUS);
 2059	if (err < 0)
 2060		goto out;
 2061	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
 2062		err = 0;
 2063		goto out;
 2064	}
 2065
 2066	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 2067			BCM8704_PHYXS_XGXS_LANE_STAT);
 2068	if (err < 0)
 2069		goto out;
 2070
 2071	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
 2072		    PHYXS_XGXS_LANE_STAT_MAGIC |
 2073		    PHYXS_XGXS_LANE_STAT_LANE3 |
 2074		    PHYXS_XGXS_LANE_STAT_LANE2 |
 2075		    PHYXS_XGXS_LANE_STAT_LANE1 |
 2076		    PHYXS_XGXS_LANE_STAT_LANE0)) {
 2077		err = 0;
 2078		goto out;
 2079	}
 2080
 2081	link_up = 1;
 2082	np->link_config.active_speed = SPEED_10000;
 2083	np->link_config.active_duplex = DUPLEX_FULL;
 2084	err = 0;
 2085
 2086out:
 2087	*link_up_p = link_up;
 2088	return err;
 2089}
 2090
 2091static int link_status_10g(struct niu *np, int *link_up_p)
 2092{
 2093	unsigned long flags;
 2094	int err = -EINVAL;
 2095
 2096	spin_lock_irqsave(&np->lock, flags);
 2097
 2098	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
 2099		int phy_id;
 2100
 2101		phy_id = phy_decode(np->parent->port_phy, np->port);
 2102		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
 2103
 2104		/* handle different phy types */
 2105		switch (phy_id & NIU_PHY_ID_MASK) {
 2106		case NIU_PHY_ID_MRVL88X2011:
 2107			err = link_status_10g_mrvl(np, link_up_p);
 2108			break;
 2109
 2110		default: /* bcom 8704 */
 2111			err = link_status_10g_bcom(np, link_up_p);
 2112			break;
 2113		}
 2114	}
 2115
 2116	spin_unlock_irqrestore(&np->lock, flags);
 2117
 2118	return err;
 2119}
 2120
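/* Infer PHY presence from the SERDES receiver: sample ESR_INT_SIGNALS
 * and require the port's ready/detect bits plus all four XDP channel
 * bits before reporting a 10G PHY as present.  Only ports 0 and 1
 * carry a hot-pluggable 10G PHY.
 */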
 2121static int niu_10g_phy_present(struct niu *np)
 2122{
 2123	u64 sig, mask, val;
 2124
 2125	sig = nr64(ESR_INT_SIGNALS);
 2126	switch (np->port) {
 2127	case 0:
 2128		mask = ESR_INT_SIGNALS_P0_BITS;
 2129		val = (ESR_INT_SRDY0_P0 |
 2130		       ESR_INT_DET0_P0 |
 2131		       ESR_INT_XSRDY_P0 |
 2132		       ESR_INT_XDP_P0_CH3 |
 2133		       ESR_INT_XDP_P0_CH2 |
 2134		       ESR_INT_XDP_P0_CH1 |
 2135		       ESR_INT_XDP_P0_CH0);
 2136		break;
 2137
 2138	case 1:
 2139		mask = ESR_INT_SIGNALS_P1_BITS;
 2140		val = (ESR_INT_SRDY0_P1 |
 2141		       ESR_INT_DET0_P1 |
 2142		       ESR_INT_XSRDY_P1 |
 2143		       ESR_INT_XDP_P1_CH3 |
 2144		       ESR_INT_XDP_P1_CH2 |
 2145		       ESR_INT_XDP_P1_CH1 |
 2146		       ESR_INT_XDP_P1_CH0);
 2147		break;
 2148
 2149	default:
 2150		return 0;
 2151	}
 2152
 2153	if ((sig & mask) != val)
 2154		return 0;
 2155	return 1;
 2156}
 2157
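/* Hotplug-aware link check: track PHY presence via
 * NIU_FLAGS_HOTPLUG_PHY_PRESENT, re-run xcvr_init when a NEM is
 * plugged in, and treat MDIO reads of 0xffff as a back-to-back XAUI
 * link with no PHY on the bus.
 */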
 2158static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
 2159{
 2160	unsigned long flags;
 2161	int err = 0;
 2162	int phy_present;
 2163	int phy_present_prev;
 2164
 2165	spin_lock_irqsave(&np->lock, flags);
 2166
 2167	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
 2168		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
 2169			1 : 0;
 2170		phy_present = niu_10g_phy_present(np);
 2171		if (phy_present != phy_present_prev) {
 2172			/* state change */
 2173			if (phy_present) {
 2174				/* A NEM was just plugged in */
 2175				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2176				if (np->phy_ops->xcvr_init)
 2177					err = np->phy_ops->xcvr_init(np);
 2178				if (err) {
 2179					err = mdio_read(np, np->phy_addr,
 2180						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
 2181					if (err == 0xffff) {
 2182						/* No mdio, back-to-back XAUI */
 2183						goto out;
 2184					}
 2185					/* debounce */
 2186					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2187				}
 2188			} else {
 2189				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2190				*link_up_p = 0;
 2191				netif_warn(np, link, np->dev,
 2192					   "Hotplug PHY Removed\n");
 2193			}
 2194		}
 2195out:
 2196		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
 2197			err = link_status_10g_bcm8706(np, link_up_p);
 2198			if (err == 0xffff) {
 2199				/* No mdio, back-to-back XAUI: it is C10NEM */
 2200				*link_up_p = 1;
 2201				np->link_config.active_speed = SPEED_10000;
 2202				np->link_config.active_duplex = DUPLEX_FULL;
 2203			}
 2204		}
 2205	}
 2206
 2207	spin_unlock_irqrestore(&np->lock, flags);
 2208
 2209	return 0;
 2210}
 2211
 2212static int niu_link_status(struct niu *np, int *link_up_p)
 2213{
 2214	const struct niu_phy_ops *ops = np->phy_ops;
 2215	int err;
 2216
 2217	err = 0;
 2218	if (ops->link_status)
 2219		err = ops->link_status(np, link_up_p);
 2220
 2221	return err;
 2222}
 2223
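/* Periodic link poll: re-check once a second while the link is down
 * and every five seconds once the carrier is up.
 */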
 2224static void niu_timer(unsigned long __opaque)
 2225{
 2226	struct niu *np = (struct niu *) __opaque;
 2227	unsigned long off;
 2228	int err, link_up;
 2229
 2230	err = niu_link_status(np, &link_up);
 2231	if (!err)
 2232		niu_link_status_common(np, link_up);
 2233
 2234	if (netif_carrier_ok(np->dev))
 2235		off = 5 * HZ;
 2236	else
 2237		off = 1 * HZ;
 2238	np->timer.expires = jiffies + off;
 2239
 2240	add_timer(&np->timer);
 2241}
 2242
 2243static const struct niu_phy_ops phy_ops_10g_serdes = {
 2244	.serdes_init		= serdes_init_10g_serdes,
 2245	.link_status		= link_status_10g_serdes,
 2246};
 2247
 2248static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
 2249	.serdes_init		= serdes_init_niu_10g_serdes,
 2250	.link_status		= link_status_10g_serdes,
 2251};
 2252
 2253static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
 2254	.serdes_init		= serdes_init_niu_1g_serdes,
 2255	.link_status		= link_status_1g_serdes,
 2256};
 2257
 2258static const struct niu_phy_ops phy_ops_1g_rgmii = {
 2259	.xcvr_init		= xcvr_init_1g_rgmii,
 2260	.link_status		= link_status_1g_rgmii,
 2261};
 2262
 2263static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
 2264	.serdes_init		= serdes_init_niu_10g_fiber,
 2265	.xcvr_init		= xcvr_init_10g,
 2266	.link_status		= link_status_10g,
 2267};
 2268
 2269static const struct niu_phy_ops phy_ops_10g_fiber = {
 2270	.serdes_init		= serdes_init_10g,
 2271	.xcvr_init		= xcvr_init_10g,
 2272	.link_status		= link_status_10g,
 2273};
 2274
 2275static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
 2276	.serdes_init		= serdes_init_10g,
 2277	.xcvr_init		= xcvr_init_10g_bcm8706,
 2278	.link_status		= link_status_10g_hotplug,
 2279};
 2280
 2281static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
 2282	.serdes_init		= serdes_init_niu_10g_fiber,
 2283	.xcvr_init		= xcvr_init_10g_bcm8706,
 2284	.link_status		= link_status_10g_hotplug,
 2285};
 2286
 2287static const struct niu_phy_ops phy_ops_10g_copper = {
 2288	.serdes_init		= serdes_init_10g,
 2289	.link_status		= link_status_10g, /* XXX */
 2290};
 2291
 2292static const struct niu_phy_ops phy_ops_1g_fiber = {
 2293	.serdes_init		= serdes_init_1g,
 2294	.xcvr_init		= xcvr_init_1g,
 2295	.link_status		= link_status_1g,
 2296};
 2297
 2298static const struct niu_phy_ops phy_ops_1g_copper = {
 2299	.xcvr_init		= xcvr_init_1g,
 2300	.link_status		= link_status_1g,
 2301};
 2302
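/* A PHY template pairs an ops vector with the base MDIO address for
 * that transceiver type; niu_determine_phy_disposition() adds a
 * per-port/platform offset to arrive at the final np->phy_addr.
 */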
 2303struct niu_phy_template {
 2304	const struct niu_phy_ops	*ops;
 2305	u32				phy_addr_base;
 2306};
 2307
 2308static const struct niu_phy_template phy_template_niu_10g_fiber = {
 2309	.ops		= &phy_ops_10g_fiber_niu,
 2310	.phy_addr_base	= 16,
 2311};
 2312
 2313static const struct niu_phy_template phy_template_niu_10g_serdes = {
 2314	.ops		= &phy_ops_10g_serdes_niu,
 2315	.phy_addr_base	= 0,
 2316};
 2317
 2318static const struct niu_phy_template phy_template_niu_1g_serdes = {
 2319	.ops		= &phy_ops_1g_serdes_niu,
 2320	.phy_addr_base	= 0,
 2321};
 2322
 2323static const struct niu_phy_template phy_template_10g_fiber = {
 2324	.ops		= &phy_ops_10g_fiber,
 2325	.phy_addr_base	= 8,
 2326};
 2327
 2328static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
 2329	.ops		= &phy_ops_10g_fiber_hotplug,
 2330	.phy_addr_base	= 8,
 2331};
 2332
 2333static const struct niu_phy_template phy_template_niu_10g_hotplug = {
 2334	.ops		= &phy_ops_niu_10g_hotplug,
 2335	.phy_addr_base	= 8,
 2336};
 2337
 2338static const struct niu_phy_template phy_template_10g_copper = {
 2339	.ops		= &phy_ops_10g_copper,
 2340	.phy_addr_base	= 10,
 2341};
 2342
 2343static const struct niu_phy_template phy_template_1g_fiber = {
 2344	.ops		= &phy_ops_1g_fiber,
 2345	.phy_addr_base	= 0,
 2346};
 2347
 2348static const struct niu_phy_template phy_template_1g_copper = {
 2349	.ops		= &phy_ops_1g_copper,
 2350	.phy_addr_base	= 0,
 2351};
 2352
 2353static const struct niu_phy_template phy_template_1g_rgmii = {
 2354	.ops		= &phy_ops_1g_rgmii,
 2355	.phy_addr_base	= 0,
 2356};
 2357
 2358static const struct niu_phy_template phy_template_10g_serdes = {
 2359	.ops		= &phy_ops_10g_serdes,
 2360	.phy_addr_base	= 0,
 2361};
 2362
 2363static int niu_atca_port_num[4] = {
  2364	0, 0, 11, 10
 2365};
 2366
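/* Bring up the 10G SERDES on port 0 or 1: program the PLL, control
 * and test registers, tune the rxtx/glue settings on all four lanes,
 * then check ESR_INT_SIGNALS.  If the 10G signals never come ready,
 * retrain the block as a 1G SERDES and downgrade np->flags to match.
 */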
 2367static int serdes_init_10g_serdes(struct niu *np)
 2368{
 2369	struct niu_link_config *lp = &np->link_config;
 2370	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
 2371	u64 ctrl_val, test_cfg_val, sig, mask, val;
 2372
 2373	switch (np->port) {
 2374	case 0:
 2375		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
 2376		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
 2377		pll_cfg = ENET_SERDES_0_PLL_CFG;
 2378		break;
 2379	case 1:
 2380		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
 2381		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
 2382		pll_cfg = ENET_SERDES_1_PLL_CFG;
 2383		break;
 2384
 2385	default:
 2386		return -EINVAL;
 2387	}
 2388	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
 2389		    ENET_SERDES_CTRL_SDET_1 |
 2390		    ENET_SERDES_CTRL_SDET_2 |
 2391		    ENET_SERDES_CTRL_SDET_3 |
 2392		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
 2393		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
 2394		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
 2395		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
 2396		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
 2397		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
 2398		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
 2399		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
 2400	test_cfg_val = 0;
 2401
 2402	if (lp->loopback_mode == LOOPBACK_PHY) {
 2403		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
 2404				  ENET_SERDES_TEST_MD_0_SHIFT) |
 2405				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2406				  ENET_SERDES_TEST_MD_1_SHIFT) |
 2407				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2408				  ENET_SERDES_TEST_MD_2_SHIFT) |
 2409				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2410				  ENET_SERDES_TEST_MD_3_SHIFT));
 2411	}
 2412
 2413	esr_reset(np);
 2414	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
 2415	nw64(ctrl_reg, ctrl_val);
 2416	nw64(test_cfg_reg, test_cfg_val);
 2417
 2418	/* Initialize all 4 lanes of the SERDES.  */
 2419	for (i = 0; i < 4; i++) {
 2420		u32 rxtx_ctrl, glue0;
 2421		int err;
 2422
 2423		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
 2424		if (err)
 2425			return err;
 2426		err = esr_read_glue0(np, i, &glue0);
 2427		if (err)
 2428			return err;
 2429
 2430		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
 2431		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
 2432			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
 2433
 2434		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
 2435			   ESR_GLUE_CTRL0_THCNT |
 2436			   ESR_GLUE_CTRL0_BLTIME);
 2437		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
 2438			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
 2439			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
 2440			  (BLTIME_300_CYCLES <<
 2441			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
 2442
 2443		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
 2444		if (err)
 2445			return err;
 2446		err = esr_write_glue0(np, i, glue0);
 2447		if (err)
 2448			return err;
 2449	}
  2450
 2452	sig = nr64(ESR_INT_SIGNALS);
 2453	switch (np->port) {
 2454	case 0:
 2455		mask = ESR_INT_SIGNALS_P0_BITS;
 2456		val = (ESR_INT_SRDY0_P0 |
 2457		       ESR_INT_DET0_P0 |
 2458		       ESR_INT_XSRDY_P0 |
 2459		       ESR_INT_XDP_P0_CH3 |
 2460		       ESR_INT_XDP_P0_CH2 |
 2461		       ESR_INT_XDP_P0_CH1 |
 2462		       ESR_INT_XDP_P0_CH0);
 2463		break;
 2464
 2465	case 1:
 2466		mask = ESR_INT_SIGNALS_P1_BITS;
 2467		val = (ESR_INT_SRDY0_P1 |
 2468		       ESR_INT_DET0_P1 |
 2469		       ESR_INT_XSRDY_P1 |
 2470		       ESR_INT_XDP_P1_CH3 |
 2471		       ESR_INT_XDP_P1_CH2 |
 2472		       ESR_INT_XDP_P1_CH1 |
 2473		       ESR_INT_XDP_P1_CH0);
 2474		break;
 2475
 2476	default:
 2477		return -EINVAL;
 2478	}
 2479
 2480	if ((sig & mask) != val) {
 2481		int err;
 2482		err = serdes_init_1g_serdes(np);
 2483		if (!err) {
 2484			np->flags &= ~NIU_FLAGS_10G;
 2485			np->mac_xcvr = MAC_XCVR_PCS;
  2486		} else {
 2487			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
 2488				   np->port);
 2489			return -ENODEV;
 2490		}
 2491	}
 2492
 2493	return 0;
 2494}
 2495
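/* Select phy_ops and the MDIO address from the combination of
 * NIU_FLAGS_10G / NIU_FLAGS_FIBER / NIU_FLAGS_XCVR_SERDES and the
 * platform type, using the templates above.
 */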
 2496static int niu_determine_phy_disposition(struct niu *np)
 2497{
 2498	struct niu_parent *parent = np->parent;
 2499	u8 plat_type = parent->plat_type;
 2500	const struct niu_phy_template *tp;
 2501	u32 phy_addr_off = 0;
 2502
 2503	if (plat_type == PLAT_TYPE_NIU) {
 2504		switch (np->flags &
 2505			(NIU_FLAGS_10G |
 2506			 NIU_FLAGS_FIBER |
 2507			 NIU_FLAGS_XCVR_SERDES)) {
 2508		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 2509			/* 10G Serdes */
 2510			tp = &phy_template_niu_10g_serdes;
 2511			break;
 2512		case NIU_FLAGS_XCVR_SERDES:
 2513			/* 1G Serdes */
 2514			tp = &phy_template_niu_1g_serdes;
 2515			break;
 2516		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 2517			/* 10G Fiber */
 2518		default:
 2519			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
 2520				tp = &phy_template_niu_10g_hotplug;
 2521				if (np->port == 0)
 2522					phy_addr_off = 8;
 2523				if (np->port == 1)
 2524					phy_addr_off = 12;
 2525			} else {
 2526				tp = &phy_template_niu_10g_fiber;
 2527				phy_addr_off += np->port;
 2528			}
 2529			break;
 2530		}
 2531	} else {
 2532		switch (np->flags &
 2533			(NIU_FLAGS_10G |
 2534			 NIU_FLAGS_FIBER |
 2535			 NIU_FLAGS_XCVR_SERDES)) {
 2536		case 0:
 2537			/* 1G copper */
 2538			tp = &phy_template_1g_copper;
 2539			if (plat_type == PLAT_TYPE_VF_P0)
 2540				phy_addr_off = 10;
 2541			else if (plat_type == PLAT_TYPE_VF_P1)
 2542				phy_addr_off = 26;
 2543
 2544			phy_addr_off += (np->port ^ 0x3);
 2545			break;
 2546
 2547		case NIU_FLAGS_10G:
 2548			/* 10G copper */
 2549			tp = &phy_template_10g_copper;
 2550			break;
 2551
 2552		case NIU_FLAGS_FIBER:
 2553			/* 1G fiber */
 2554			tp = &phy_template_1g_fiber;
 2555			break;
 2556
 2557		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 2558			/* 10G fiber */
 2559			tp = &phy_template_10g_fiber;
 2560			if (plat_type == PLAT_TYPE_VF_P0 ||
 2561			    plat_type == PLAT_TYPE_VF_P1)
 2562				phy_addr_off = 8;
 2563			phy_addr_off += np->port;
 2564			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
 2565				tp = &phy_template_10g_fiber_hotplug;
 2566				if (np->port == 0)
 2567					phy_addr_off = 8;
 2568				if (np->port == 1)
 2569					phy_addr_off = 12;
 2570			}
 2571			break;
 2572
 2573		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 2574		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
 2575		case NIU_FLAGS_XCVR_SERDES:
  2576			switch (np->port) {
 2577			case 0:
 2578			case 1:
 2579				tp = &phy_template_10g_serdes;
 2580				break;
 2581			case 2:
 2582			case 3:
 2583				tp = &phy_template_1g_rgmii;
 2584				break;
 2585			default:
  2586				return -EINVAL;
 2588			}
 2589			phy_addr_off = niu_atca_port_num[np->port];
 2590			break;
 2591
 2592		default:
 2593			return -EINVAL;
 2594		}
 2595	}
 2596
 2597	np->phy_ops = tp->ops;
 2598	np->phy_addr = tp->phy_addr_base + phy_addr_off;
 2599
 2600	return 0;
 2601}
 2602
 2603static int niu_init_link(struct niu *np)
 2604{
 2605	struct niu_parent *parent = np->parent;
 2606	int err, ignore;
 2607
 2608	if (parent->plat_type == PLAT_TYPE_NIU) {
 2609		err = niu_xcvr_init(np);
 2610		if (err)
 2611			return err;
 2612		msleep(200);
 2613	}
 2614	err = niu_serdes_init(np);
 2615	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
 2616		return err;
 2617	msleep(200);
 2618	err = niu_xcvr_init(np);
 2619	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
 2620		niu_link_status(np, &ignore);
 2621	return 0;
 2622}
 2623
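/* The MAC address is programmed as three 16-bit registers: reg0
 * carries the two lowest octets (addr[4], addr[5]) and reg2 the two
 * highest, for XMAC and BMAC alike.
 */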
 2624static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
 2625{
 2626	u16 reg0 = addr[4] << 8 | addr[5];
 2627	u16 reg1 = addr[2] << 8 | addr[3];
 2628	u16 reg2 = addr[0] << 8 | addr[1];
 2629
 2630	if (np->flags & NIU_FLAGS_XMAC) {
 2631		nw64_mac(XMAC_ADDR0, reg0);
 2632		nw64_mac(XMAC_ADDR1, reg1);
 2633		nw64_mac(XMAC_ADDR2, reg2);
 2634	} else {
 2635		nw64_mac(BMAC_ADDR0, reg0);
 2636		nw64_mac(BMAC_ADDR1, reg1);
 2637		nw64_mac(BMAC_ADDR2, reg2);
 2638	}
 2639}
 2640
 2641static int niu_num_alt_addr(struct niu *np)
 2642{
 2643	if (np->flags & NIU_FLAGS_XMAC)
 2644		return XMAC_NUM_ALT_ADDR;
 2645	else
 2646		return BMAC_NUM_ALT_ADDR;
 2647}
 2648
 2649static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
 2650{
 2651	u16 reg0 = addr[4] << 8 | addr[5];
 2652	u16 reg1 = addr[2] << 8 | addr[3];
 2653	u16 reg2 = addr[0] << 8 | addr[1];
 2654
 2655	if (index >= niu_num_alt_addr(np))
 2656		return -EINVAL;
 2657
 2658	if (np->flags & NIU_FLAGS_XMAC) {
 2659		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
 2660		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
 2661		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
 2662	} else {
 2663		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
 2664		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
 2665		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
 2666	}
 2667
 2668	return 0;
 2669}
 2670
 2671static int niu_enable_alt_mac(struct niu *np, int index, int on)
 2672{
 2673	unsigned long reg;
 2674	u64 val, mask;
 2675
 2676	if (index >= niu_num_alt_addr(np))
 2677		return -EINVAL;
 2678
 2679	if (np->flags & NIU_FLAGS_XMAC) {
 2680		reg = XMAC_ADDR_CMPEN;
 2681		mask = 1 << index;
 2682	} else {
 2683		reg = BMAC_ADDR_CMPEN;
 2684		mask = 1 << (index + 1);
 2685	}
 2686
 2687	val = nr64_mac(reg);
 2688	if (on)
 2689		val |= mask;
 2690	else
 2691		val &= ~mask;
 2692	nw64_mac(reg, val);
 2693
 2694	return 0;
 2695}
 2696
 2697static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
 2698				   int num, int mac_pref)
 2699{
 2700	u64 val = nr64_mac(reg);
 2701	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
 2702	val |= num;
 2703	if (mac_pref)
 2704		val |= HOST_INFO_MPR;
 2705	nw64_mac(reg, val);
 2706}
 2707
 2708static int __set_rdc_table_num(struct niu *np,
 2709			       int xmac_index, int bmac_index,
 2710			       int rdc_table_num, int mac_pref)
 2711{
 2712	unsigned long reg;
 2713
 2714	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
 2715		return -EINVAL;
 2716	if (np->flags & NIU_FLAGS_XMAC)
 2717		reg = XMAC_HOST_INFO(xmac_index);
 2718	else
 2719		reg = BMAC_HOST_INFO(bmac_index);
 2720	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
 2721	return 0;
 2722}
 2723
 2724static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
 2725					 int mac_pref)
 2726{
 2727	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
 2728}
 2729
 2730static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
 2731					   int mac_pref)
 2732{
 2733	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
 2734}
 2735
 2736static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
 2737				     int table_num, int mac_pref)
 2738{
 2739	if (idx >= niu_num_alt_addr(np))
 2740		return -EINVAL;
 2741	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
 2742}
 2743
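/* Maintain even parity over the VLAN table entry: parity bit 0 covers
 * the port 0/1 byte (bits 0-7), parity bit 1 the port 2/3 byte (bits
 * 8-15), each recomputed with hweight64() on every update.
 */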
 2744static u64 vlan_entry_set_parity(u64 reg_val)
 2745{
 2746	u64 port01_mask;
 2747	u64 port23_mask;
 2748
 2749	port01_mask = 0x00ff;
 2750	port23_mask = 0xff00;
 2751
 2752	if (hweight64(reg_val & port01_mask) & 1)
 2753		reg_val |= ENET_VLAN_TBL_PARITY0;
 2754	else
 2755		reg_val &= ~ENET_VLAN_TBL_PARITY0;
 2756
 2757	if (hweight64(reg_val & port23_mask) & 1)
 2758		reg_val |= ENET_VLAN_TBL_PARITY1;
 2759	else
 2760		reg_val &= ~ENET_VLAN_TBL_PARITY1;
 2761
 2762	return reg_val;
 2763}
 2764
 2765static void vlan_tbl_write(struct niu *np, unsigned long index,
 2766			   int port, int vpr, int rdc_table)
 2767{
 2768	u64 reg_val = nr64(ENET_VLAN_TBL(index));
 2769
 2770	reg_val &= ~((ENET_VLAN_TBL_VPR |
 2771		      ENET_VLAN_TBL_VLANRDCTBLN) <<
 2772		     ENET_VLAN_TBL_SHIFT(port));
 2773	if (vpr)
 2774		reg_val |= (ENET_VLAN_TBL_VPR <<
 2775			    ENET_VLAN_TBL_SHIFT(port));
 2776	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
 2777
 2778	reg_val = vlan_entry_set_parity(reg_val);
 2779
 2780	nw64(ENET_VLAN_TBL(index), reg_val);
 2781}
 2782
 2783static void vlan_tbl_clear(struct niu *np)
 2784{
 2785	int i;
 2786
 2787	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
 2788		nw64(ENET_VLAN_TBL(i), 0);
 2789}
 2790
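/* Poll TCAM_CTL for a completion bit, giving the hardware up to
 * roughly 1ms (1000 polls x 1us) before declaring the TCAM dead.
 */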
 2791static int tcam_wait_bit(struct niu *np, u64 bit)
 2792{
 2793	int limit = 1000;
 2794
 2795	while (--limit > 0) {
 2796		if (nr64(TCAM_CTL) & bit)
 2797			break;
 2798		udelay(1);
 2799	}
 2800	if (limit <= 0)
 2801		return -ENODEV;
 2802
 2803	return 0;
 2804}
 2805
 2806static int tcam_flush(struct niu *np, int index)
 2807{
 2808	nw64(TCAM_KEY_0, 0x00);
 2809	nw64(TCAM_KEY_MASK_0, 0xff);
 2810	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
 2811
 2812	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2813}
 2814
 2815#if 0
 2816static int tcam_read(struct niu *np, int index,
 2817		     u64 *key, u64 *mask)
 2818{
 2819	int err;
 2820
 2821	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
 2822	err = tcam_wait_bit(np, TCAM_CTL_STAT);
 2823	if (!err) {
 2824		key[0] = nr64(TCAM_KEY_0);
 2825		key[1] = nr64(TCAM_KEY_1);
 2826		key[2] = nr64(TCAM_KEY_2);
 2827		key[3] = nr64(TCAM_KEY_3);
 2828		mask[0] = nr64(TCAM_KEY_MASK_0);
 2829		mask[1] = nr64(TCAM_KEY_MASK_1);
 2830		mask[2] = nr64(TCAM_KEY_MASK_2);
 2831		mask[3] = nr64(TCAM_KEY_MASK_3);
 2832	}
 2833	return err;
 2834}
 2835#endif
 2836
 2837static int tcam_write(struct niu *np, int index,
 2838		      u64 *key, u64 *mask)
 2839{
 2840	nw64(TCAM_KEY_0, key[0]);
 2841	nw64(TCAM_KEY_1, key[1]);
 2842	nw64(TCAM_KEY_2, key[2]);
 2843	nw64(TCAM_KEY_3, key[3]);
 2844	nw64(TCAM_KEY_MASK_0, mask[0]);
 2845	nw64(TCAM_KEY_MASK_1, mask[1]);
 2846	nw64(TCAM_KEY_MASK_2, mask[2]);
 2847	nw64(TCAM_KEY_MASK_3, mask[3]);
 2848	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
 2849
 2850	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2851}
 2852
 2853#if 0
 2854static int tcam_assoc_read(struct niu *np, int index, u64 *data)
 2855{
 2856	int err;
 2857
 2858	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
 2859	err = tcam_wait_bit(np, TCAM_CTL_STAT);
 2860	if (!err)
 2861		*data = nr64(TCAM_KEY_1);
 2862
 2863	return err;
 2864}
 2865#endif
 2866
 2867static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
 2868{
 2869	nw64(TCAM_KEY_1, assoc_data);
 2870	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
 2871
 2872	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2873}
 2874
 2875static void tcam_enable(struct niu *np, int on)
 2876{
 2877	u64 val = nr64(FFLP_CFG_1);
 2878
 2879	if (on)
 2880		val &= ~FFLP_CFG_1_TCAM_DIS;
 2881	else
 2882		val |= FFLP_CFG_1_TCAM_DIS;
 2883	nw64(FFLP_CFG_1, val);
 2884}
 2885
 2886static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
 2887{
 2888	u64 val = nr64(FFLP_CFG_1);
 2889
 2890	val &= ~(FFLP_CFG_1_FFLPINITDONE |
 2891		 FFLP_CFG_1_CAMLAT |
 2892		 FFLP_CFG_1_CAMRATIO);
 2893	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
 2894	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
 2895	nw64(FFLP_CFG_1, val);
 2896
 2897	val = nr64(FFLP_CFG_1);
 2898	val |= FFLP_CFG_1_FFLPINITDONE;
 2899	nw64(FFLP_CFG_1, val);
 2900}
 2901
 2902static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
 2903				      int on)
 2904{
 2905	unsigned long reg;
 2906	u64 val;
 2907
 2908	if (class < CLASS_CODE_ETHERTYPE1 ||
 2909	    class > CLASS_CODE_ETHERTYPE2)
 2910		return -EINVAL;
 2911
 2912	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
 2913	val = nr64(reg);
 2914	if (on)
 2915		val |= L2_CLS_VLD;
 2916	else
 2917		val &= ~L2_CLS_VLD;
 2918	nw64(reg, val);
 2919
 2920	return 0;
 2921}
 2922
 2923#if 0
 2924static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
 2925				   u64 ether_type)
 2926{
 2927	unsigned long reg;
 2928	u64 val;
 2929
 2930	if (class < CLASS_CODE_ETHERTYPE1 ||
 2931	    class > CLASS_CODE_ETHERTYPE2 ||
 2932	    (ether_type & ~(u64)0xffff) != 0)
 2933		return -EINVAL;
 2934
 2935	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
 2936	val = nr64(reg);
 2937	val &= ~L2_CLS_ETYPE;
 2938	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
 2939	nw64(reg, val);
 2940
 2941	return 0;
 2942}
 2943#endif
 2944
 2945static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
 2946				     int on)
 2947{
 2948	unsigned long reg;
 2949	u64 val;
 2950
 2951	if (class < CLASS_CODE_USER_PROG1 ||
 2952	    class > CLASS_CODE_USER_PROG4)
 2953		return -EINVAL;
 2954
 2955	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
 2956	val = nr64(reg);
 2957	if (on)
 2958		val |= L3_CLS_VALID;
 2959	else
 2960		val &= ~L3_CLS_VALID;
 2961	nw64(reg, val);
 2962
 2963	return 0;
 2964}
 2965
 2966static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
 2967				  int ipv6, u64 protocol_id,
 2968				  u64 tos_mask, u64 tos_val)
 2969{
 2970	unsigned long reg;
 2971	u64 val;
 2972
 2973	if (class < CLASS_CODE_USER_PROG1 ||
 2974	    class > CLASS_CODE_USER_PROG4 ||
 2975	    (protocol_id & ~(u64)0xff) != 0 ||
 2976	    (tos_mask & ~(u64)0xff) != 0 ||
 2977	    (tos_val & ~(u64)0xff) != 0)
 2978		return -EINVAL;
 2979
 2980	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
 2981	val = nr64(reg);
 2982	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
 2983		 L3_CLS_TOSMASK | L3_CLS_TOS);
 2984	if (ipv6)
 2985		val |= L3_CLS_IPVER;
 2986	val |= (protocol_id << L3_CLS_PID_SHIFT);
 2987	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
 2988	val |= (tos_val << L3_CLS_TOS_SHIFT);
 2989	nw64(reg, val);
 2990
 2991	return 0;
 2992}
 2993
 2994static int tcam_early_init(struct niu *np)
 2995{
 2996	unsigned long i;
 2997	int err;
 2998
 2999	tcam_enable(np, 0);
 3000	tcam_set_lat_and_ratio(np,
 3001			       DEFAULT_TCAM_LATENCY,
 3002			       DEFAULT_TCAM_ACCESS_RATIO);
 3003	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
 3004		err = tcam_user_eth_class_enable(np, i, 0);
 3005		if (err)
 3006			return err;
 3007	}
 3008	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
 3009		err = tcam_user_ip_class_enable(np, i, 0);
 3010		if (err)
 3011			return err;
 3012	}
 3013
 3014	return 0;
 3015}
 3016
 3017static int tcam_flush_all(struct niu *np)
 3018{
 3019	unsigned long i;
 3020
 3021	for (i = 0; i < np->parent->tcam_num_entries; i++) {
 3022		int err = tcam_flush(np, i);
 3023		if (err)
 3024			return err;
 3025	}
 3026	return 0;
 3027}
 3028
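/* Build the HASH_TBL_ADDR value: the entry index, plus auto-increment
 * when more than one consecutive entry will be streamed through the
 * data register (hash_read()/hash_write() loop over HASH_TBL_DATA).
 */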
 3029static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
 3030{
  3031	return (u64)index | (num_entries > 1 ? HASH_TBL_ADDR_AUTOINC : 0);
 3032}
 3033
 3034#if 0
 3035static int hash_read(struct niu *np, unsigned long partition,
 3036		     unsigned long index, unsigned long num_entries,
 3037		     u64 *data)
 3038{
 3039	u64 val = hash_addr_regval(index, num_entries);
 3040	unsigned long i;
 3041
 3042	if (partition >= FCRAM_NUM_PARTITIONS ||
 3043	    index + num_entries > FCRAM_SIZE)
 3044		return -EINVAL;
 3045
 3046	nw64(HASH_TBL_ADDR(partition), val);
 3047	for (i = 0; i < num_entries; i++)
 3048		data[i] = nr64(HASH_TBL_DATA(partition));
 3049
 3050	return 0;
 3051}
 3052#endif
 3053
 3054static int hash_write(struct niu *np, unsigned long partition,
 3055		      unsigned long index, unsigned long num_entries,
 3056		      u64 *data)
 3057{
 3058	u64 val = hash_addr_regval(index, num_entries);
 3059	unsigned long i;
 3060
 3061	if (partition >= FCRAM_NUM_PARTITIONS ||
 3062	    index + (num_entries * 8) > FCRAM_SIZE)
 3063		return -EINVAL;
 3064
 3065	nw64(HASH_TBL_ADDR(partition), val);
 3066	for (i = 0; i < num_entries; i++)
 3067		nw64(HASH_TBL_DATA(partition), data[i]);
 3068
 3069	return 0;
 3070}
 3071
 3072static void fflp_reset(struct niu *np)
 3073{
 3074	u64 val;
 3075
 3076	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
 3077	udelay(10);
 3078	nw64(FFLP_CFG_1, 0);
 3079
 3080	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
 3081	nw64(FFLP_CFG_1, val);
 3082}
 3083
 3084static void fflp_set_timings(struct niu *np)
 3085{
 3086	u64 val = nr64(FFLP_CFG_1);
 3087
 3088	val &= ~FFLP_CFG_1_FFLPINITDONE;
 3089	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
 3090	nw64(FFLP_CFG_1, val);
 3091
 3092	val = nr64(FFLP_CFG_1);
 3093	val |= FFLP_CFG_1_FFLPINITDONE;
 3094	nw64(FFLP_CFG_1, val);
 3095
 3096	val = nr64(FCRAM_REF_TMR);
 3097	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
 3098	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
 3099	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
 3100	nw64(FCRAM_REF_TMR, val);
 3101}
 3102
 3103static int fflp_set_partition(struct niu *np, u64 partition,
 3104			      u64 mask, u64 base, int enable)
 3105{
 3106	unsigned long reg;
 3107	u64 val;
 3108
 3109	if (partition >= FCRAM_NUM_PARTITIONS ||
 3110	    (mask & ~(u64)0x1f) != 0 ||
 3111	    (base & ~(u64)0x1f) != 0)
 3112		return -EINVAL;
 3113
 3114	reg = FLW_PRT_SEL(partition);
 3115
 3116	val = nr64(reg);
 3117	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
 3118	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
 3119	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
 3120	if (enable)
 3121		val |= FLW_PRT_SEL_EXT;
 3122	nw64(reg, val);
 3123
 3124	return 0;
 3125}
 3126
 3127static int fflp_disable_all_partitions(struct niu *np)
 3128{
 3129	unsigned long i;
 3130
 3131	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
 3132		int err = fflp_set_partition(np, 0, 0, 0, 0);
 3133		if (err)
 3134			return err;
 3135	}
 3136	return 0;
 3137}
 3138
 3139static void fflp_llcsnap_enable(struct niu *np, int on)
 3140{
 3141	u64 val = nr64(FFLP_CFG_1);
 3142
 3143	if (on)
 3144		val |= FFLP_CFG_1_LLCSNAP;
 3145	else
 3146		val &= ~FFLP_CFG_1_LLCSNAP;
 3147	nw64(FFLP_CFG_1, val);
 3148}
 3149
 3150static void fflp_errors_enable(struct niu *np, int on)
 3151{
 3152	u64 val = nr64(FFLP_CFG_1);
 3153
 3154	if (on)
 3155		val &= ~FFLP_CFG_1_ERRORDIS;
 3156	else
 3157		val |= FFLP_CFG_1_ERRORDIS;
 3158	nw64(FFLP_CFG_1, val);
 3159}
 3160
 3161static int fflp_hash_clear(struct niu *np)
 3162{
 3163	struct fcram_hash_ipv4 ent;
 3164	unsigned long i;
 3165
 3166	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
 3167	memset(&ent, 0, sizeof(ent));
 3168	ent.header = HASH_HEADER_EXT;
 3169
 3170	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
 3171		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
 3172		if (err)
 3173			return err;
 3174	}
 3175	return 0;
 3176}
 3177
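/* One-time classifier bring-up, done under the parent lock so that
 * only the first port initializes the shared FFLP/TCAM state;
 * PARENT_FLGS_CLS_HWINIT guards against re-entry.
 */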
 3178static int fflp_early_init(struct niu *np)
 3179{
 3180	struct niu_parent *parent;
 3181	unsigned long flags;
 3182	int err;
 3183
 3184	niu_lock_parent(np, flags);
 3185
 3186	parent = np->parent;
 3187	err = 0;
 3188	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
 3189		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 3190			fflp_reset(np);
 3191			fflp_set_timings(np);
 3192			err = fflp_disable_all_partitions(np);
 3193			if (err) {
 3194				netif_printk(np, probe, KERN_DEBUG, np->dev,
 3195					     "fflp_disable_all_partitions failed, err=%d\n",
 3196					     err);
 3197				goto out;
 3198			}
 3199		}
 3200
 3201		err = tcam_early_init(np);
 3202		if (err) {
 3203			netif_printk(np, probe, KERN_DEBUG, np->dev,
 3204				     "tcam_early_init failed, err=%d\n", err);
 3205			goto out;
 3206		}
 3207		fflp_llcsnap_enable(np, 1);
 3208		fflp_errors_enable(np, 0);
 3209		nw64(H1POLY, 0);
 3210		nw64(H2POLY, 0);
 3211
 3212		err = tcam_flush_all(np);
 3213		if (err) {
 3214			netif_printk(np, probe, KERN_DEBUG, np->dev,
 3215				     "tcam_flush_all failed, err=%d\n", err);
 3216			goto out;
 3217		}
 3218		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 3219			err = fflp_hash_clear(np);
 3220			if (err) {
 3221				netif_printk(np, probe, KERN_DEBUG, np->dev,
 3222					     "fflp_hash_clear failed, err=%d\n",
 3223					     err);
 3224				goto out;
 3225			}
 3226		}
 3227
 3228		vlan_tbl_clear(np);
 3229
 3230		parent->flags |= PARENT_FLGS_CLS_HWINIT;
 3231	}
 3232out:
 3233	niu_unlock_parent(np, flags);
 3234	return err;
 3235}
 3236
 3237static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
 3238{
 3239	if (class_code < CLASS_CODE_USER_PROG1 ||
 3240	    class_code > CLASS_CODE_SCTP_IPV6)
 3241		return -EINVAL;
 3242
 3243	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
 3244	return 0;
 3245}
 3246
 3247static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
 3248{
 3249	if (class_code < CLASS_CODE_USER_PROG1 ||
 3250	    class_code > CLASS_CODE_SCTP_IPV6)
 3251		return -EINVAL;
 3252
 3253	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
 3254	return 0;
 3255}
 3256
 3257/* Entries for the ports are interleaved in the TCAM */
 3258static u16 tcam_get_index(struct niu *np, u16 idx)
 3259{
 3260	/* One entry reserved for IP fragment rule */
 3261	if (idx >= (np->clas.tcam_sz - 1))
 3262		idx = 0;
 3263	return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
 3264}
 3265
 3266static u16 tcam_get_size(struct niu *np)
 3267{
 3268	/* One entry reserved for IP fragment rule */
 3269	return np->clas.tcam_sz - 1;
 3270}
 3271
 3272static u16 tcam_get_valid_entry_cnt(struct niu *np)
 3273{
 3274	/* One entry reserved for IP fragment rule */
 3275	return np->clas.tcam_valid_entries - 1;
 3276}
 3277
 3278static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
 3279			      u32 offset, u32 size, u32 truesize)
 3280{
 3281	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
 3282
 3283	skb->len += size;
 3284	skb->data_len += size;
 3285	skb->truesize += truesize;
 3286}
 3287
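/* Hash a DMA address into the per-ring rxhash table: discard the
 * in-page offset, fold the higher bits down, and mask to the table
 * size of MAX_RBR_RING_SIZE buckets.
 */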
 3288static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
 3289{
 3290	a >>= PAGE_SHIFT;
 3291	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
 3292
 3293	return a & (MAX_RBR_RING_SIZE - 1);
 3294}
 3295
 3296static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
 3297				    struct page ***link)
 3298{
 3299	unsigned int h = niu_hash_rxaddr(rp, addr);
 3300	struct page *p, **pp;
 3301
 3302	addr &= PAGE_MASK;
 3303	pp = &rp->rxhash[h];
 3304	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
 3305		if (p->index == addr) {
 3306			*link = pp;
 3307			goto found;
 3308		}
 3309	}
 3310	BUG();
 3311
 3312found:
 3313	return p;
 3314}
 3315
 3316static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
 3317{
 3318	unsigned int h = niu_hash_rxaddr(rp, base);
 3319
 3320	page->index = base;
 3321	page->mapping = (struct address_space *) rp->rxhash[h];
 3322	rp->rxhash[h] = page;
 3323}
 3324
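/* Attach one page to the RBR starting at start_index.  A single page
 * may back several receive blocks, so the extra page references are
 * taken up front; each block released later drops exactly one.
 */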
 3325static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
 3326			    gfp_t mask, int start_index)
 3327{
 3328	struct page *page;
 3329	u64 addr;
 3330	int i;
 3331
 3332	page = alloc_page(mask);
 3333	if (!page)
 3334		return -ENOMEM;
 3335
 3336	addr = np->ops->map_page(np->device, page, 0,
 3337				 PAGE_SIZE, DMA_FROM_DEVICE);
 3338
 3339	niu_hash_page(rp, page, addr);
 3340	if (rp->rbr_blocks_per_page > 1)
 3341		atomic_add(rp->rbr_blocks_per_page - 1,
 3342			   &compound_head(page)->_count);
 3343
 3344	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
 3345		__le32 *rbr = &rp->rbr[start_index + i];
 3346
 3347		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
 3348		addr += rp->rbr_block_size;
 3349	}
 3350
 3351	return 0;
 3352}
 3353
 3354static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
 3355{
 3356	int index = rp->rbr_index;
 3357
 3358	rp->rbr_pending++;
 3359	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
 3360		int err = niu_rbr_add_page(np, rp, mask, index);
 3361
 3362		if (unlikely(err)) {
 3363			rp->rbr_pending--;
 3364			return;
 3365		}
 3366
 3367		rp->rbr_index += rp->rbr_blocks_per_page;
 3368		BUG_ON(rp->rbr_index > rp->rbr_table_size);
 3369		if (rp->rbr_index == rp->rbr_table_size)
 3370			rp->rbr_index = 0;
 3371
 3372		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
 3373			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
 3374			rp->rbr_pending = 0;
 3375		}
 3376	}
 3377}
 3378
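/* Drop one packet: walk all of its RCR entries without building an
 * skb, and free any receive page whose last block this packet
 * consumed.
 */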
 3379static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
 3380{
 3381	unsigned int index = rp->rcr_index;
 3382	int num_rcr = 0;
 3383
 3384	rp->rx_dropped++;
 3385	while (1) {
 3386		struct page *page, **link;
 3387		u64 addr, val;
 3388		u32 rcr_size;
 3389
 3390		num_rcr++;
 3391
 3392		val = le64_to_cpup(&rp->rcr[index]);
 3393		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 3394			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
 3395		page = niu_find_rxpage(rp, addr, &link);
 3396
 3397		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 3398					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 3399		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
 3400			*link = (struct page *) page->mapping;
 3401			np->ops->unmap_page(np->device, page->index,
 3402					    PAGE_SIZE, DMA_FROM_DEVICE);
 3403			page->index = 0;
 3404			page->mapping = NULL;
 3405			__free_page(page);
 3406			rp->rbr_refill_pending++;
 3407		}
 3408
 3409		index = NEXT_RCR(rp, index);
 3410		if (!(val & RCR_ENTRY_MULTI))
 3411			break;
 3412
 3413	}
 3414	rp->rcr_index = index;
 3415
 3416	return num_rcr;
 3417}
 3418
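/* Assemble one received packet: gather its (possibly multiple) RCR
 * entries as page fragments on an skb, pull the rx_pkt_hdr1 to
 * recover the hardware RX hash, and hand the skb to GRO.
 */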
 3419static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
 3420			      struct rx_ring_info *rp)
 3421{
 3422	unsigned int index = rp->rcr_index;
 3423	struct rx_pkt_hdr1 *rh;
 3424	struct sk_buff *skb;
 3425	int len, num_rcr;
 3426
 3427	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
 3428	if (unlikely(!skb))
 3429		return niu_rx_pkt_ignore(np, rp);
 3430
 3431	num_rcr = 0;
 3432	while (1) {
 3433		struct page *page, **link;
 3434		u32 rcr_size, append_size;
 3435		u64 addr, val, off;
 3436
 3437		num_rcr++;
 3438
 3439		val = le64_to_cpup(&rp->rcr[index]);
 3440
 3441		len = (val & RCR_ENTRY_L2_LEN) >>
 3442			RCR_ENTRY_L2_LEN_SHIFT;
 3443		len -= ETH_FCS_LEN;
 3444
 3445		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 3446			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
 3447		page = niu_find_rxpage(rp, addr, &link);
 3448
 3449		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 3450					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 3451
 3452		off = addr & ~PAGE_MASK;
 3453		append_size = rcr_size;
 3454		if (num_rcr == 1) {
 3455			int ptype;
 3456
 3457			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
 3458			if ((ptype == RCR_PKT_TYPE_TCP ||
 3459			     ptype == RCR_PKT_TYPE_UDP) &&
 3460			    !(val & (RCR_ENTRY_NOPORT |
 3461				     RCR_ENTRY_ERROR)))
 3462				skb->ip_summed = CHECKSUM_UNNECESSARY;
 3463			else
 3464				skb_checksum_none_assert(skb);
 3465		} else if (!(val & RCR_ENTRY_MULTI))
 3466			append_size = len - skb->len;
 3467
 3468		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
 3469		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
 3470			*link = (struct page *) page->mapping;
 3471			np->ops->unmap_page(np->device, page->index,
 3472					    PAGE_SIZE, DMA_FROM_DEVICE);
 3473			page->index = 0;
 3474			page->mapping = NULL;
 3475			rp->rbr_refill_pending++;
 3476		} else
 3477			get_page(page);
 3478
 3479		index = NEXT_RCR(rp, index);
 3480		if (!(val & RCR_ENTRY_MULTI))
 3481			break;
 3482
 3483	}
 3484	rp->rcr_index = index;
 3485
 3486	len += sizeof(*rh);
 3487	len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
 3488	__pskb_pull_tail(skb, len);
 3489
 3490	rh = (struct rx_pkt_hdr1 *) skb->data;
 3491	if (np->dev->features & NETIF_F_RXHASH)
 3492		skb->rxhash = ((u32)rh->hashval2_0 << 24 |
 3493			       (u32)rh->hashval2_1 << 16 |
 3494			       (u32)rh->hashval1_1 << 8 |
 3495			       (u32)rh->hashval1_2 << 0);
 3496	skb_pull(skb, sizeof(*rh));
 3497
 3498	rp->rx_packets++;
 3499	rp->rx_bytes += skb->len;
 3500
 3501	skb->protocol = eth_type_trans(skb, np->dev);
 3502	skb_record_rx_queue(skb, rp->rx_channel);
 3503	napi_gro_receive(napi, skb);
 3504
 3505	return num_rcr;
 3506}
 3507
 3508static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
 3509{
 3510	int blocks_per_page = rp->rbr_blocks_per_page;
 3511	int err, index = rp->rbr_index;
 3512
 3513	err = 0;
 3514	while (index < (rp->rbr_table_size - blocks_per_page)) {
 3515		err = niu_rbr_add_page(np, rp, mask, index);
 3516		if (err)
 3517			break;
 3518
 3519		index += blocks_per_page;
 3520	}
 3521
 3522	rp->rbr_index = index;
 3523	return err;
 3524}
 3525
 3526static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
 3527{
 3528	int i;
 3529
 3530	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
 3531		struct page *page;
 3532
 3533		page = rp->rxhash[i];
 3534		while (page) {
 3535			struct page *next = (struct page *) page->mapping;
 3536			u64 base = page->index;
 3537
 3538			np->ops->unmap_page(np->device, base, PAGE_SIZE,
 3539					    DMA_FROM_DEVICE);
 3540			page->index = 0;
 3541			page->mapping = NULL;
 3542
 3543			__free_page(page);
 3544
 3545			page = next;
 3546		}
 3547	}
 3548
 3549	for (i = 0; i < rp->rbr_table_size; i++)
 3550		rp->rbr[i] = cpu_to_le32(0);
 3551	rp->rbr_index = 0;
 3552}
 3553
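/* Reclaim one transmitted skb: recover the byte count from the
 * tx_pkt_hdr flags, unmap the head and every fragment, and return the
 * consumer index advanced past all descriptors the skb occupied.
 */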
 3554static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 3555{
 3556	struct tx_buff_info *tb = &rp->tx_buffs[idx];
 3557	struct sk_buff *skb = tb->skb;
 3558	struct tx_pkt_hdr *tp;
 3559	u64 tx_flags;
 3560	int i, len;
 3561
 3562	tp = (struct tx_pkt_hdr *) skb->data;
 3563	tx_flags = le64_to_cpup(&tp->flags);
 3564
 3565	rp->tx_packets++;
 3566	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
 3567			 ((tx_flags & TXHDR_PAD) / 2));
 3568
 3569	len = skb_headlen(skb);
 3570	np->ops->unmap_single(np->device, tb->mapping,
 3571			      len, DMA_TO_DEVICE);
 3572
 3573	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
 3574		rp->mark_pending--;
 3575
 3576	tb->skb = NULL;
 3577	do {
 3578		idx = NEXT_TX(rp, idx);
 3579		len -= MAX_TX_DESC_LEN;
 3580	} while (len > 0);
 3581
 3582	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 3583		tb = &rp->tx_buffs[idx];
 3584		BUG_ON(tb->skb != NULL);
 3585		np->ops->unmap_page(np->device, tb->mapping,
 3586				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
 3587				    DMA_TO_DEVICE);
 3588		idx = NEXT_TX(rp, idx);
 3589	}
 3590
 3591	dev_kfree_skb(skb);
 3592
 3593	return idx;
 3594}
 3595
 3596#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
 3597
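/* Service TX completions on one ring: the hardware packet counter in
 * TX_CS is free-running, so the completed work is the wrapped
 * difference from last_pkt_cnt.  Wake the queue once at least a
 * quarter of the ring (NIU_TX_WAKEUP_THRESH) is free again.
 */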
 3598static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 3599{
 3600	struct netdev_queue *txq;
 3601	u16 pkt_cnt, tmp;
 3602	int cons, index;
 3603	u64 cs;
 3604
 3605	index = (rp - np->tx_rings);
 3606	txq = netdev_get_tx_queue(np->dev, index);
 3607
 3608	cs = rp->tx_cs;
 3609	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
 3610		goto out;
 3611
 3612	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
 3613	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
 3614		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
 3615
 3616	rp->last_pkt_cnt = tmp;
 3617
 3618	cons = rp->cons;
 3619
 3620	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
 3621		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 3622
 3623	while (pkt_cnt--)
 3624		cons = release_tx_packet(np, rp, cons);
 3625
 3626	rp->cons = cons;
 3627	smp_mb();
 3628
 3629out:
 3630	if (unlikely(netif_tx_queue_stopped(txq) &&
 3631		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
 3632		__netif_tx_lock(txq, smp_processor_id());
 3633		if (netif_tx_queue_stopped(txq) &&
 3634		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
 3635			netif_tx_wake_queue(txq);
 3636		__netif_tx_unlock(txq);
 3637	}
 3638}
 3639
 3640static inline void niu_sync_rx_discard_stats(struct niu *np,
 3641					     struct rx_ring_info *rp,
 3642					     const int limit)
 3643{
 3644	/* This elaborate scheme is needed for reading the RX discard
 3645	 * counters, as they are only 16-bit and can overflow quickly,
 3646	 * and because the overflow indication bit is not usable as
 3647	 * the counter value does not wrap, but remains at max value
 3648	 * 0xFFFF.
 3649	 *
 3650	 * In theory and in practice counters can be lost in between
 3651	 * reading nr64() and clearing the counter nw64().  For this
 3652	 * reason, the number of counter clearings nw64() is
  3653	 * limited/reduced through the limit parameter.
 3654	 */
 3655	int rx_channel = rp->rx_channel;
 3656	u32 misc, wred;
 3657
  3658	/* RXMISC (Receive Miscellaneous Discard Count) covers the
  3659	 * following discard events: IPP (Input Port Process) drops,
  3660	 * FFLP/TCAM drops, a full RCR (Receive Completion Ring), and
  3661	 * an empty RBR (Receive Block Ring) prefetch buffer.
  3662	 */
 3663	misc = nr64(RXMISC(rx_channel));
 3664	if (unlikely((misc & RXMISC_COUNT) > limit)) {
 3665		nw64(RXMISC(rx_channel), 0);
 3666		rp->rx_errors += misc & RXMISC_COUNT;
 3667
 3668		if (unlikely(misc & RXMISC_OFLOW))
 3669			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
 3670				rx_channel);
 3671
 3672		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
 3673			     "rx-%d: MISC drop=%u over=%u\n",
  3674		     rx_channel, misc, misc - limit);
 3675	}
 3676
 3677	/* WRED (Weighted Random Early Discard) by hardware */
 3678	wred = nr64(RED_DIS_CNT(rx_channel));
 3679	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
 3680		nw64(RED_DIS_CNT(rx_channel), 0);
 3681		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
 3682
 3683		if (unlikely(wred & RED_DIS_CNT_OFLOW))
 3684			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
 3685
 3686		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
 3687			     "rx-%d: WRED drop=%u over=%u\n",
  3688		     rx_channel, wred, wred - limit);
 3689	}
 3690}
 3691
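/* Process up to budget packets on one RX ring.  The queue depth comes
 * from RCRSTAT_A (or the mailbox when the #if block is flipped);
 * completed and consumed entry counts are written back through
 * RX_DMA_CTL_STAT so the hardware can reclaim RCR/RBR space.
 */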
 3692static int niu_rx_work(struct napi_struct *napi, struct niu *np,
 3693		       struct rx_ring_info *rp, int budget)
 3694{
 3695	int qlen, rcr_done = 0, work_done = 0;
 3696	struct rxdma_mailbox *mbox = rp->mbox;
 3697	u64 stat;
 3698
 3699#if 1
 3700	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 3701	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
 3702#else
 3703	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
 3704	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
 3705#endif
 3706	mbox->rx_dma_ctl_stat = 0;
 3707	mbox->rcrstat_a = 0;
 3708
 3709	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
 3710		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
 3711		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
 3712
 3713	rcr_done = work_done = 0;
 3714	qlen = min(qlen, budget);
 3715	while (work_done < qlen) {
 3716		rcr_done += niu_process_rx_pkt(napi, np, rp);
 3717		work_done++;
 3718	}
 3719
 3720	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
 3721		unsigned int i;
 3722
 3723		for (i = 0; i < rp->rbr_refill_pending; i++)
 3724			niu_rbr_refill(np, rp, GFP_ATOMIC);
 3725		rp->rbr_refill_pending = 0;
 3726	}
 3727
 3728	stat = (RX_DMA_CTL_STAT_MEX |
 3729		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
 3730		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
 3731
 3732	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
 3733
  3734	/* Only sync discard stats when qlen indicates potential for drops */
 3735	if (qlen > 10)
 3736		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
 3737
 3738	return work_done;
 3739}
 3740
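/* NAPI core: the LDG's v0 word packs a TX channel vector in the upper
 * 32 bits and an RX channel vector in the lower 32.  TX completion
 * work is not budgeted; RX work consumes the NAPI budget.
 */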
 3741static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
 3742{
 3743	u64 v0 = lp->v0;
 3744	u32 tx_vec = (v0 >> 32);
 3745	u32 rx_vec = (v0 & 0xffffffff);
 3746	int i, work_done = 0;
 3747
 3748	netif_printk(np, intr, KERN_DEBUG, np->dev,
 3749		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
 3750
 3751	for (i = 0; i < np->num_tx_rings; i++) {
 3752		struct tx_ring_info *rp = &np->tx_rings[i];
 3753		if (tx_vec & (1 << rp->tx_channel))
 3754			niu_tx_work(np, rp);
 3755		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
 3756	}
 3757
 3758	for (i = 0; i < np->num_rx_rings; i++) {
 3759		struct rx_ring_info *rp = &np->rx_rings[i];
 3760
 3761		if (rx_vec & (1 << rp->rx_channel)) {
 3762			int this_work_done;
 3763
 3764			this_work_done = niu_rx_work(&lp->napi, np, rp,
 3765						     budget);
 3766
 3767			budget -= this_work_done;
 3768			work_done += this_work_done;
 3769		}
 3770		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
 3771	}
 3772
 3773	return work_done;
 3774}
 3775
 3776static int niu_poll(struct napi_struct *napi, int budget)
 3777{
 3778	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
 3779	struct niu *np = lp->np;
 3780	int work_done;
 3781
 3782	work_done = niu_poll_core(np, lp, budget);
 3783
 3784	if (work_done < budget) {
 3785		napi_complete(napi);
 3786		niu_ldg_rearm(np, lp, 1);
 3787	}
 3788	return work_done;
 3789}
 3790
 3791static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
 3792				  u64 stat)
 3793{
 3794	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
 3795
 3796	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
 3797		pr_cont("RBR_TMOUT ");
 3798	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
 3799		pr_cont("RSP_CNT ");
 3800	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
 3801		pr_cont("BYTE_EN_BUS ");
 3802	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
 3803		pr_cont("RSP_DAT ");
 3804	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
 3805		pr_cont("RCR_ACK ");
 3806	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
 3807		pr_cont("RCR_SHA_PAR ");
 3808	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
 3809		pr_cont("RBR_PRE_PAR ");
 3810	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
 3811		pr_cont("CONFIG ");
 3812	if (stat & RX_DMA_CTL_STAT_RCRINCON)
 3813		pr_cont("RCRINCON ");
 3814	if (stat & RX_DMA_CTL_STAT_RCRFULL)
 3815		pr_cont("RCRFULL ");
 3816	if (stat & RX_DMA_CTL_STAT_RBRFULL)
 3817		pr_cont("RBRFULL ");
 3818	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
 3819		pr_cont("RBRLOGPAGE ");
 3820	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
 3821		pr_cont("CFIGLOGPAGE ");
 3822	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
  3823		pr_cont("DC_FIFO ");
 3824
 3825	pr_cont(")\n");
 3826}
 3827
 3828static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
 3829{
 3830	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 3831	int err = 0;
  3832
 3834	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
 3835		    RX_DMA_CTL_STAT_PORT_FATAL))
 3836		err = -EINVAL;
 3837
 3838	if (err) {
 3839		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
 3840			   rp->rx_channel,
 3841			   (unsigned long long) stat);
 3842
 3843		niu_log_rxchan_errors(np, rp, stat);
 3844	}
 3845
 3846	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 3847	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
 3848
 3849	return err;
 3850}
 3851
 3852static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
 3853				  u64 cs)
 3854{
 3855	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
 3856
 3857	if (cs & TX_CS_MBOX_ERR)
 3858		pr_cont("MBOX ");
 3859	if (cs & TX_CS_PKT_SIZE_ERR)
 3860		pr_cont("PKT_SIZE ");
 3861	if (cs & TX_CS_TX_RING_OFLOW)
 3862		pr_cont("TX_RING_OFLOW ");
 3863	if (cs & TX_CS_PREF_BUF_PAR_ERR)
 3864		pr_cont("PREF_BUF_PAR ");
 3865	if (cs & TX_CS_NACK_PREF)
 3866		pr_cont("NACK_PREF ");
 3867	if (cs & TX_CS_NACK_PKT_RD)
 3868		pr_cont("NACK_PKT_RD ");
 3869	if (cs & TX_CS_CONF_PART_ERR)
 3870		pr_cont("CONF_PART ");
 3871	if (cs & TX_CS_PKT_PRT_ERR)
 3872		pr_cont("PKT_PTR ");
 3873
 3874	pr_cont(")\n");
 3875}
 3876
 3877static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
 3878{
 3879	u64 cs, logh, logl;
 3880
 3881	cs = nr64(TX_CS(rp->tx_channel));
 3882	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
 3883	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
 3884
 3885	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
 3886		   rp->tx_channel,
 3887		   (unsigned long long)cs,
 3888		   (unsigned long long)logh,
 3889		   (unsigned long long)logl);
 3890
 3891	niu_log_txchan_errors(np, rp, cs);
 3892
 3893	return -ENODEV;
 3894}
 3895
 3896static int niu_mif_interrupt(struct niu *np)
 3897{
 3898	u64 mif_status = nr64(MIF_STATUS);
 3899	int phy_mdint = 0;
 3900
 3901	if (np->flags & NIU_FLAGS_XMAC) {
 3902		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
 3903
 3904		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
 3905			phy_mdint = 1;
 3906	}
 3907
 3908	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
 3909		   (unsigned long long)mif_status, phy_mdint);
 3910
 3911	return -ENODEV;
 3912}
 3913
 3914static void niu_xmac_interrupt(struct niu *np)
 3915{
 3916	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
 3917	u64 val;
 3918
 3919	val = nr64_mac(XTXMAC_STATUS);
 3920	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
 3921		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
 3922	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
 3923		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
 3924	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
 3925		mp->tx_fifo_errors++;
 3926	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
 3927		mp->tx_overflow_errors++;
 3928	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
 3929		mp->tx_max_pkt_size_errors++;
 3930	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
 3931		mp->tx_underflow_errors++;
 3932
 3933	val = nr64_mac(XRXMAC_STATUS);
 3934	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
 3935		mp->rx_local_faults++;
 3936	if (val & XRXMAC_STATUS_RFLT_DET)
 3937		mp->rx_remote_faults++;
 3938	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
 3939		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
 3940	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
 3941		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
 3942	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
 3943		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
 3944	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
 3945		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
  3946	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
  3947		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
 3950	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
 3951		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
 3952	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
 3953		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
 3954	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
 3955		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
 3956	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
 3957		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
 3958	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
 3959		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
 3960	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
 3961		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
 3962	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
 3963		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
 3964	if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
 3965		mp->rx_octets += RXMAC_BT_CNT_COUNT;
 3966	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
 3967		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
 3968	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
 3969		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
 3970	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
 3971		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
 3972	if (val & XRXMAC_STATUS_RXUFLOW)
 3973		mp->rx_underflows++;
 3974	if (val & XRXMAC_STATUS_RXOFLOW)
 3975		mp->rx_overflows++;
 3976
 3977	val = nr64_mac(XMAC_FC_STAT);
 3978	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
 3979		mp->pause_off_state++;
 3980	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
 3981		mp->pause_on_state++;
 3982	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
 3983		mp->pause_received++;
 3984}
 3985
 3986static void niu_bmac_interrupt(struct niu *np)
 3987{
 3988	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
 3989	u64 val;
 3990
 3991	val = nr64_mac(BTXMAC_STATUS);
 3992	if (val & BTXMAC_STATUS_UNDERRUN)
 3993		mp->tx_underflow_errors++;
 3994	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
 3995		mp->tx_max_pkt_size_errors++;
 3996	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
 3997		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
 3998	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
 3999		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
 4000
 4001	val = nr64_mac(BRXMAC_STATUS);
 4002	if (val & BRXMAC_STATUS_OVERFLOW)
 4003		mp->rx_overflows++;
 4004	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
 4005		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
 4006	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
 4007		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
 4008	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
 4009		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
 4010	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
 4011		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
 4012
 4013	val = nr64_mac(BMAC_CTRL_STATUS);
 4014	if (val & BMAC_CTRL_STATUS_NOPAUSE)
 4015		mp->pause_off_state++;
 4016	if (val & BMAC_CTRL_STATUS_PAUSE)
 4017		mp->pause_on_state++;
 4018	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
 4019		mp->pause_received++;
 4020}
 4021
 4022static int niu_mac_interrupt(struct niu *np)
 4023{
 4024	if (np->flags & NIU_FLAGS_XMAC)
 4025		niu_xmac_interrupt(np);
 4026	else
 4027		niu_bmac_interrupt(np);
 4028
 4029	return 0;
 4030}
 4031
 4032static void niu_log_device_error(struct niu *np, u64 stat)
 4033{
 4034	netdev_err(np->dev, "Core device errors ( ");
 4035
 4036	if (stat & SYS_ERR_MASK_META2)
 4037		pr_cont("META2 ");
 4038	if (stat & SYS_ERR_MASK_META1)
 4039		pr_cont("META1 ");
 4040	if (stat & SYS_ERR_MASK_PEU)
 4041		pr_cont("PEU ");
 4042	if (stat & SYS_ERR_MASK_TXC)
 4043		pr_cont("TXC ");
 4044	if (stat & SYS_ERR_MASK_RDMC)
 4045		pr_cont("RDMC ");
 4046	if (stat & SYS_ERR_MASK_TDMC)
 4047		pr_cont("TDMC ");
 4048	if (stat & SYS_ERR_MASK_ZCP)
 4049		pr_cont("ZCP ");
 4050	if (stat & SYS_ERR_MASK_FFLP)
 4051		pr_cont("FFLP ");
 4052	if (stat & SYS_ERR_MASK_IPP)
 4053		pr_cont("IPP ");
 4054	if (stat & SYS_ERR_MASK_MAC)
 4055		pr_cont("MAC ");
 4056	if (stat & SYS_ERR_MASK_SMX)
 4057		pr_cont("SMX ");
 4058
 4059	pr_cont(")\n");
 4060}
 4061
 4062static int niu_device_error(struct niu *np)
 4063{
 4064	u64 stat = nr64(SYS_ERR_STAT);
 4065
 4066	netdev_err(np->dev, "Core device error, stat[%llx]\n",
 4067		   (unsigned long long)stat);
 4068
 4069	niu_log_device_error(np, stat);
 4070
 4071	return -ENODEV;
 4072}
 4073
 4074static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
 4075			      u64 v0, u64 v1, u64 v2)
 4076{
  4078	int i, err = 0;
 4079
 4080	lp->v0 = v0;
 4081	lp->v1 = v1;
 4082	lp->v2 = v2;
 4083
 4084	if (v1 & 0x00000000ffffffffULL) {
 4085		u32 rx_vec = (v1 & 0xffffffff);
 4086
 4087		for (i = 0; i < np->num_rx_rings; i++) {
 4088			struct rx_ring_info *rp = &np->rx_rings[i];
 4089
 4090			if (rx_vec & (1 << rp->rx_channel)) {
 4091				int r = niu_rx_error(np, rp);
 4092				if (r) {
 4093					err = r;
 4094				} else {
 4095					if (!v0)
 4096						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 4097						     RX_DMA_CTL_STAT_MEX);
 4098				}
 4099			}
 4100		}
 4101	}
 4102	if (v1 & 0x7fffffff00000000ULL) {
 4103		u32 tx_vec = (v1 >> 32) & 0x7fffffff;
 4104
 4105		for (i = 0; i < np->num_tx_rings; i++) {
 4106			struct tx_ring_info *rp = &np->tx_rings[i];
 4107
 4108			if (tx_vec & (1 << rp->tx_channel)) {
 4109				int r = niu_tx_error(np, rp);
 4110				if (r)
 4111					err = r;
 4112			}
 4113		}
 4114	}
 4115	if ((v0 | v1) & 0x8000000000000000ULL) {
 4116		int r = niu_mif_interrupt(np);
 4117		if (r)
 4118			err = r;
 4119	}
 4120	if (v2) {
 4121		if (v2 & 0x01ef) {
 4122			int r = niu_mac_interrupt(np);
 4123			if (r)
 4124				err = r;
 4125		}
 4126		if (v2 & 0x0210) {
 4127			int r = niu_device_error(np);
 4128			if (r)
 4129				err = r;
 4130		}
 4131	}
 4132
 4133	if (err)
 4134		niu_enable_interrupts(np, 0);
 4135
 4136	return err;
 4137}
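      
      /* A sketch of the service-vector layout as the masks above imply:
       * LDSV1 carries RX channel error bits in its low 32 bits and TX
       * channel error bits in bits 32-62, bit 63 of LDSV0/LDSV1 flags
       * the MIF, and LDSV2 splits MAC status (0x01ef) from core device
       * errors (0x0210).  LDSV0's low and high halves hold the normal
       * RX/TX service bits handled by the fastpath below.  This is an
       * inference from the code, not a datasheet citation.
       */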
 4138
 4139static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
 4140			    int ldn)
 4141{
 4142	struct rxdma_mailbox *mbox = rp->mbox;
 4143	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
 4144
 4145	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
 4146		      RX_DMA_CTL_STAT_RCRTO);
 4147	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
 4148
 4149	netif_printk(np, intr, KERN_DEBUG, np->dev,
 4150		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
 4151}
 4152
 4153static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
 4154			    int ldn)
 4155{
 4156	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
 4157
 4158	netif_printk(np, intr, KERN_DEBUG, np->dev,
 4159		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
 4160}
 4161
 4162static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
 4163{
 4164	struct niu_parent *parent = np->parent;
 4165	u32 rx_vec, tx_vec;
 4166	int i;
 4167
 4168	tx_vec = (v0 >> 32);
 4169	rx_vec = (v0 & 0xffffffff);
 4170
 4171	for (i = 0; i < np->num_rx_rings; i++) {
 4172		struct rx_ring_info *rp = &np->rx_rings[i];
 4173		int ldn = LDN_RXDMA(rp->rx_channel);
 4174
 4175		if (parent->ldg_map[ldn] != ldg)
 4176			continue;
 4177
 4178		nw64(LD_IM0(ldn), LD_IM0_MASK);
 4179		if (rx_vec & (1 << rp->rx_channel))
 4180			niu_rxchan_intr(np, rp, ldn);
 4181	}
 4182
 4183	for (i = 0; i < np->num_tx_rings; i++) {
 4184		struct tx_ring_info *rp = &np->tx_rings[i];
 4185		int ldn = LDN_TXDMA(rp->tx_channel);
 4186
 4187		if (parent->ldg_map[ldn] != ldg)
 4188			continue;
 4189
 4190		nw64(LD_IM0(ldn), LD_IM0_MASK);
 4191		if (tx_vec & (1 << rp->tx_channel))
 4192			niu_txchan_intr(np, rp, ldn);
 4193	}
 4194}
 4195
 4196static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
 4197			      u64 v0, u64 v1, u64 v2)
 4198{
 4199	if (likely(napi_schedule_prep(&lp->napi))) {
 4200		lp->v0 = v0;
 4201		lp->v1 = v1;
 4202		lp->v2 = v2;
 4203		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
 4204		__napi_schedule(&lp->napi);
 4205	}
 4206}
 4207
 4208static irqreturn_t niu_interrupt(int irq, void *dev_id)
 4209{
 4210	struct niu_ldg *lp = dev_id;
 4211	struct niu *np = lp->np;
 4212	int ldg = lp->ldg_num;
 4213	unsigned long flags;
 4214	u64 v0, v1, v2;
 4215
 4216	if (netif_msg_intr(np))
 4217		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
 4218		       __func__, lp, ldg);
 4219
 4220	spin_lock_irqsave(&np->lock, flags);
 4221
 4222	v0 = nr64(LDSV0(ldg));
 4223	v1 = nr64(LDSV1(ldg));
 4224	v2 = nr64(LDSV2(ldg));
 4225
 4226	if (netif_msg_intr(np))
 4227		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
 4228		       (unsigned long long) v0,
 4229		       (unsigned long long) v1,
 4230		       (unsigned long long) v2);
 4231
 4232	if (unlikely(!v0 && !v1 && !v2)) {
 4233		spin_unlock_irqrestore(&np->lock, flags);
 4234		return IRQ_NONE;
 4235	}
 4236
 4237	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
 4238		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
 4239		if (err)
 4240			goto out;
 4241	}
 4242	if (likely(v0 & ~((u64)1 << LDN_MIF)))
 4243		niu_schedule_napi(np, lp, v0, v1, v2);
 4244	else
 4245		niu_ldg_rearm(np, lp, 1);
 4246out:
 4247	spin_unlock_irqrestore(&np->lock, flags);
 4248
 4249	return IRQ_HANDLED;
 4250}
 4251
 4252static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
 4253{
 4254	if (rp->mbox) {
 4255		np->ops->free_coherent(np->device,
 4256				       sizeof(struct rxdma_mailbox),
 4257				       rp->mbox, rp->mbox_dma);
 4258		rp->mbox = NULL;
 4259	}
 4260	if (rp->rcr) {
 4261		np->ops->free_coherent(np->device,
 4262				       MAX_RCR_RING_SIZE * sizeof(__le64),
 4263				       rp->rcr, rp->rcr_dma);
 4264		rp->rcr = NULL;
 4265		rp->rcr_table_size = 0;
 4266		rp->rcr_index = 0;
 4267	}
 4268	if (rp->rbr) {
 4269		niu_rbr_free(np, rp);
 4270
 4271		np->ops->free_coherent(np->device,
 4272				       MAX_RBR_RING_SIZE * sizeof(__le32),
 4273				       rp->rbr, rp->rbr_dma);
 4274		rp->rbr = NULL;
 4275		rp->rbr_table_size = 0;
 4276		rp->rbr_index = 0;
 4277	}
 4278	kfree(rp->rxhash);
 4279	rp->rxhash = NULL;
 4280}
 4281
 4282static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
 4283{
 4284	if (rp->mbox) {
 4285		np->ops->free_coherent(np->device,
 4286				       sizeof(struct txdma_mailbox),
 4287				       rp->mbox, rp->mbox_dma);
 4288		rp->mbox = NULL;
 4289	}
 4290	if (rp->descr) {
 4291		int i;
 4292
 4293		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
 4294			if (rp->tx_buffs[i].skb)
 4295				(void) release_tx_packet(np, rp, i);
 4296		}
 4297
 4298		np->ops->free_coherent(np->device,
 4299				       MAX_TX_RING_SIZE * sizeof(__le64),
 4300				       rp->descr, rp->descr_dma);
 4301		rp->descr = NULL;
 4302		rp->pending = 0;
 4303		rp->prod = 0;
 4304		rp->cons = 0;
 4305		rp->wrap_bit = 0;
 4306	}
 4307}
 4308
 4309static void niu_free_channels(struct niu *np)
 4310{
 4311	int i;
 4312
 4313	if (np->rx_rings) {
 4314		for (i = 0; i < np->num_rx_rings; i++) {
 4315			struct rx_ring_info *rp = &np->rx_rings[i];
 4316
 4317			niu_free_rx_ring_info(np, rp);
 4318		}
 4319		kfree(np->rx_rings);
 4320		np->rx_rings = NULL;
 4321		np->num_rx_rings = 0;
 4322	}
 4323
 4324	if (np->tx_rings) {
 4325		for (i = 0; i < np->num_tx_rings; i++) {
 4326			struct tx_ring_info *rp = &np->tx_rings[i];
 4327
 4328			niu_free_tx_ring_info(np, rp);
 4329		}
 4330		kfree(np->tx_rings);
 4331		np->tx_rings = NULL;
 4332		np->num_tx_rings = 0;
 4333	}
 4334}
 4335
 4336static int niu_alloc_rx_ring_info(struct niu *np,
 4337				  struct rx_ring_info *rp)
 4338{
 4339	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
 4340
 4341	rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
 4342			     GFP_KERNEL);
 4343	if (!rp->rxhash)
 4344		return -ENOMEM;
 4345
 4346	rp->mbox = np->ops->alloc_coherent(np->device,
 4347					   sizeof(struct rxdma_mailbox),
 4348					   &rp->mbox_dma, GFP_KERNEL);
 4349	if (!rp->mbox)
 4350		return -ENOMEM;
 4351	if ((unsigned long)rp->mbox & (64UL - 1)) {
 4352		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
 4353			   rp->mbox);
 4354		return -EINVAL;
 4355	}
 4356
 4357	rp->rcr = np->ops->alloc_coherent(np->device,
 4358					  MAX_RCR_RING_SIZE * sizeof(__le64),
 4359					  &rp->rcr_dma, GFP_KERNEL);
 4360	if (!rp->rcr)
 4361		return -ENOMEM;
 4362	if ((unsigned long)rp->rcr & (64UL - 1)) {
 4363		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
 4364			   rp->rcr);
 4365		return -EINVAL;
 4366	}
 4367	rp->rcr_table_size = MAX_RCR_RING_SIZE;
 4368	rp->rcr_index = 0;
 4369
 4370	rp->rbr = np->ops->alloc_coherent(np->device,
 4371					  MAX_RBR_RING_SIZE * sizeof(__le32),
 4372					  &rp->rbr_dma, GFP_KERNEL);
 4373	if (!rp->rbr)
 4374		return -ENOMEM;
 4375	if ((unsigned long)rp->rbr & (64UL - 1)) {
 4376		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
 4377			   rp->rbr);
 4378		return -EINVAL;
 4379	}
 4380	rp->rbr_table_size = MAX_RBR_RING_SIZE;
 4381	rp->rbr_index = 0;
 4382	rp->rbr_pending = 0;
 4383
 4384	return 0;
 4385}
 4386
 4387static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
 4388{
 4389	int mtu = np->dev->mtu;
 4390
 4391	/* These values are recommended by the HW designers for fair
 4392	 * utilization of DRR amongst the rings.
 4393	 */
 4394	rp->max_burst = mtu + 32;
 4395	if (rp->max_burst > 4096)
 4396		rp->max_burst = 4096;
 4397}
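      
      /* Worked examples of the clamp above: a standard 1500-byte MTU
       * yields a max_burst of 1532, while a 9000-byte jumbo MTU would
       * be capped at 4096.
       */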
 4398
 4399static int niu_alloc_tx_ring_info(struct niu *np,
 4400				  struct tx_ring_info *rp)
 4401{
 4402	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
 4403
 4404	rp->mbox = np->ops->alloc_coherent(np->device,
 4405					   sizeof(struct txdma_mailbox),
 4406					   &rp->mbox_dma, GFP_KERNEL);
 4407	if (!rp->mbox)
 4408		return -ENOMEM;
 4409	if ((unsigned long)rp->mbox & (64UL - 1)) {
 4410		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
 4411			   rp->mbox);
 4412		return -EINVAL;
 4413	}
 4414
 4415	rp->descr = np->ops->alloc_coherent(np->device,
 4416					    MAX_TX_RING_SIZE * sizeof(__le64),
 4417					    &rp->descr_dma, GFP_KERNEL);
 4418	if (!rp->descr)
 4419		return -ENOMEM;
 4420	if ((unsigned long)rp->descr & (64UL - 1)) {
 4421		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
 4422			   rp->descr);
 4423		return -EINVAL;
 4424	}
 4425
 4426	rp->pending = MAX_TX_RING_SIZE;
 4427	rp->prod = 0;
 4428	rp->cons = 0;
 4429	rp->wrap_bit = 0;
 4430
 4431	/* XXX make these configurable... XXX */
 4432	rp->mark_freq = rp->pending / 4;
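      	/* For example, a 256-entry ring would request a TX mark
      	 * interrupt after every 64th descriptor.
      	 */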
 4433
 4434	niu_set_max_burst(np, rp);
 4435
 4436	return 0;
 4437}
 4438
 4439static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
 4440{
 4441	u16 bss;
 4442
 4443	bss = min(PAGE_SHIFT, 15);
 4444
 4445	rp->rbr_block_size = 1 << bss;
 4446	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
 4447
 4448	rp->rbr_sizes[0] = 256;
 4449	rp->rbr_sizes[1] = 1024;
 4450	if (np->dev->mtu > ETH_DATA_LEN) {
 4451		switch (PAGE_SIZE) {
 4452		case 4 * 1024:
 4453			rp->rbr_sizes[2] = 4096;
 4454			break;
 4455
 4456		default:
 4457			rp->rbr_sizes[2] = 8192;
 4458			break;
 4459		}
 4460	} else {
 4461		rp->rbr_sizes[2] = 2048;
 4462	}
 4463	rp->rbr_sizes[3] = rp->rbr_block_size;
 4464}
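      
      /* Worked examples of the sizing above: with 4 KB pages bss is 12,
       * so rbr_block_size is 4096 and each page holds exactly one block;
       * with 64 KB pages bss is clamped to 15, giving 32 KB blocks and
       * two blocks per page.
       */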
 4465
 4466static int niu_alloc_channels(struct niu *np)
 4467{
 4468	struct niu_parent *parent = np->parent;
 4469	int first_rx_channel, first_tx_channel;
 4470	int num_rx_rings, num_tx_rings;
 4471	struct rx_ring_info *rx_rings;
 4472	struct tx_ring_info *tx_rings;
 4473	int i, port, err;
 4474
 4475	port = np->port;
 4476	first_rx_channel = first_tx_channel = 0;
 4477	for (i = 0; i < port; i++) {
 4478		first_rx_channel += parent->rxchan_per_port[i];
 4479		first_tx_channel += parent->txchan_per_port[i];
 4480	}
 4481
 4482	num_rx_rings = parent->rxchan_per_port[port];
 4483	num_tx_rings = parent->txchan_per_port[port];
 4484
 4485	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
 4486			   GFP_KERNEL);
 4487	err = -ENOMEM;
 4488	if (!rx_rings)
 4489		goto out_err;
 4490
 4491	np->num_rx_rings = num_rx_rings;
 4492	smp_wmb();
 4493	np->rx_rings = rx_rings;
 4494
 4495	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
 4496
 4497	for (i = 0; i < np->num_rx_rings; i++) {
 4498		struct rx_ring_info *rp = &np->rx_rings[i];
 4499
 4500		rp->np = np;
 4501		rp->rx_channel = first_rx_channel + i;
 4502
 4503		err = niu_alloc_rx_ring_info(np, rp);
 4504		if (err)
 4505			goto out_err;
 4506
 4507		niu_size_rbr(np, rp);
 4508
 4509		/* XXX better defaults, configurable, etc... XXX */
 4510		rp->nonsyn_window = 64;
 4511		rp->nonsyn_threshold = rp->rcr_table_size - 64;
 4512		rp->syn_window = 64;
 4513		rp->syn_threshold = rp->rcr_table_size - 64;
 4514		rp->rcr_pkt_threshold = 16;
 4515		rp->rcr_timeout = 8;
 4516		rp->rbr_kick_thresh = RBR_REFILL_MIN;
 4517		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
 4518			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
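      		/* On a 4 KB page system each RBR entry maps a whole page
      		 * (rbr_blocks_per_page == 1), so the clamp above is a
      		 * no-op and RBR_REFILL_MIN alone sets the kick threshold.
      		 */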
 4519
 4520		err = niu_rbr_fill(np, rp, GFP_KERNEL);
 4521		if (err)
  4522			goto out_err;
 4523	}
 4524
 4525	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
 4526			   GFP_KERNEL);
 4527	err = -ENOMEM;
 4528	if (!tx_rings)
 4529		goto out_err;
 4530
 4531	np->num_tx_rings = num_tx_rings;
 4532	smp_wmb();
 4533	np->tx_rings = tx_rings;
 4534
 4535	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
 4536
 4537	for (i = 0; i < np->num_tx_rings; i++) {
 4538		struct tx_ring_info *rp = &np->tx_rings[i];
 4539
 4540		rp->np = np;
 4541		rp->tx_channel = first_tx_channel + i;
 4542
 4543		err = niu_alloc_tx_ring_info(np, rp);
 4544		if (err)
 4545			goto out_err;
 4546	}
 4547
 4548	return 0;
 4549
 4550out_err:
 4551	niu_free_channels(np);
 4552	return err;
 4553}
 4554
 4555static int niu_tx_cs_sng_poll(struct niu *np, int channel)
 4556{
 4557	int limit = 1000;
 4558
 4559	while (--limit > 0) {
 4560		u64 val = nr64(TX_CS(channel));
 4561		if (val & TX_CS_SNG_STATE)
 4562			return 0;
 4563	}
 4564	return -ENODEV;
 4565}
 4566
 4567static int niu_tx_channel_stop(struct niu *np, int channel)
 4568{
 4569	u64 val = nr64(TX_CS(channel));
 4570
 4571	val |= TX_CS_STOP_N_GO;
 4572	nw64(TX_CS(channel), val);
 4573
 4574	return niu_tx_cs_sng_poll(np, channel);
 4575}
 4576
 4577static int niu_tx_cs_reset_poll(struct niu *np, int channel)
 4578{
 4579	int limit = 1000;
 4580
 4581	while (--limit > 0) {
 4582		u64 val = nr64(TX_CS(channel));
 4583		if (!(val & TX_CS_RST))
 4584			return 0;
 4585	}
 4586	return -ENODEV;
 4587}
 4588
 4589static int niu_tx_channel_reset(struct niu *np, int channel)
 4590{
 4591	u64 val = nr64(TX_CS(channel));
 4592	int err;
 4593
 4594	val |= TX_CS_RST;
 4595	nw64(TX_CS(channel), val);
 4596
 4597	err = niu_tx_cs_reset_poll(np, channel);
 4598	if (!err)
 4599		nw64(TX_RING_KICK(channel), 0);
 4600
 4601	return err;
 4602}
 4603
 4604static int niu_tx_channel_lpage_init(struct niu *np, int channel)
 4605{
 4606	u64 val;
 4607
 4608	nw64(TX_LOG_MASK1(channel), 0);
 4609	nw64(TX_LOG_VAL1(channel), 0);
 4610	nw64(TX_LOG_MASK2(channel), 0);
 4611	nw64(TX_LOG_VAL2(channel), 0);
 4612	nw64(TX_LOG_PAGE_RELO1(channel), 0);
 4613	nw64(TX_LOG_PAGE_RELO2(channel), 0);
 4614	nw64(TX_LOG_PAGE_HDL(channel), 0);
 4615
 4616	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
 4617	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
 4618	nw64(TX_LOG_PAGE_VLD(channel), val);
 4619
 4620	/* XXX TXDMA 32bit mode? XXX */
 4621
 4622	return 0;
 4623}
 4624
 4625static void niu_txc_enable_port(struct niu *np, int on)
 4626{
 4627	unsigned long flags;
 4628	u64 val, mask;
 4629
 4630	niu_lock_parent(np, flags);
 4631	val = nr64(TXC_CONTROL);
 4632	mask = (u64)1 << np->port;
 4633	if (on) {
 4634		val |= TXC_CONTROL_ENABLE | mask;
 4635	} else {
 4636		val &= ~mask;
 4637		if ((val & ~TXC_CONTROL_ENABLE) == 0)
 4638			val &= ~TXC_CONTROL_ENABLE;
 4639	}
 4640	nw64(TXC_CONTROL, val);
 4641	niu_unlock_parent(np, flags);
 4642}
 4643
 4644static void niu_txc_set_imask(struct niu *np, u64 imask)
 4645{
 4646	unsigned long flags;
 4647	u64 val;
 4648
 4649	niu_lock_parent(np, flags);
 4650	val = nr64(TXC_INT_MASK);
 4651	val &= ~TXC_INT_MASK_VAL(np->port);
  4652	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
      	/* Write the updated mask back; without this store the value
      	 * computed above would be silently discarded.
      	 */
      	nw64(TXC_INT_MASK, val);
  4653	niu_unlock_parent(np, flags);
 4654}
 4655
 4656static void niu_txc_port_dma_enable(struct niu *np, int on)
 4657{
 4658	u64 val = 0;
 4659
 4660	if (on) {
 4661		int i;
 4662
 4663		for (i = 0; i < np->num_tx_rings; i++)
 4664			val |= (1 << np->tx_rings[i].tx_channel);
 4665	}
 4666	nw64(TXC_PORT_DMA(np->port), val);
 4667}
 4668
 4669static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 4670{
 4671	int err, channel = rp->tx_channel;
 4672	u64 val, ring_len;
 4673
 4674	err = niu_tx_channel_stop(np, channel);
 4675	if (err)
 4676		return err;
 4677
 4678	err = niu_tx_channel_reset(np, channel);
 4679	if (err)
 4680		return err;
 4681
 4682	err = niu_tx_channel_lpage_init(np, channel);
 4683	if (err)
 4684		return err;
 4685
 4686	nw64(TXC_DMA_MAX(channel), rp->max_burst);
 4687	nw64(TX_ENT_MSK(channel), 0);
 4688
 4689	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
 4690			      TX_RNG_CFIG_STADDR)) {
 4691		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
 4692			   channel, (unsigned long long)rp->descr_dma);
 4693		return -EINVAL;
 4694	}
 4695
  4696	/* The length field in TX_RNG_CFIG is measured in 64-byte
  4697	 * blocks.  rp->pending is the number of TX descriptors in our
  4698	 * ring and each descriptor is 8 bytes, so dividing the descriptor
  4699	 * count by 8 yields the ring length in 64-byte blocks.
  4700	 */
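      	/* A worked instance of that conversion: a 1024-descriptor ring
      	 * occupies 1024 * 8 = 8192 bytes, i.e. 128 64-byte blocks, so
      	 * ring_len would be 128.
      	 */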
 4701	ring_len = (rp->pending / 8);
 4702
 4703	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
 4704	       rp->descr_dma);
 4705	nw64(TX_RNG_CFIG(channel), val);
 4706
 4707	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
 4708	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
 4709		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
 4710			    channel, (unsigned long long)rp->mbox_dma);
 4711		return -EINVAL;
 4712	}
 4713	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
 4714	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
 4715
 4716	nw64(TX_CS(channel), 0);
 4717
 4718	rp->last_pkt_cnt = 0;
 4719
 4720	return 0;
 4721}
 4722
 4723static void niu_init_rdc_groups(struct niu *np)
 4724{
 4725	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
 4726	int i, first_table_num = tp->first_table_num;
 4727
 4728	for (i = 0; i < tp->num_tables; i++) {
 4729		struct rdc_table *tbl = &tp->tables[i];
 4730		int this_table = first_table_num + i;
 4731		int slot;
 4732
 4733		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
 4734			nw64(RDC_TBL(this_table, slot),
 4735			     tbl->rxdma_channel[slot]);
 4736	}
 4737
 4738	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
 4739}
 4740
 4741static void niu_init_drr_weight(struct niu *np)
 4742{
 4743	int type = phy_decode(np->parent->port_phy, np->port);
 4744	u64 val;
 4745
 4746	switch (type) {
 4747	case PORT_TYPE_10G:
 4748		val = PT_DRR_WEIGHT_DEFAULT_10G;
 4749		break;
 4750
 4751	case PORT_TYPE_1G:
 4752	default:
 4753		val = PT_DRR_WEIGHT_DEFAULT_1G;
 4754		break;
 4755	}
 4756	nw64(PT_DRR_WT(np->port), val);
 4757}
 4758
 4759static int niu_init_hostinfo(struct niu *np)
 4760{
 4761	struct niu_parent *parent = np->parent;
 4762	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 4763	int i, err, num_alt = niu_num_alt_addr(np);
 4764	int first_rdc_table = tp->first_table_num;
 4765
 4766	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 4767	if (err)
 4768		return err;
 4769
 4770	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 4771	if (err)
 4772		return err;
 4773
 4774	for (i = 0; i < num_alt; i++) {
 4775		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
 4776		if (err)
 4777			return err;
 4778	}
 4779
 4780	return 0;
 4781}
 4782
 4783static int niu_rx_channel_reset(struct niu *np, int channel)
 4784{
 4785	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
 4786				      RXDMA_CFIG1_RST, 1000, 10,
 4787				      "RXDMA_CFIG1");
 4788}
 4789
 4790static int niu_rx_channel_lpage_init(struct niu *np, int channel)
 4791{
 4792	u64 val;
 4793
 4794	nw64(RX_LOG_MASK1(channel), 0);
 4795	nw64(RX_LOG_VAL1(channel), 0);
 4796	nw64(RX_LOG_MASK2(channel), 0);
 4797	nw64(RX_LOG_VAL2(channel), 0);
 4798	nw64(RX_LOG_PAGE_RELO1(channel), 0);
 4799	nw64(RX_LOG_PAGE_RELO2(channel), 0);
 4800	nw64(RX_LOG_PAGE_HDL(channel), 0);
 4801
 4802	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
 4803	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
 4804	nw64(RX_LOG_PAGE_VLD(channel), val);
 4805
 4806	return 0;
 4807}
 4808
 4809static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
 4810{
 4811	u64 val;
 4812
 4813	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
 4814	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
 4815	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
 4816	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
 4817	nw64(RDC_RED_PARA(rp->rx_channel), val);
 4818}
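      
      /* With the defaults chosen in niu_alloc_channels() (windows of 64,
       * thresholds of rcr_table_size - 64), random early discard should
       * only engage once the RCR is within 64 entries of full.  That is
       * an inference from the defaults, not a documented guarantee.
       */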
 4819
 4820static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
 4821{
 4822	u64 val = 0;
 4823
 4824	*ret = 0;
 4825	switch (rp->rbr_block_size) {
 4826	case 4 * 1024:
 4827		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4828		break;
 4829	case 8 * 1024:
 4830		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4831		break;
 4832	case 16 * 1024:
 4833		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4834		break;
 4835	case 32 * 1024:
 4836		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4837		break;
 4838	default:
 4839		return -EINVAL;
 4840	}
 4841	val |= RBR_CFIG_B_VLD2;
 4842	switch (rp->rbr_sizes[2]) {
 4843	case 2 * 1024:
 4844		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4845		break;
 4846	case 4 * 1024:
 4847		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4848		break;
 4849	case 8 * 1024:
 4850		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4851		break;
 4852	case 16 * 1024:
 4853		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4854		break;
 4855
 4856	default:
 4857		return -EINVAL;
 4858	}
 4859	val |= RBR_CFIG_B_VLD1;
 4860	switch (rp->rbr_sizes[1]) {
 4861	case 1 * 1024:
 4862		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4863		break;
 4864	case 2 * 1024:
 4865		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4866		break;
 4867	case 4 * 1024:
 4868		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4869		break;
 4870	case 8 * 1024:
 4871		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4872		break;
 4873
 4874	default:
 4875		return -EINVAL;
 4876	}
 4877	val |= RBR_CFIG_B_VLD0;
 4878	switch (rp->rbr_sizes[0]) {
 4879	case 256:
 4880		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
 4881		break;
 4882	case 512:
 4883		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
 4884		break;
 4885	case 1 * 1024:
 4886		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
 4887		break;
 4888	case 2 * 1024:
 4889		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
 4890		break;
 4891
 4892	default:
 4893		return -EINVAL;
 4894	}
 4895
 4896	*ret = val;
 4897	return 0;
 4898}
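      
      /* A worked example for the encoder above, assuming the defaults
       * laid down by niu_size_rbr() on a 4 KB page system with a
       * standard MTU: rbr_sizes[] of 256/1024/2048 plus a 4096-byte
       * block size selects RBR_BUFSZ0_256, RBR_BUFSZ1_1K, RBR_BUFSZ2_2K
       * and RBR_BLKSIZE_4K, with all three VLD bits set.
       */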
 4899
 4900static int niu_enable_rx_channel(struct niu *np, int channel, int on)
 4901{
 4902	u64 val = nr64(RXDMA_CFIG1(channel));
 4903	int limit;
 4904
 4905	if (on)
 4906		val |= RXDMA_CFIG1_EN;
 4907	else
 4908		val &= ~RXDMA_CFIG1_EN;
 4909	nw64(RXDMA_CFIG1(channel), val);
 4910
 4911	limit = 1000;
 4912	while (--limit > 0) {
 4913		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
 4914			break;
 4915		udelay(10);
 4916	}
 4917	if (limit <= 0)
 4918		return -ENODEV;
 4919	return 0;
 4920}
 4921
 4922static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 4923{
 4924	int err, channel = rp->rx_channel;
 4925	u64 val;
 4926
 4927	err = niu_rx_channel_reset(np, channel);
 4928	if (err)
 4929		return err;
 4930
 4931	err = niu_rx_channel_lpage_init(np, channel);
 4932	if (err)
 4933		return err;
 4934
 4935	niu_rx_channel_wred_init(np, rp);
 4936
 4937	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
 4938	nw64(RX_DMA_CTL_STAT(channel),
 4939	     (RX_DMA_CTL_STAT_MEX |
 4940	      RX_DMA_CTL_STAT_RCRTHRES |
 4941	      RX_DMA_CTL_STAT_RCRTO |
 4942	      RX_DMA_CTL_STAT_RBR_EMPTY));
 4943	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
 4944	nw64(RXDMA_CFIG2(channel),
 4945	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
 4946	      RXDMA_CFIG2_FULL_HDR));
 4947	nw64(RBR_CFIG_A(channel),
 4948	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
 4949	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
 4950	err = niu_compute_rbr_cfig_b(rp, &val);
 4951	if (err)
 4952		return err;
 4953	nw64(RBR_CFIG_B(channel), val);
 4954	nw64(RCRCFIG_A(channel),
 4955	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
 4956	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
 4957	nw64(RCRCFIG_B(channel),
 4958	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
 4959	     RCRCFIG_B_ENTOUT |
 4960	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
 4961
 4962	err = niu_enable_rx_channel(np, channel, 1);
 4963	if (err)
 4964		return err;
 4965
 4966	nw64(RBR_KICK(channel), rp->rbr_index);
 4967
 4968	val = nr64(RX_DMA_CTL_STAT(channel));
 4969	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
 4970	nw64(RX_DMA_CTL_STAT(channel), val);
 4971
 4972	return 0;
 4973}
 4974
 4975static int niu_init_rx_channels(struct niu *np)
 4976{
 4977	unsigned long flags;
 4978	u64 seed = jiffies_64;
 4979	int err, i;
 4980
 4981	niu_lock_parent(np, flags);
 4982	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
 4983	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
 4984	niu_unlock_parent(np, flags);
 4985
 4986	/* XXX RXDMA 32bit mode? XXX */
 4987
 4988	niu_init_rdc_groups(np);
 4989	niu_init_drr_weight(np);
 4990
 4991	err = niu_init_hostinfo(np);
 4992	if (err)
 4993		return err;
 4994
 4995	for (i = 0; i < np->num_rx_rings; i++) {
 4996		struct rx_ring_info *rp = &np->rx_rings[i];
 4997
 4998		err = niu_init_one_rx_channel(np, rp);
 4999		if (err)
 5000			return err;
 5001	}
 5002
 5003	return 0;
 5004}
 5005
 5006static int niu_set_ip_frag_rule(struct niu *np)
 5007{
 5008	struct niu_parent *parent = np->parent;
 5009	struct niu_classifier *cp = &np->clas;
 5010	struct niu_tcam_entry *tp;
 5011	int index, err;
 5012
 5013	index = cp->tcam_top;
 5014	tp = &parent->tcam[index];
 5015
 5016	/* Note that the noport bit is the same in both ipv4 and
 5017	 * ipv6 format TCAM entries.
 5018	 */
 5019	memset(tp, 0, sizeof(*tp));
 5020	tp->key[1] = TCAM_V4KEY1_NOPORT;
 5021	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
 5022	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
 5023			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
 5024	err = tcam_write(np, index, tp->key, tp->key_mask);
 5025	if (err)
 5026		return err;
 5027	err = tcam_assoc_write(np, index, tp->assoc_data);
 5028	if (err)
 5029		return err;
 5030	tp->valid = 1;
 5031	cp->tcam_valid_entries++;
 5032
 5033	return 0;
 5034}
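      
      /* The entry written above acts as a catch-all for packets that
       * carry no L4 ports (i.e. non-initial IP fragments): NOPORT serves
       * as both key and mask, and the association data steers such
       * frames through a zero RDC table offset.
       */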
 5035
 5036static int niu_init_classifier_hw(struct niu *np)
 5037{
 5038	struct niu_parent *parent = np->parent;
 5039	struct niu_classifier *cp = &np->clas;
 5040	int i, err;
 5041
 5042	nw64(H1POLY, cp->h1_init);
 5043	nw64(H2POLY, cp->h2_init);
 5044
 5045	err = niu_init_hostinfo(np);
 5046	if (err)
 5047		return err;
 5048
 5049	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
 5050		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
 5051
 5052		vlan_tbl_write(np, i, np->port,
 5053			       vp->vlan_pref, vp->rdc_num);
 5054	}
 5055
 5056	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
 5057		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
 5058
 5059		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
 5060						ap->rdc_num, ap->mac_pref);
 5061		if (err)
 5062			return err;
 5063	}
 5064
 5065	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
 5066		int index = i - CLASS_CODE_USER_PROG1;
 5067
 5068		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
 5069		if (err)
 5070			return err;
 5071		err = niu_set_flow_key(np, i, parent->flow_key[index]);
 5072		if (err)
 5073			return err;
 5074	}
 5075
 5076	err = niu_set_ip_frag_rule(np);
 5077	if (err)
 5078		return err;
 5079
 5080	tcam_enable(np, 1);
 5081
 5082	return 0;
 5083}
 5084
 5085static int niu_zcp_write(struct niu *np, int index, u64 *data)
 5086{
 5087	nw64(ZCP_RAM_DATA0, data[0]);
 5088	nw64(ZCP_RAM_DATA1, data[1]);
 5089	nw64(ZCP_RAM_DATA2, data[2]);
 5090	nw64(ZCP_RAM_DATA3, data[3]);
 5091	nw64(ZCP_RAM_DATA4, data[4]);
 5092	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
 5093	nw64(ZCP_RAM_ACC,
 5094	     (ZCP_RAM_ACC_WRITE |
 5095	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
 5096	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
 5097
 5098	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5099				   1000, 100);
 5100}
 5101
 5102static int niu_zcp_read(struct niu *np, int index, u64 *data)
 5103{
 5104	int err;
 5105
 5106	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5107				  1000, 100);
 5108	if (err) {
 5109		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
 5110			   (unsigned long long)nr64(ZCP_RAM_ACC));
 5111		return err;
 5112	}
 5113
 5114	nw64(ZCP_RAM_ACC,
 5115	     (ZCP_RAM_ACC_READ |
 5116	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
 5117	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
 5118
 5119	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5120				  1000, 100);
 5121	if (err) {
 5122		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
 5123			   (unsigned long long)nr64(ZCP_RAM_ACC));
 5124		return err;
 5125	}
 5126
 5127	data[0] = nr64(ZCP_RAM_DATA0);
 5128	data[1] = nr64(ZCP_RAM_DATA1);
 5129	data[2] = nr64(ZCP_RAM_DATA2);
 5130	data[3] = nr64(ZCP_RAM_DATA3);
 5131	data[4] = nr64(ZCP_RAM_DATA4);
 5132
 5133	return 0;
 5134}
 5135
 5136static void niu_zcp_cfifo_reset(struct niu *np)
 5137{
 5138	u64 val = nr64(RESET_CFIFO);
 5139
 5140	val |= RESET_CFIFO_RST(np->port);
 5141	nw64(RESET_CFIFO, val);
 5142	udelay(10);
 5143
 5144	val &= ~RESET_CFIFO_RST(np->port);
 5145	nw64(RESET_CFIFO, val);
 5146}
 5147
 5148static int niu_init_zcp(struct niu *np)
 5149{
 5150	u64 data[5], rbuf[5];
 5151	int i, max, err;
 5152
 5153	if (np->parent->plat_type != PLAT_TYPE_NIU) {
 5154		if (np->port == 0 || np->port == 1)
 5155			max = ATLAS_P0_P1_CFIFO_ENTRIES;
 5156		else
 5157			max = ATLAS_P2_P3_CFIFO_ENTRIES;
 5158	} else
 5159		max = NIU_CFIFO_ENTRIES;
 5160
 5161	data[0] = 0;
 5162	data[1] = 0;
 5163	data[2] = 0;
 5164	data[3] = 0;
 5165	data[4] = 0;
 5166
 5167	for (i = 0; i < max; i++) {
 5168		err = niu_zcp_write(np, i, data);
 5169		if (err)
 5170			return err;
 5171		err = niu_zcp_read(np, i, rbuf);
 5172		if (err)
 5173			return err;
 5174	}
 5175
 5176	niu_zcp_cfifo_reset(np);
 5177	nw64(CFIFO_ECC(np->port), 0);
 5178	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
 5179	(void) nr64(ZCP_INT_STAT);
 5180	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
 5181
 5182	return 0;
 5183}
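      
      /* The write/read sweep above zeroes every CFIFO entry, presumably
       * to leave the RAM and its ECC state in a known-good condition
       * before the FIFO is reset and the ZCP interrupts are unmasked.
       */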
 5184
 5185static void niu_ipp_write(struct niu *np, int index, u64 *data)
 5186{
 5187	u64 val = nr64_ipp(IPP_CFIG);
 5188
 5189	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
 5190	nw64_ipp(IPP_DFIFO_WR_PTR, index);
 5191	nw64_ipp(IPP_DFIFO_WR0, data[0]);
 5192	nw64_ipp(IPP_DFIFO_WR1, data[1]);
 5193	nw64_ipp(IPP_DFIFO_WR2, data[2]);
 5194	nw64_ipp(IPP_DFIFO_WR3, data[3]);
 5195	nw64_ipp(IPP_DFIFO_WR4, data[4]);
 5196	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
 5197}
 5198
 5199static void niu_ipp_read(struct niu *np, int index, u64 *data)
 5200{
 5201	nw64_ipp(IPP_DFIFO_RD_PTR, index);
 5202	data[0] = nr64_ipp(IPP_DFIFO_RD0);
 5203	data[1] = nr64_ipp(IPP_DFIFO_RD1);
 5204	data[2] = nr64_ipp(IPP_DFIFO_RD2);
 5205	data[3] = nr64_ipp(IPP_DFIFO_RD3);
 5206	data[4] = nr64_ipp(IPP_DFIFO_RD4);
 5207}
 5208
 5209static int niu_ipp_reset(struct niu *np)
 5210{
 5211	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
 5212					  1000, 100, "IPP_CFIG");
 5213}
 5214
 5215static int niu_init_ipp(struct niu *np)
 5216{
 5217	u64 data[5], rbuf[5], val;
 5218	int i, max, err;
 5219
 5220	if (np->parent->plat_type != PLAT_TYPE_NIU) {
 5221		if (np->port == 0 || np->port == 1)
 5222			max = ATLAS_P0_P1_DFIFO_ENTRIES;
 5223		else
 5224			max = ATLAS_P2_P3_DFIFO_ENTRIES;
 5225	} else
 5226		max = NIU_DFIFO_ENTRIES;
 5227
 5228	data[0] = 0;
 5229	data[1] = 0;
 5230	data[2] = 0;
 5231	data[3] = 0;
 5232	data[4] = 0;
 5233
 5234	for (i = 0; i < max; i++) {
 5235		niu_ipp_write(np, i, data);
 5236		niu_ipp_read(np, i, rbuf);
 5237	}
 5238
 5239	(void) nr64_ipp(IPP_INT_STAT);
 5240	(void) nr64_ipp(IPP_INT_STAT);
 5241
 5242	err = niu_ipp_reset(np);
 5243	if (err)
 5244		return err;
 5245
 5246	(void) nr64_ipp(IPP_PKT_DIS);
 5247	(void) nr64_ipp(IPP_BAD_CS_CNT);
 5248	(void) nr64_ipp(IPP_ECC);
 5249
 5250	(void) nr64_ipp(IPP_INT_STAT);
 5251
 5252	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
 5253
 5254	val = nr64_ipp(IPP_CFIG);
 5255	val &= ~IPP_CFIG_IP_MAX_PKT;
 5256	val |= (IPP_CFIG_IPP_ENABLE |
 5257		IPP_CFIG_DFIFO_ECC_EN |
 5258		IPP_CFIG_DROP_BAD_CRC |
 5259		IPP_CFIG_CKSUM_EN |
 5260		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
 5261	nw64_ipp(IPP_CFIG, val);
 5262
 5263	return 0;
 5264}
 5265
 5266static void niu_handle_led(struct niu *np, int status)
 5267{
 5268	u64 val;
 5269	val = nr64_mac(XMAC_CONFIG);
 5270
 5271	if ((np->flags & NIU_FLAGS_10G) != 0 &&
 5272	    (np->flags & NIU_FLAGS_FIBER) != 0) {
 5273		if (status) {
 5274			val |= XMAC_CONFIG_LED_POLARITY;
 5275			val &= ~XMAC_CONFIG_FORCE_LED_ON;
 5276		} else {
 5277			val |= XMAC_CONFIG_FORCE_LED_ON;
 5278			val &= ~XMAC_CONFIG_LED_POLARITY;
 5279		}
 5280	}
 5281
 5282	nw64_mac(XMAC_CONFIG, val);
 5283}
 5284
 5285static void niu_init_xif_xmac(struct niu *np)
 5286{
 5287	struct niu_link_config *lp = &np->link_config;
 5288	u64 val;
 5289
 5290	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
 5291		val = nr64(MIF_CONFIG);
 5292		val |= MIF_CONFIG_ATCA_GE;
 5293		nw64(MIF_CONFIG, val);
 5294	}
 5295
 5296	val = nr64_mac(XMAC_CONFIG);
 5297	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
 5298
 5299	val |= XMAC_CONFIG_TX_OUTPUT_EN;
 5300
 5301	if (lp->loopback_mode == LOOPBACK_MAC) {
 5302		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
 5303		val |= XMAC_CONFIG_LOOPBACK;
 5304	} else {
 5305		val &= ~XMAC_CONFIG_LOOPBACK;
 5306	}
 5307
 5308	if (np->flags & NIU_FLAGS_10G) {
 5309		val &= ~XMAC_CONFIG_LFS_DISABLE;
 5310	} else {
 5311		val |= XMAC_CONFIG_LFS_DISABLE;
 5312		if (!(np->flags & NIU_FLAGS_FIBER) &&
 5313		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
 5314			val |= XMAC_CONFIG_1G_PCS_BYPASS;
 5315		else
 5316			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
 5317	}
 5318
 5319	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
 5320
 5321	if (lp->active_speed == SPEED_100)
 5322		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
 5323	else
 5324		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
 5325
 5326	nw64_mac(XMAC_CONFIG, val);
 5327
 5328	val = nr64_mac(XMAC_CONFIG);
 5329	val &= ~XMAC_CONFIG_MODE_MASK;
 5330	if (np->flags & NIU_FLAGS_10G) {
 5331		val |= XMAC_CONFIG_MODE_XGMII;
 5332	} else {
 5333		if (lp->active_speed == SPEED_1000)
 5334			val |= XMAC_CONFIG_MODE_GMII;
 5335		else
 5336			val |= XMAC_CONFIG_MODE_MII;
 5337	}
 5338
 5339	nw64_mac(XMAC_CONFIG, val);
 5340}
 5341
 5342static void niu_init_xif_bmac(struct niu *np)
 5343{
 5344	struct niu_link_config *lp = &np->link_config;
 5345	u64 val;
 5346
 5347	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
 5348
 5349	if (lp->loopback_mode == LOOPBACK_MAC)
 5350		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
 5351	else
 5352		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
 5353
 5354	if (lp->active_speed == SPEED_1000)
 5355		val |= BMAC_XIF_CONFIG_GMII_MODE;
 5356	else
 5357		val &= ~BMAC_XIF_CONFIG_GMII_MODE;
 5358
 5359	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
 5360		 BMAC_XIF_CONFIG_LED_POLARITY);
 5361
 5362	if (!(np->flags & NIU_FLAGS_10G) &&
 5363	    !(np->flags & NIU_FLAGS_FIBER) &&
 5364	    lp->active_speed == SPEED_100)
 5365		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
 5366	else
 5367		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
 5368
 5369	nw64_mac(BMAC_XIF_CONFIG, val);
 5370}
 5371
 5372static void niu_init_xif(struct niu *np)
 5373{
 5374	if (np->flags & NIU_FLAGS_XMAC)
 5375		niu_init_xif_xmac(np);
 5376	else
 5377		niu_init_xif_bmac(np);
 5378}
 5379
 5380static void niu_pcs_mii_reset(struct niu *np)
 5381{
 5382	int limit = 1000;
 5383	u64 val = nr64_pcs(PCS_MII_CTL);
 5384	val |= PCS_MII_CTL_RST;
 5385	nw64_pcs(PCS_MII_CTL, val);
 5386	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
 5387		udelay(100);
 5388		val = nr64_pcs(PCS_MII_CTL);
 5389	}
 5390}
 5391
 5392static void niu_xpcs_reset(struct niu *np)
 5393{
 5394	int limit = 1000;
 5395	u64 val = nr64_xpcs(XPCS_CONTROL1);
 5396	val |= XPCS_CONTROL1_RESET;
 5397	nw64_xpcs(XPCS_CONTROL1, val);
 5398	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
 5399		udelay(100);
 5400		val = nr64_xpcs(XPCS_CONTROL1);
 5401	}
 5402}
 5403
 5404static int niu_init_pcs(struct niu *np)
 5405{
 5406	struct niu_link_config *lp = &np->link_config;
 5407	u64 val;
 5408
 5409	switch (np->flags & (NIU_FLAGS_10G |
 5410			     NIU_FLAGS_FIBER |
 5411			     NIU_FLAGS_XCVR_SERDES)) {
 5412	case NIU_FLAGS_FIBER:
 5413		/* 1G fiber */
 5414		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
 5415		nw64_pcs(PCS_DPATH_MODE, 0);
 5416		niu_pcs_mii_reset(np);
 5417		break;
 5418
 5419	case NIU_FLAGS_10G:
 5420	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 5421	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 5422		/* 10G SERDES */
 5423		if (!(np->flags & NIU_FLAGS_XMAC))
 5424			return -EINVAL;
 5425
 5426		/* 10G copper or fiber */
 5427		val = nr64_mac(XMAC_CONFIG);
 5428		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
 5429		nw64_mac(XMAC_CONFIG, val);
 5430
 5431		niu_xpcs_reset(np);
 5432
 5433		val = nr64_xpcs(XPCS_CONTROL1);
 5434		if (lp->loopback_mode == LOOPBACK_PHY)
 5435			val |= XPCS_CONTROL1_LOOPBACK;
 5436		else
 5437			val &= ~XPCS_CONTROL1_LOOPBACK;
 5438		nw64_xpcs(XPCS_CONTROL1, val);
 5439
 5440		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
 5441		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
 5442		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
 5443		break;
  5444
 5446	case NIU_FLAGS_XCVR_SERDES:
 5447		/* 1G SERDES */
 5448		niu_pcs_mii_reset(np);
 5449		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
 5450		nw64_pcs(PCS_DPATH_MODE, 0);
 5451		break;
 5452
 5453	case 0:
 5454		/* 1G copper */
 5455	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
 5456		/* 1G RGMII FIBER */
 5457		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
 5458		niu_pcs_mii_reset(np);
 5459		break;
 5460
 5461	default:
 5462		return -EINVAL;
 5463	}
 5464
 5465	return 0;
 5466}
 5467
 5468static int niu_reset_tx_xmac(struct niu *np)
 5469{
 5470	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
 5471					  (XTXMAC_SW_RST_REG_RS |
 5472					   XTXMAC_SW_RST_SOFT_RST),
 5473					  1000, 100, "XTXMAC_SW_RST");
 5474}
 5475
 5476static int niu_reset_tx_bmac(struct niu *np)
 5477{
 5478	int limit;
 5479
 5480	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
 5481	limit = 1000;
 5482	while (--limit >= 0) {
 5483		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
 5484			break;
 5485		udelay(100);
 5486	}
 5487	if (limit < 0) {
 5488		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
 5489			np->port,
 5490			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
 5491		return -ENODEV;
 5492	}
 5493
 5494	return 0;
 5495}
 5496
 5497static int niu_reset_tx_mac(struct niu *np)
 5498{
 5499	if (np->flags & NIU_FLAGS_XMAC)
 5500		return niu_reset_tx_xmac(np);
 5501	else
 5502		return niu_reset_tx_bmac(np);
 5503}
 5504
 5505static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
 5506{
 5507	u64 val;
 5508
 5509	val = nr64_mac(XMAC_MIN);
 5510	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
 5511		 XMAC_MIN_RX_MIN_PKT_SIZE);
 5512	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
 5513	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
 5514	nw64_mac(XMAC_MIN, val);
 5515
 5516	nw64_mac(XMAC_MAX, max);
 5517
 5518	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
 5519
 5520	val = nr64_mac(XMAC_IPG);
 5521	if (np->flags & NIU_FLAGS_10G) {
 5522		val &= ~XMAC_IPG_IPG_XGMII;
 5523		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
 5524	} else {
 5525		val &= ~XMAC_IPG_IPG_MII_GMII;
 5526		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
 5527	}
 5528	nw64_mac(XMAC_IPG, val);
 5529
 5530	val = nr64_mac(XMAC_CONFIG);
 5531	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
 5532		 XMAC_CONFIG_STRETCH_MODE |
 5533		 XMAC_CONFIG_VAR_MIN_IPG_EN |
 5534		 XMAC_CONFIG_TX_ENABLE);
 5535	nw64_mac(XMAC_CONFIG, val);
 5536
 5537	nw64_mac(TXMAC_FRM_CNT, 0);
 5538	nw64_mac(TXMAC_BYTE_CNT, 0);
 5539}
 5540
 5541static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
 5542{
 5543	u64 val;
 5544
 5545	nw64_mac(BMAC_MIN_FRAME, min);
 5546	nw64_mac(BMAC_MAX_FRAME, max);
 5547
 5548	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
 5549	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
 5550	nw64_mac(BMAC_PREAMBLE_SIZE, 7);
 5551
 5552	val = nr64_mac(BTXMAC_CONFIG);
 5553	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
 5554		 BTXMAC_CONFIG_ENABLE);
 5555	nw64_mac(BTXMAC_CONFIG, val);
 5556}
 5557
 5558static void niu_init_tx_mac(struct niu *np)
 5559{
 5560	u64 min, max;
 5561
 5562	min = 64;
 5563	if (np->dev->mtu > ETH_DATA_LEN)
 5564		max = 9216;
 5565	else
 5566		max = 1522;
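      	/* 1522 is a 1500-byte payload plus the 14-byte header, 4-byte
      	 * FCS and a 4-byte VLAN tag; 9216 (9 KB) comfortably covers
      	 * 9000-byte jumbo frames with the same overhead.
      	 */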
 5567
 5568	/* The XMAC_MIN register only accepts values for TX min which
 5569	 * have the low 3 bits cleared.
 5570	 */
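      	/* min is fixed at 64 (0x40) above, so its low 3 bits are clear
      	 * and the BUG_ON below cannot fire.
      	 */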
 5571	BUG_ON(min & 0x7);
 5572
 5573	if (np->flags & NIU_FLAGS_XMAC)
 5574		niu_init_tx_xmac(np, min, max);
 5575	else
 5576		niu_init_tx_bmac(np, min, max);
 5577}
 5578
 5579static int niu_reset_rx_xmac(struct niu *np)
 5580{
 5581	int limit;
 5582
 5583	nw64_mac(XRXMAC_SW_RST,
 5584		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
 5585	limit = 1000;
 5586	while (--limit >= 0) {
 5587		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
 5588						 XRXMAC_SW_RST_SOFT_RST)))
 5589			break;
 5590		udelay(100);
 5591	}
 5592	if (limit < 0) {
 5593		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
 5594			np->port,
 5595			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
 5596		return -ENODEV;
 5597	}
 5598
 5599	return 0;
 5600}
 5601
 5602static int niu_reset_rx_bmac(struct niu *np)
 5603{
 5604	int limit;
 5605
 5606	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
 5607	limit = 1000;
 5608	while (--limit >= 0) {
 5609		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
 5610			break;
 5611		udelay(100);
 5612	}
 5613	if (limit < 0) {
 5614		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
 5615			np->port,
 5616			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
 5617		return -ENODEV;
 5618	}
 5619
 5620	return 0;
 5621}
 5622
 5623static int niu_reset_rx_mac(struct niu *np)
 5624{
 5625	if (np->flags & NIU_FLAGS_XMAC)
 5626		return niu_reset_rx_xmac(np);
 5627	else
 5628		return niu_reset_rx_bmac(np);
 5629}
 5630
 5631static void niu_init_rx_xmac(struct niu *np)
 5632{
 5633	struct niu_parent *parent = np->parent;
 5634	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 5635	int first_rdc_table = tp->first_table_num;
 5636	unsigned long i;
 5637	u64 val;
 5638
 5639	nw64_mac(XMAC_ADD_FILT0, 0);
 5640	nw64_mac(XMAC_ADD_FILT1, 0);
 5641	nw64_mac(XMAC_ADD_FILT2, 0);
 5642	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
 5643	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
 5644	for (i = 0; i < MAC_NUM_HASH; i++)
 5645		nw64_mac(XMAC_HASH_TBL(i), 0);
 5646	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
 5647	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 5648	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 5649
 5650	val = nr64_mac(XMAC_CONFIG);
 5651	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
 5652		 XMAC_CONFIG_PROMISCUOUS |
 5653		 XMAC_CONFIG_PROMISC_GROUP |
 5654		 XMAC_CONFIG_ERR_CHK_DIS |
 5655		 XMAC_CONFIG_RX_CRC_CHK_DIS |
 5656		 XMAC_CONFIG_RESERVED_MULTICAST |
 5657		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
 5658		 XMAC_CONFIG_ADDR_FILTER_EN |
 5659		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
 5660		 XMAC_CONFIG_STRIP_CRC |
 5661		 XMAC_CONFIG_PASS_FLOW_CTRL |
 5662		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
 5663	val |= (XMAC_CONFIG_HASH_FILTER_EN);
 5664	nw64_mac(XMAC_CONFIG, val);
 5665
 5666	nw64_mac(RXMAC_BT_CNT, 0);
 5667	nw64_mac(RXMAC_BC_FRM_CNT, 0);
 5668	nw64_mac(RXMAC_MC_FRM_CNT, 0);
 5669	nw64_mac(RXMAC_FRAG_CNT, 0);
 5670	nw64_mac(RXMAC_HIST_CNT1, 0);
 5671	nw64_mac(RXMAC_HIST_CNT2, 0);
 5672	nw64_mac(RXMAC_HIST_CNT3, 0);
 5673	nw64_mac(RXMAC_HIST_CNT4, 0);
 5674	nw64_mac(RXMAC_HIST_CNT5, 0);
 5675	nw64_mac(RXMAC_HIST_CNT6, 0);
 5676	nw64_mac(RXMAC_HIST_CNT7, 0);
 5677	nw64_mac(RXMAC_MPSZER_CNT, 0);
 5678	nw64_mac(RXMAC_CRC_ER_CNT, 0);
 5679	nw64_mac(RXMAC_CD_VIO_CNT, 0);
 5680	nw64_mac(LINK_FAULT_CNT, 0);
 5681}
 5682
 5683static void niu_init_rx_bmac(struct niu *np)
 5684{
 5685	struct niu_parent *parent = np->parent;
 5686	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 5687	int first_rdc_table = tp->first_table_num;
 5688	unsigned long i;
 5689	u64 val;
 5690
 5691	nw64_mac(BMAC_ADD_FILT0, 0);
 5692	nw64_mac(BMAC_ADD_FILT1, 0);
 5693	nw64_mac(BMAC_ADD_FILT2, 0);
 5694	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
 5695	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
 5696	for (i = 0; i < MAC_NUM_HASH; i++)
 5697		nw64_mac(BMAC_HASH_TBL(i), 0);
 5698	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 5699	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 5700	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
 5701
 5702	val = nr64_mac(BRXMAC_CONFIG);
 5703	val &= ~(BRXMAC_CONFIG_ENABLE |
 5704		 BRXMAC_CONFIG_STRIP_PAD |
 5705		 BRXMAC_CONFIG_STRIP_FCS |
 5706		 BRXMAC_CONFIG_PROMISC |
 5707		 BRXMAC_CONFIG_PROMISC_GRP |
 5708		 BRXMAC_CONFIG_ADDR_FILT_EN |
 5709		 BRXMAC_CONFIG_DISCARD_DIS);
 5710	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
 5711	nw64_mac(BRXMAC_CONFIG, val);
 5712
 5713	val = nr64_mac(BMAC_ADDR_CMPEN);
 5714	val |= BMAC_ADDR_CMPEN_EN0;
 5715	nw64_mac(BMAC_ADDR_CMPEN, val);
 5716}
 5717
 5718static void niu_init_rx_mac(struct niu *np)
 5719{
 5720	niu_set_primary_mac(np, np->dev->dev_addr);
 5721
 5722	if (np->flags & NIU_FLAGS_XMAC)
 5723		niu_init_rx_xmac(np);
 5724	else
 5725		niu_init_rx_bmac(np);
 5726}
 5727
 5728static void niu_enable_tx_xmac(struct niu *np, int on)
 5729{
 5730	u64 val = nr64_mac(XMAC_CONFIG);
 5731
 5732	if (on)
 5733		val |= XMAC_CONFIG_TX_ENABLE;
 5734	else
 5735		val &= ~XMAC_CONFIG_TX_ENABLE;
 5736	nw64_mac(XMAC_CONFIG, val);
 5737}
 5738
 5739static void niu_enable_tx_bmac(struct niu *np, int on)
 5740{
 5741	u64 val = nr64_mac(BTXMAC_CONFIG);
 5742
 5743	if (on)
 5744		val |= BTXMAC_CONFIG_ENABLE;
 5745	else
 5746		val &= ~BTXMAC_CONFIG_ENABLE;
 5747	nw64_mac(BTXMAC_CONFIG, val);
 5748}
 5749
 5750static void niu_enable_tx_mac(struct niu *np, int on)
 5751{
 5752	if (np->flags & NIU_FLAGS_XMAC)
 5753		niu_enable_tx_xmac(np, on);
 5754	else
 5755		niu_enable_tx_bmac(np, on);
 5756}
 5757
 5758static void niu_enable_rx_xmac(struct niu *np, int on)
 5759{
 5760	u64 val = nr64_mac(XMAC_CONFIG);
 5761
 5762	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
 5763		 XMAC_CONFIG_PROMISCUOUS);
 5764
 5765	if (np->flags & NIU_FLAGS_MCAST)
 5766		val |= XMAC_CONFIG_HASH_FILTER_EN;
 5767	if (np->flags & NIU_FLAGS_PROMISC)
 5768		val |= XMAC_CONFIG_PROMISCUOUS;
 5769
 5770	if (on)
 5771		val |= XMAC_CONFIG_RX_MAC_ENABLE;
 5772	else
 5773		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
 5774	nw64_mac(XMAC_CONFIG, val);
 5775}
 5776
 5777static void niu_enable_rx_bmac(struct niu *np, int on)
 5778{
 5779	u64 val = nr64_mac(BRXMAC_CONFIG);
 5780
 5781	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
 5782		 BRXMAC_CONFIG_PROMISC);
 5783
 5784	if (np->flags & NIU_FLAGS_MCAST)
 5785		val |= BRXMAC_CONFIG_HASH_FILT_EN;
 5786	if (np->flags & NIU_FLAGS_PROMISC)
 5787		val |= BRXMAC_CONFIG_PROMISC;
 5788
 5789	if (on)
 5790		val |= BRXMAC_CONFIG_ENABLE;
 5791	else
 5792		val &= ~BRXMAC_CONFIG_ENABLE;
 5793	nw64_mac(BRXMAC_CONFIG, val);
 5794}
 5795
 5796static void niu_enable_rx_mac(struct niu *np, int on)
 5797{
 5798	if (np->flags & NIU_FLAGS_XMAC)
 5799		niu_enable_rx_xmac(np, on);
 5800	else
 5801		niu_enable_rx_bmac(np, on);
 5802}
 5803
 5804static int niu_init_mac(struct niu *np)
 5805{
 5806	int err;
 5807
 5808	niu_init_xif(np);
 5809	err = niu_init_pcs(np);
 5810	if (err)
 5811		return err;
 5812
 5813	err = niu_reset_tx_mac(np);
 5814	if (err)
 5815		return err;
 5816	niu_init_tx_mac(np);
 5817	err = niu_reset_rx_mac(np);
 5818	if (err)
 5819		return err;
 5820	niu_init_rx_mac(np);
 5821
  5822	/* This looks hokey, but the RX MAC reset we just did will
  5823	 * undo some of the state we set up in niu_init_tx_mac(), so we
  5824	 * have to call it again.  In particular, the RX MAC reset will
  5825	 * set the XMAC_MAX register back to its default value.
  5826	 */
 5827	niu_init_tx_mac(np);
 5828	niu_enable_tx_mac(np, 1);
 5829
 5830	niu_enable_rx_mac(np, 1);
 5831
 5832	return 0;
 5833}
 5834
 5835static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 5836{
 5837	(void) niu_tx_channel_stop(np, rp->tx_channel);
 5838}
 5839
 5840static void niu_stop_tx_channels(struct niu *np)
 5841{
 5842	int i;
 5843
 5844	for (i = 0; i < np->num_tx_rings; i++) {
 5845		struct tx_ring_info *rp = &np->tx_rings[i];
 5846
 5847		niu_stop_one_tx_channel(np, rp);
 5848	}
 5849}
 5850
 5851static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 5852{
 5853	(void) niu_tx_channel_reset(np, rp->tx_channel);
 5854}
 5855
 5856static void niu_reset_tx_channels(struct niu *np)
 5857{
 5858	int i;
 5859
 5860	for (i = 0; i < np->num_tx_rings; i++) {
 5861		struct tx_ring_info *rp = &np->tx_rings[i];
 5862
 5863		niu_reset_one_tx_channel(np, rp);
 5864	}
 5865}
 5866
 5867static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 5868{
 5869	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
 5870}
 5871
 5872static void niu_stop_rx_channels(struct niu *np)
 5873{
 5874	int i;
 5875
 5876	for (i = 0; i < np->num_rx_rings; i++) {
 5877		struct rx_ring_info *rp = &np->rx_rings[i];
 5878
 5879		niu_stop_one_rx_channel(np, rp);
 5880	}
 5881}
 5882
 5883static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 5884{
 5885	int channel = rp->rx_channel;
 5886
 5887	(void) niu_rx_channel_reset(np, channel);
 5888	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
 5889	nw64(RX_DMA_CTL_STAT(channel), 0);
 5890	(void) niu_enable_rx_channel(np, channel, 0);
 5891}
 5892
 5893static void niu_reset_rx_channels(struct niu *np)
 5894{
 5895	int i;
 5896
 5897	for (i = 0; i < np->num_rx_rings; i++) {
 5898		struct rx_ring_info *rp = &np->rx_rings[i];
 5899
 5900		niu_reset_one_rx_channel(np, rp);
 5901	}
 5902}
 5903
 5904static void niu_disable_ipp(struct niu *np)
 5905{
 5906	u64 rd, wr, val;
 5907	int limit;
 5908
 5909	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
 5910	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
 5911	limit = 100;
 5912	while (--limit >= 0 && (rd != wr)) {
 5913		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
 5914		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
 5915	}
 5916	if (limit < 0 &&
 5917	    (rd != 0 && wr != 1)) {
 5918		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
 5919			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
 5920			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
 5921	}
 5922
 5923	val = nr64_ipp(IPP_CFIG);
 5924	val &= ~(IPP_CFIG_IPP_ENABLE |
 5925		 IPP_CFIG_DFIFO_ECC_EN |
 5926		 IPP_CFIG_DROP_BAD_CRC |
 5927		 IPP_CFIG_CKSUM_EN);
 5928	nw64_ipp(IPP_CFIG, val);
 5929
 5930	(void) niu_ipp_reset(np);
 5931}
 5932
 5933static int niu_init_hw(struct niu *np)
 5934{
 5935	int i, err;
 5936
 5937	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
 5938	niu_txc_enable_port(np, 1);
 5939	niu_txc_port_dma_enable(np, 1);
 5940	niu_txc_set_imask(np, 0);
 5941
 5942	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
 5943	for (i = 0; i < np->num_tx_rings; i++) {
 5944		struct tx_ring_info *rp = &np->tx_rings[i];
 5945
 5946		err = niu_init_one_tx_channel(np, rp);
 5947		if (err)
 5948			return err;
 5949	}
 5950
 5951	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
 5952	err = niu_init_rx_channels(np);
 5953	if (err)
 5954		goto out_uninit_tx_channels;
 5955
 5956	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
 5957	err = niu_init_classifier_hw(np);
 5958	if (err)
 5959		goto out_uninit_rx_channels;
 5960
 5961	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
 5962	err = niu_init_zcp(np);
 5963	if (err)
 5964		goto out_uninit_rx_channels;
 5965
 5966	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
 5967	err = niu_init_ipp(np);
 5968	if (err)
 5969		goto out_uninit_rx_channels;
 5970
 5971	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
 5972	err = niu_init_mac(np);
 5973	if (err)
 5974		goto out_uninit_ipp;
 5975
 5976	return 0;
 5977
 5978out_uninit_ipp:
 5979	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
 5980	niu_disable_ipp(np);
 5981
 5982out_uninit_rx_channels:
 5983	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
 5984	niu_stop_rx_channels(np);
 5985	niu_reset_rx_channels(np);
 5986
 5987out_uninit_tx_channels:
 5988	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
 5989	niu_stop_tx_channels(np);
 5990	niu_reset_tx_channels(np);
 5991
 5992	return err;
 5993}
 5994
 5995static void niu_stop_hw(struct niu *np)
 5996{
 5997	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
 5998	niu_enable_interrupts(np, 0);
 5999
 6000	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
 6001	niu_enable_rx_mac(np, 0);
 6002
 6003	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
 6004	niu_disable_ipp(np);
 6005
 6006	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
 6007	niu_stop_tx_channels(np);
 6008
 6009	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
 6010	niu_stop_rx_channels(np);
 6011
 6012	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
 6013	niu_reset_tx_channels(np);
 6014
 6015	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
 6016	niu_reset_rx_channels(np);
 6017}
 6018
 6019static void niu_set_irq_name(struct niu *np)
 6020{
 6021	int port = np->port;
 6022	int i, j = 1;
 6023
 6024	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
 6025
 6026	if (port == 0) {
 6027		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
 6028		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
 6029		j = 3;
 6030	}
 6031
 6032	for (i = 0; i < np->num_ldg - j; i++) {
 6033		if (i < np->num_rx_rings)
 6034			sprintf(np->irq_name[i+j], "%s-rx-%d",
 6035				np->dev->name, i);
 6036		else if (i < np->num_tx_rings + np->num_rx_rings)
 6037			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
 6038				i - np->num_rx_rings);
 6039	}
 6040}
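      
      /* For a hypothetical port-0 device named "eth0" this yields
       * "eth0:MAC", "eth0:MIF" and "eth0:SYSERR", followed by
       * "eth0-rx-0", "eth0-rx-1", ..., then "eth0-tx-0", ... for the
       * remaining logical device groups.
       */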
 6041
 6042static int niu_request_irq(struct niu *np)
 6043{
 6044	int i, j, err;
 6045
 6046	niu_set_irq_name(np);
 6047
 6048	err = 0;
 6049	for (i = 0; i < np->num_ldg; i++) {
 6050		struct niu_ldg *lp = &np->ldg[i];
 6051
 6052		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
 6053				  np->irq_name[i], lp);
 6054		if (err)
 6055			goto out_free_irqs;
 6056
 6057	}
 6058
 6059	return 0;
 6060
 6061out_free_irqs:
 6062	for (j = 0; j < i; j++) {
 6063		struct niu_ldg *lp = &np->ldg[j];
 6064
 6065		free_irq(lp->irq, lp);
 6066	}
 6067	return err;
 6068}
 6069
 6070static void niu_free_irq(struct niu *np)
 6071{
 6072	int i;
 6073
 6074	for (i = 0; i < np->num_ldg; i++) {
 6075		struct niu_ldg *lp = &np->ldg[i];
 6076
 6077		free_irq(lp->irq, lp);
 6078	}
 6079}
 6080
 6081static void niu_enable_napi(struct niu *np)
 6082{
 6083	int i;
 6084
 6085	for (i = 0; i < np->num_ldg; i++)
 6086		napi_enable(&np->ldg[i].napi);
 6087}
 6088
 6089static void niu_disable_napi(struct niu *np)
 6090{
 6091	int i;
 6092
 6093	for (i = 0; i < np->num_ldg; i++)
 6094		napi_disable(&np->ldg[i].napi);
 6095}
 6096
 6097static int niu_open(struct net_device *dev)
 6098{
 6099	struct niu *np = netdev_priv(dev);
 6100	int err;
 6101
 6102	netif_carrier_off(dev);
 6103
 6104	err = niu_alloc_channels(np);
 6105	if (err)
 6106		goto out_err;
 6107
 6108	err = niu_enable_interrupts(np, 0);
 6109	if (err)
 6110		goto out_free_channels;
 6111
 6112	err = niu_request_irq(np);
 6113	if (err)
 6114		goto out_free_channels;
 6115
 6116	niu_enable_napi(np);
 6117
 6118	spin_lock_irq(&np->lock);
 6119
 6120	err = niu_init_hw(np);
 6121	if (!err) {
 6122		init_timer(&np->timer);
 6123		np->timer.expires = jiffies + HZ;
 6124		np->timer.data = (unsigned long) np;
 6125		np->timer.function = niu_timer;
 6126
 6127		err = niu_enable_interrupts(np, 1);
 6128		if (err)
 6129			niu_stop_hw(np);
 6130	}
 6131
 6132	spin_unlock_irq(&np->lock);
 6133
 6134	if (err) {
 6135		niu_disable_napi(np);
 6136		goto out_free_irq;
 6137	}
 6138
 6139	netif_tx_start_all_queues(dev);
 6140
 6141	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 6142		netif_carrier_on(dev);
 6143
 6144	add_timer(&np->timer);
 6145
 6146	return 0;
 6147
 6148out_free_irq:
 6149	niu_free_irq(np);
 6150
 6151out_free_channels:
 6152	niu_free_channels(np);
 6153
 6154out_err:
 6155	return err;
 6156}
 6157
 6158static void niu_full_shutdown(struct niu *np, struct net_device *dev)
 6159{
 6160	cancel_work_sync(&np->reset_task);
 6161
 6162	niu_disable_napi(np);
 6163	netif_tx_stop_all_queues(dev);
 6164
 6165	del_timer_sync(&np->timer);
 6166
 6167	spin_lock_irq(&np->lock);
 6168
 6169	niu_stop_hw(np);
 6170
 6171	spin_unlock_irq(&np->lock);
 6172}
 6173
 6174static int niu_close(struct net_device *dev)
 6175{
 6176	struct niu *np = netdev_priv(dev);
 6177
 6178	niu_full_shutdown(np, dev);
 6179
 6180	niu_free_irq(np);
 6181
 6182	niu_free_channels(np);
 6183
 6184	niu_handle_led(np, 0);
 6185
 6186	return 0;
 6187}
 6188
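/* Accumulate the XMAC hardware counters into the software copy kept in
 * np->mac_stats.xmac.
 */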
 6189static void niu_sync_xmac_stats(struct niu *np)
 6190{
 6191	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
 6192
 6193	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
 6194	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
 6195
 6196	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
 6197	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
 6198	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
 6199	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
 6200	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
 6201	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
 6202	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
 6203	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
 6204	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
 6205	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
 6206	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
 6207	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
 6208	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
 6209	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
 6210	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
 6211	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
 6212}
 6213
 6214static void niu_sync_bmac_stats(struct niu *np)
 6215{
 6216	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
 6217
 6218	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
 6219	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
 6220
 6221	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
 6222	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
  6223	mp->rx_crc_errors += nr64_mac(BRXMAC_CRC_ERR_CNT);
 6224	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
 6225}
 6226
 6227static void niu_sync_mac_stats(struct niu *np)
 6228{
 6229	if (np->flags & NIU_FLAGS_XMAC)
 6230		niu_sync_xmac_stats(np);
 6231	else
 6232		niu_sync_bmac_stats(np);
 6233}
 6234
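/* Sum the per-channel RX counters for get_stats64.  The ring array can
 * be torn down while we run, hence the ACCESS_ONCE() snapshot and the
 * NULL check.
 */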
 6235static void niu_get_rx_stats(struct niu *np,
 6236			     struct rtnl_link_stats64 *stats)
 6237{
 6238	u64 pkts, dropped, errors, bytes;
 6239	struct rx_ring_info *rx_rings;
 6240	int i;
 6241
 6242	pkts = dropped = errors = bytes = 0;
 6243
 6244	rx_rings = ACCESS_ONCE(np->rx_rings);
 6245	if (!rx_rings)
 6246		goto no_rings;
 6247
 6248	for (i = 0; i < np->num_rx_rings; i++) {
 6249		struct rx_ring_info *rp = &rx_rings[i];
 6250
 6251		niu_sync_rx_discard_stats(np, rp, 0);
 6252
 6253		pkts += rp->rx_packets;
 6254		bytes += rp->rx_bytes;
 6255		dropped += rp->rx_dropped;
 6256		errors += rp->rx_errors;
 6257	}
 6258
 6259no_rings:
 6260	stats->rx_packets = pkts;
 6261	stats->rx_bytes = bytes;
 6262	stats->rx_dropped = dropped;
 6263	stats->rx_errors = errors;
 6264}
 6265
 6266static void niu_get_tx_stats(struct niu *np,
 6267			     struct rtnl_link_stats64 *stats)
 6268{
 6269	u64 pkts, errors, bytes;
 6270	struct tx_ring_info *tx_rings;
 6271	int i;
 6272
 6273	pkts = errors = bytes = 0;
 6274
 6275	tx_rings = ACCESS_ONCE(np->tx_rings);
 6276	if (!tx_rings)
 6277		goto no_rings;
 6278
 6279	for (i = 0; i < np->num_tx_rings; i++) {
 6280		struct tx_ring_info *rp = &tx_rings[i];
 6281
 6282		pkts += rp->tx_packets;
 6283		bytes += rp->tx_bytes;
 6284		errors += rp->tx_errors;
 6285	}
 6286
 6287no_rings:
 6288	stats->tx_packets = pkts;
 6289	stats->tx_bytes = bytes;
 6290	stats->tx_errors = errors;
 6291}
 6292
 6293static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
 6294					       struct rtnl_link_stats64 *stats)
 6295{
 6296	struct niu *np = netdev_priv(dev);
 6297
 6298	if (netif_running(dev)) {
 6299		niu_get_rx_stats(np, stats);
 6300		niu_get_tx_stats(np, stats);
 6301	}
 6302
 6303	return stats;
 6304}
 6305
 6306static void niu_load_hash_xmac(struct niu *np, u16 *hash)
 6307{
 6308	int i;
 6309
 6310	for (i = 0; i < 16; i++)
 6311		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
 6312}
 6313
 6314static void niu_load_hash_bmac(struct niu *np, u16 *hash)
 6315{
 6316	int i;
 6317
 6318	for (i = 0; i < 16; i++)
 6319		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
 6320}
 6321
 6322static void niu_load_hash(struct niu *np, u16 *hash)
 6323{
 6324	if (np->flags & NIU_FLAGS_XMAC)
 6325		niu_load_hash_xmac(np, hash);
 6326	else
 6327		niu_load_hash_bmac(np, hash);
 6328}
 6329
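/* ndo_set_rx_mode: with the RX MAC disabled, reprogram the promiscuous
 * and multicast flags, the alternate (unicast filter) MAC address slots
 * and the 16 x 16-bit multicast hash table, then re-enable the RX MAC.
 */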
 6330static void niu_set_rx_mode(struct net_device *dev)
 6331{
 6332	struct niu *np = netdev_priv(dev);
 6333	int i, alt_cnt, err;
 6334	struct netdev_hw_addr *ha;
 6335	unsigned long flags;
 6336	u16 hash[16] = { 0, };
 6337
 6338	spin_lock_irqsave(&np->lock, flags);
 6339	niu_enable_rx_mac(np, 0);
 6340
 6341	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
 6342	if (dev->flags & IFF_PROMISC)
 6343		np->flags |= NIU_FLAGS_PROMISC;
 6344	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
 6345		np->flags |= NIU_FLAGS_MCAST;
 6346
 6347	alt_cnt = netdev_uc_count(dev);
 6348	if (alt_cnt > niu_num_alt_addr(np)) {
 6349		alt_cnt = 0;
 6350		np->flags |= NIU_FLAGS_PROMISC;
 6351	}
 6352
 6353	if (alt_cnt) {
 6354		int index = 0;
 6355
 6356		netdev_for_each_uc_addr(ha, dev) {
 6357			err = niu_set_alt_mac(np, index, ha->addr);
 6358			if (err)
 6359				netdev_warn(dev, "Error %d adding alt mac %d\n",
 6360					    err, index);
 6361			err = niu_enable_alt_mac(np, index, 1);
 6362			if (err)
 6363				netdev_warn(dev, "Error %d enabling alt mac %d\n",
 6364					    err, index);
 6365
 6366			index++;
 6367		}
 6368	} else {
 6369		int alt_start;
 6370		if (np->flags & NIU_FLAGS_XMAC)
 6371			alt_start = 0;
 6372		else
 6373			alt_start = 1;
 6374		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
 6375			err = niu_enable_alt_mac(np, i, 0);
 6376			if (err)
 6377				netdev_warn(dev, "Error %d disabling alt mac %d\n",
 6378					    err, i);
 6379		}
 6380	}
 6381	if (dev->flags & IFF_ALLMULTI) {
 6382		for (i = 0; i < 16; i++)
 6383			hash[i] = 0xffff;
 6384	} else if (!netdev_mc_empty(dev)) {
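		/* Hash on the top byte of the little-endian CRC of the
		 * address: bits 7:4 pick one of the 16 hash registers,
		 * bits 3:0 pick a bit within it, MSB first.
		 */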
 6385		netdev_for_each_mc_addr(ha, dev) {
 6386			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
 6387
 6388			crc >>= 24;
 6389			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
 6390		}
 6391	}
 6392
 6393	if (np->flags & NIU_FLAGS_MCAST)
 6394		niu_load_hash(np, hash);
 6395
 6396	niu_enable_rx_mac(np, 1);
 6397	spin_unlock_irqrestore(&np->lock, flags);
 6398}
 6399
 6400static int niu_set_mac_addr(struct net_device *dev, void *p)
 6401{
 6402	struct niu *np = netdev_priv(dev);
 6403	struct sockaddr *addr = p;
 6404	unsigned long flags;
 6405
 6406	if (!is_valid_ether_addr(addr->sa_data))
 6407		return -EADDRNOTAVAIL;
 6408
 6409	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 6410
 6411	if (!netif_running(dev))
 6412		return 0;
 6413
 6414	spin_lock_irqsave(&np->lock, flags);
 6415	niu_enable_rx_mac(np, 0);
 6416	niu_set_primary_mac(np, dev->dev_addr);
 6417	niu_enable_rx_mac(np, 1);
 6418	spin_unlock_irqrestore(&np->lock, flags);
 6419
 6420	return 0;
 6421}
 6422
 6423static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 6424{
 6425	return -EOPNOTSUPP;
 6426}
 6427
 6428static void niu_netif_stop(struct niu *np)
 6429{
 6430	np->dev->trans_start = jiffies;	/* prevent tx timeout */
 6431
 6432	niu_disable_napi(np);
 6433
 6434	netif_tx_disable(np->dev);
 6435}
 6436
 6437static void niu_netif_start(struct niu *np)
 6438{
  6439	/* NOTE: unconditionally waking all TX queues is only appropriate
  6440	 * so long as all callers are assured to have free tx slots
  6441	 * (such as after niu_init_hw).
  6442	 */
 6443	netif_tx_wake_all_queues(np->dev);
 6444
 6445	niu_enable_napi(np);
 6446
 6447	niu_enable_interrupts(np, 1);
 6448}
 6449
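/* Rebuild ring state after a reset without a full reallocation: re-post
 * the RX pages still held in each ring's rxhash to the RBR, top the RBR
 * up with fresh pages, and release any TX skbs left on the TX rings.
 */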
 6450static void niu_reset_buffers(struct niu *np)
 6451{
 6452	int i, j, k, err;
 6453
 6454	if (np->rx_rings) {
 6455		for (i = 0; i < np->num_rx_rings; i++) {
 6456			struct rx_ring_info *rp = &np->rx_rings[i];
 6457
 6458			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
 6459				struct page *page;
 6460
 6461				page = rp->rxhash[j];
 6462				while (page) {
 6463					struct page *next =
 6464						(struct page *) page->mapping;
 6465					u64 base = page->index;
 6466					base = base >> RBR_DESCR_ADDR_SHIFT;
 6467					rp->rbr[k++] = cpu_to_le32(base);
 6468					page = next;
 6469				}
 6470			}
 6471			for (; k < MAX_RBR_RING_SIZE; k++) {
 6472				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
 6473				if (unlikely(err))
 6474					break;
 6475			}
 6476
 6477			rp->rbr_index = rp->rbr_table_size - 1;
 6478			rp->rcr_index = 0;
 6479			rp->rbr_pending = 0;
 6480			rp->rbr_refill_pending = 0;
 6481		}
 6482	}
 6483	if (np->tx_rings) {
 6484		for (i = 0; i < np->num_tx_rings; i++) {
 6485			struct tx_ring_info *rp = &np->tx_rings[i];
 6486
 6487			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
 6488				if (rp->tx_buffs[j].skb)
 6489					(void) release_tx_packet(np, rp, j);
 6490			}
 6491
 6492			rp->pending = MAX_TX_RING_SIZE;
 6493			rp->prod = 0;
 6494			rp->cons = 0;
 6495			rp->wrap_bit = 0;
 6496		}
 6497	}
 6498}
 6499
 6500static void niu_reset_task(struct work_struct *work)
 6501{
 6502	struct niu *np = container_of(work, struct niu, reset_task);
 6503	unsigned long flags;
 6504	int err;
 6505
 6506	spin_lock_irqsave(&np->lock, flags);
 6507	if (!netif_running(np->dev)) {
 6508		spin_unlock_irqrestore(&np->lock, flags);
 6509		return;
 6510	}
 6511
 6512	spin_unlock_irqrestore(&np->lock, flags);
 6513
 6514	del_timer_sync(&np->timer);
 6515
 6516	niu_netif_stop(np);
 6517
 6518	spin_lock_irqsave(&np->lock, flags);
 6519
 6520	niu_stop_hw(np);
 6521
 6522	spin_unlock_irqrestore(&np->lock, flags);
 6523
 6524	niu_reset_buffers(np);
 6525
 6526	spin_lock_irqsave(&np->lock, flags);
 6527
 6528	err = niu_init_hw(np);
 6529	if (!err) {
 6530		np->timer.expires = jiffies + HZ;
 6531		add_timer(&np->timer);
 6532		niu_netif_start(np);
 6533	}
 6534
 6535	spin_unlock_irqrestore(&np->lock, flags);
 6536}
 6537
 6538static void niu_tx_timeout(struct net_device *dev)
 6539{
 6540	struct niu *np = netdev_priv(dev);
 6541
 6542	dev_err(np->device, "%s: Transmit timed out, resetting\n",
 6543		dev->name);
 6544
 6545	schedule_work(&np->reset_task);
 6546}
 6547
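/* Pack one little-endian TX descriptor: the SOP/MARK bits, the number
 * of descriptors making up the packet, the transfer length, and the
 * DMA address (masked to the SAD field).
 */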
 6548static void niu_set_txd(struct tx_ring_info *rp, int index,
 6549			u64 mapping, u64 len, u64 mark,
 6550			u64 n_frags)
 6551{
 6552	__le64 *desc = &rp->descr[index];
 6553
 6554	*desc = cpu_to_le64(mark |
 6555			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
 6556			    (len << TX_DESC_TR_LEN_SHIFT) |
 6557			    (mapping & TX_DESC_SAD));
 6558}
 6559
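/* Build the 64-bit flags word of the software tx_pkt_hdr that precedes
 * each frame.  The L3/L4 offsets are measured from the end of that
 * header (hence the pad_bytes + sizeof(struct tx_pkt_hdr) corrections)
 * and, like the pad, are expressed in 16-bit units ("/ 2").
 */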
 6560static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
 6561				u64 pad_bytes, u64 len)
 6562{
 6563	u16 eth_proto, eth_proto_inner;
 6564	u64 csum_bits, l3off, ihl, ret;
 6565	u8 ip_proto;
 6566	int ipv6;
 6567
 6568	eth_proto = be16_to_cpu(ehdr->h_proto);
 6569	eth_proto_inner = eth_proto;
 6570	if (eth_proto == ETH_P_8021Q) {
 6571		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
 6572		__be16 val = vp->h_vlan_encapsulated_proto;
 6573
 6574		eth_proto_inner = be16_to_cpu(val);
 6575	}
 6576
 6577	ipv6 = ihl = 0;
 6578	switch (skb->protocol) {
 6579	case cpu_to_be16(ETH_P_IP):
 6580		ip_proto = ip_hdr(skb)->protocol;
 6581		ihl = ip_hdr(skb)->ihl;
 6582		break;
 6583	case cpu_to_be16(ETH_P_IPV6):
 6584		ip_proto = ipv6_hdr(skb)->nexthdr;
 6585		ihl = (40 >> 2);
 6586		ipv6 = 1;
 6587		break;
 6588	default:
 6589		ip_proto = ihl = 0;
 6590		break;
 6591	}
 6592
 6593	csum_bits = TXHDR_CSUM_NONE;
 6594	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 6595		u64 start, stuff;
 6596
 6597		csum_bits = (ip_proto == IPPROTO_TCP ?
 6598			     TXHDR_CSUM_TCP :
 6599			     (ip_proto == IPPROTO_UDP ?
 6600			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
 6601
 6602		start = skb_checksum_start_offset(skb) -
 6603			(pad_bytes + sizeof(struct tx_pkt_hdr));
 6604		stuff = start + skb->csum_offset;
 6605
 6606		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
 6607		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
 6608	}
 6609
 6610	l3off = skb_network_offset(skb) -
 6611		(pad_bytes + sizeof(struct tx_pkt_hdr));
 6612
 6613	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
 6614	       (len << TXHDR_LEN_SHIFT) |
 6615	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
 6616	       (ihl << TXHDR_IHL_SHIFT) |
 6617	       ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
 6618	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
 6619	       (ipv6 ? TXHDR_IP_VER : 0) |
 6620	       csum_bits);
 6621
 6622	return ret;
 6623}
 6624
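/* ndo_start_xmit: push a tx_pkt_hdr in front of the frame (the extra
 * "align" bytes keep the pushed header 16-byte aligned), then emit one
 * descriptor per MAX_TX_DESC_LEN chunk of the linear area plus one per
 * page fragment, and kick the ring doorbell with the wrap bit.
 */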
 6625static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 6626				  struct net_device *dev)
 6627{
 6628	struct niu *np = netdev_priv(dev);
 6629	unsigned long align, headroom;
 6630	struct netdev_queue *txq;
 6631	struct tx_ring_info *rp;
 6632	struct tx_pkt_hdr *tp;
 6633	unsigned int len, nfg;
 6634	struct ethhdr *ehdr;
 6635	int prod, i, tlen;
 6636	u64 mapping, mrk;
 6637
 6638	i = skb_get_queue_mapping(skb);
 6639	rp = &np->tx_rings[i];
 6640	txq = netdev_get_tx_queue(dev, i);
 6641
 6642	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 6643		netif_tx_stop_queue(txq);
 6644		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
 6645		rp->tx_errors++;
 6646		return NETDEV_TX_BUSY;
 6647	}
 6648
 6649	if (skb->len < ETH_ZLEN) {
 6650		unsigned int pad_bytes = ETH_ZLEN - skb->len;
 6651
 6652		if (skb_pad(skb, pad_bytes))
 6653			goto out;
 6654		skb_put(skb, pad_bytes);
 6655	}
 6656
 6657	len = sizeof(struct tx_pkt_hdr) + 15;
 6658	if (skb_headroom(skb) < len) {
 6659		struct sk_buff *skb_new;
 6660
 6661		skb_new = skb_realloc_headroom(skb, len);
 6662		if (!skb_new) {
 6663			rp->tx_errors++;
 6664			goto out_drop;
 6665		}
 6666		kfree_skb(skb);
 6667		skb = skb_new;
 6668	} else
 6669		skb_orphan(skb);
 6670
 6671	align = ((unsigned long) skb->data & (16 - 1));
 6672	headroom = align + sizeof(struct tx_pkt_hdr);
 6673
 6674	ehdr = (struct ethhdr *) skb->data;
 6675	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
 6676
 6677	len = skb->len - sizeof(struct tx_pkt_hdr);
 6678	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
 6679	tp->resv = 0;
 6680
 6681	len = skb_headlen(skb);
 6682	mapping = np->ops->map_single(np->device, skb->data,
 6683				      len, DMA_TO_DEVICE);
 6684
 6685	prod = rp->prod;
 6686
 6687	rp->tx_buffs[prod].skb = skb;
 6688	rp->tx_buffs[prod].mapping = mapping;
 6689
 6690	mrk = TX_DESC_SOP;
 6691	if (++rp->mark_counter == rp->mark_freq) {
 6692		rp->mark_counter = 0;
 6693		mrk |= TX_DESC_MARK;
 6694		rp->mark_pending++;
 6695	}
 6696
 6697	tlen = len;
 6698	nfg = skb_shinfo(skb)->nr_frags;
 6699	while (tlen > 0) {
 6700		tlen -= MAX_TX_DESC_LEN;
 6701		nfg++;
 6702	}
 6703
 6704	while (len > 0) {
 6705		unsigned int this_len = len;
 6706
 6707		if (this_len > MAX_TX_DESC_LEN)
 6708			this_len = MAX_TX_DESC_LEN;
 6709
 6710		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
 6711		mrk = nfg = 0;
 6712
 6713		prod = NEXT_TX(rp, prod);
 6714		mapping += this_len;
 6715		len -= this_len;
 6716	}
 6717
 6718	for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
 6719		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 6720
 6721		len = skb_frag_size(frag);
 6722		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
 6723					    frag->page_offset, len,
 6724					    DMA_TO_DEVICE);
 6725
 6726		rp->tx_buffs[prod].skb = NULL;
 6727		rp->tx_buffs[prod].mapping = mapping;
 6728
 6729		niu_set_txd(rp, prod, mapping, len, 0, 0);
 6730
 6731		prod = NEXT_TX(rp, prod);
 6732	}
 6733
 6734	if (prod < rp->prod)
 6735		rp->wrap_bit ^= TX_RING_KICK_WRAP;
 6736	rp->prod = prod;
 6737
 6738	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
 6739
 6740	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
 6741		netif_tx_stop_queue(txq);
 6742		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
 6743			netif_tx_wake_queue(txq);
 6744	}
 6745
 6746out:
 6747	return NETDEV_TX_OK;
 6748
 6749out_drop:
 6750	rp->tx_errors++;
 6751	kfree_skb(skb);
 6752	goto out;
 6753}
 6754
 6755static int niu_change_mtu(struct net_device *dev, int new_mtu)
 6756{
 6757	struct niu *np = netdev_priv(dev);
 6758	int err, orig_jumbo, new_jumbo;
 6759
 6760	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
 6761		return -EINVAL;
 6762
 6763	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
 6764	new_jumbo = (new_mtu > ETH_DATA_LEN);
 6765
 6766	dev->mtu = new_mtu;
 6767
 6768	if (!netif_running(dev) ||
 6769	    (orig_jumbo == new_jumbo))
 6770		return 0;
 6771
 6772	niu_full_shutdown(np, dev);
 6773
 6774	niu_free_channels(np);
 6775
 6776	niu_enable_napi(np);
 6777
 6778	err = niu_alloc_channels(np);
 6779	if (err)
 6780		return err;
 6781
 6782	spin_lock_irq(&np->lock);
 6783
 6784	err = niu_init_hw(np);
 6785	if (!err) {
 6786		init_timer(&np->timer);
 6787		np->timer.expires = jiffies + HZ;
 6788		np->timer.data = (unsigned long) np;
 6789		np->timer.function = niu_timer;
 6790
 6791		err = niu_enable_interrupts(np, 1);
 6792		if (err)
 6793			niu_stop_hw(np);
 6794	}
 6795
 6796	spin_unlock_irq(&np->lock);
 6797
 6798	if (!err) {
 6799		netif_tx_start_all_queues(dev);
 6800		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 6801			netif_carrier_on(dev);
 6802
 6803		add_timer(&np->timer);
 6804	}
 6805
 6806	return err;
 6807}
 6808
 6809static void niu_get_drvinfo(struct net_device *dev,
 6810			    struct ethtool_drvinfo *info)
 6811{
 6812	struct niu *np = netdev_priv(dev);
 6813	struct niu_vpd *vpd = &np->vpd;
 6814
 6815	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
 6816	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 6817	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
 6818		vpd->fcode_major, vpd->fcode_minor);
 6819	if (np->parent->plat_type != PLAT_TYPE_NIU)
 6820		strlcpy(info->bus_info, pci_name(np->pdev),
 6821			sizeof(info->bus_info));
 6822}
 6823
 6824static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 6825{
 6826	struct niu *np = netdev_priv(dev);
 6827	struct niu_link_config *lp;
 6828
 6829	lp = &np->link_config;
 6830
 6831	memset(cmd, 0, sizeof(*cmd));
 6832	cmd->phy_address = np->phy_addr;
 6833	cmd->supported = lp->supported;
 6834	cmd->advertising = lp->active_advertising;
 6835	cmd->autoneg = lp->active_autoneg;
 6836	ethtool_cmd_speed_set(cmd, lp->active_speed);
 6837	cmd->duplex = lp->active_duplex;
 6838	cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
 6839	cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
 6840		XCVR_EXTERNAL : XCVR_INTERNAL;
 6841
 6842	return 0;
 6843}
 6844
 6845static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 6846{
 6847	struct niu *np = netdev_priv(dev);
 6848	struct niu_link_config *lp = &np->link_config;
 6849
 6850	lp->advertising = cmd->advertising;
 6851	lp->speed = ethtool_cmd_speed(cmd);
 6852	lp->duplex = cmd->duplex;
 6853	lp->autoneg = cmd->autoneg;
 6854	return niu_init_link(np);
 6855}
 6856
 6857static u32 niu_get_msglevel(struct net_device *dev)
 6858{
 6859	struct niu *np = netdev_priv(dev);
 6860	return np->msg_enable;
 6861}
 6862
 6863static void niu_set_msglevel(struct net_device *dev, u32 value)
 6864{
 6865	struct niu *np = netdev_priv(dev);
 6866	np->msg_enable = value;
 6867}
 6868
 6869static int niu_nway_reset(struct net_device *dev)
 6870{
 6871	struct niu *np = netdev_priv(dev);
 6872
 6873	if (np->link_config.autoneg)
 6874		return niu_init_link(np);
 6875
 6876	return 0;
 6877}
 6878
 6879static int niu_get_eeprom_len(struct net_device *dev)
 6880{
 6881	struct niu *np = netdev_priv(dev);
 6882
 6883	return np->eeprom_len;
 6884}
 6885
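/* ethtool EEPROM read: the ESPC space is addressed as 32-bit words, so
 * handle an unaligned head and tail around the word-at-a-time loop.
 */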
 6886static int niu_get_eeprom(struct net_device *dev,
 6887			  struct ethtool_eeprom *eeprom, u8 *data)
 6888{
 6889	struct niu *np = netdev_priv(dev);
 6890	u32 offset, len, val;
 6891
 6892	offset = eeprom->offset;
 6893	len = eeprom->len;
 6894
 6895	if (offset + len < offset)
 6896		return -EINVAL;
 6897	if (offset >= np->eeprom_len)
 6898		return -EINVAL;
 6899	if (offset + len > np->eeprom_len)
 6900		len = eeprom->len = np->eeprom_len - offset;
 6901
 6902	if (offset & 3) {
 6903		u32 b_offset, b_count;
 6904
 6905		b_offset = offset & 3;
 6906		b_count = 4 - b_offset;
 6907		if (b_count > len)
 6908			b_count = len;
 6909
 6910		val = nr64(ESPC_NCR((offset - b_offset) / 4));
 6911		memcpy(data, ((char *)&val) + b_offset, b_count);
 6912		data += b_count;
 6913		len -= b_count;
 6914		offset += b_count;
 6915	}
 6916	while (len >= 4) {
 6917		val = nr64(ESPC_NCR(offset / 4));
 6918		memcpy(data, &val, 4);
 6919		data += 4;
 6920		len -= 4;
 6921		offset += 4;
 6922	}
 6923	if (len) {
 6924		val = nr64(ESPC_NCR(offset / 4));
 6925		memcpy(data, &val, len);
 6926	}
 6927	return 0;
 6928}
 6929
 6930static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
 6931{
 6932	switch (flow_type) {
 6933	case TCP_V4_FLOW:
 6934	case TCP_V6_FLOW:
 6935		*pid = IPPROTO_TCP;
 6936		break;
 6937	case UDP_V4_FLOW:
 6938	case UDP_V6_FLOW:
 6939		*pid = IPPROTO_UDP;
 6940		break;
 6941	case SCTP_V4_FLOW:
 6942	case SCTP_V6_FLOW:
 6943		*pid = IPPROTO_SCTP;
 6944		break;
 6945	case AH_V4_FLOW:
 6946	case AH_V6_FLOW:
 6947		*pid = IPPROTO_AH;
 6948		break;
 6949	case ESP_V4_FLOW:
 6950	case ESP_V6_FLOW:
 6951		*pid = IPPROTO_ESP;
 6952		break;
 6953	default:
 6954		*pid = 0;
 6955		break;
 6956	}
 6957}
 6958
 6959static int niu_class_to_ethflow(u64 class, int *flow_type)
 6960{
 6961	switch (class) {
 6962	case CLASS_CODE_TCP_IPV4:
 6963		*flow_type = TCP_V4_FLOW;
 6964		break;
 6965	case CLASS_CODE_UDP_IPV4:
 6966		*flow_type = UDP_V4_FLOW;
 6967		break;
 6968	case CLASS_CODE_AH_ESP_IPV4:
 6969		*flow_type = AH_V4_FLOW;
 6970		break;
 6971	case CLASS_CODE_SCTP_IPV4:
 6972		*flow_type = SCTP_V4_FLOW;
 6973		break;
 6974	case CLASS_CODE_TCP_IPV6:
 6975		*flow_type = TCP_V6_FLOW;
 6976		break;
 6977	case CLASS_CODE_UDP_IPV6:
 6978		*flow_type = UDP_V6_FLOW;
 6979		break;
 6980	case CLASS_CODE_AH_ESP_IPV6:
 6981		*flow_type = AH_V6_FLOW;
 6982		break;
 6983	case CLASS_CODE_SCTP_IPV6:
 6984		*flow_type = SCTP_V6_FLOW;
 6985		break;
 6986	case CLASS_CODE_USER_PROG1:
 6987	case CLASS_CODE_USER_PROG2:
 6988	case CLASS_CODE_USER_PROG3:
 6989	case CLASS_CODE_USER_PROG4:
 6990		*flow_type = IP_USER_FLOW;
 6991		break;
 6992	default:
 6993		return 0;
 6994	}
 6995
 6996	return 1;
 6997}
 6998
 6999static int niu_ethflow_to_class(int flow_type, u64 *class)
 7000{
 7001	switch (flow_type) {
 7002	case TCP_V4_FLOW:
 7003		*class = CLASS_CODE_TCP_IPV4;
 7004		break;
 7005	case UDP_V4_FLOW:
 7006		*class = CLASS_CODE_UDP_IPV4;
 7007		break;
 7008	case AH_ESP_V4_FLOW:
 7009	case AH_V4_FLOW:
 7010	case ESP_V4_FLOW:
 7011		*class = CLASS_CODE_AH_ESP_IPV4;
 7012		break;
 7013	case SCTP_V4_FLOW:
 7014		*class = CLASS_CODE_SCTP_IPV4;
 7015		break;
 7016	case TCP_V6_FLOW:
 7017		*class = CLASS_CODE_TCP_IPV6;
 7018		break;
 7019	case UDP_V6_FLOW:
 7020		*class = CLASS_CODE_UDP_IPV6;
 7021		break;
 7022	case AH_ESP_V6_FLOW:
 7023	case AH_V6_FLOW:
 7024	case ESP_V6_FLOW:
 7025		*class = CLASS_CODE_AH_ESP_IPV6;
 7026		break;
 7027	case SCTP_V6_FLOW:
 7028		*class = CLASS_CODE_SCTP_IPV6;
 7029		break;
 7030	default:
 7031		return 0;
 7032	}
 7033
 7034	return 1;
 7035}
 7036
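/* Translate the chip's FLOW_KEY register bits into the ethtool RXH_*
 * hash-field flags; niu_ethflow_to_flowkey() below is the inverse.
 */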
 7037static u64 niu_flowkey_to_ethflow(u64 flow_key)
 7038{
 7039	u64 ethflow = 0;
 7040
 7041	if (flow_key & FLOW_KEY_L2DA)
 7042		ethflow |= RXH_L2DA;
 7043	if (flow_key & FLOW_KEY_VLAN)
 7044		ethflow |= RXH_VLAN;
 7045	if (flow_key & FLOW_KEY_IPSA)
 7046		ethflow |= RXH_IP_SRC;
 7047	if (flow_key & FLOW_KEY_IPDA)
 7048		ethflow |= RXH_IP_DST;
 7049	if (flow_key & FLOW_KEY_PROTO)
 7050		ethflow |= RXH_L3_PROTO;
 7051	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
 7052		ethflow |= RXH_L4_B_0_1;
 7053	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
 7054		ethflow |= RXH_L4_B_2_3;
 7055
 7056	return ethflow;
 7057
 7058}
 7059
 7060static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
 7061{
 7062	u64 key = 0;
 7063
 7064	if (ethflow & RXH_L2DA)
 7065		key |= FLOW_KEY_L2DA;
 7066	if (ethflow & RXH_VLAN)
 7067		key |= FLOW_KEY_VLAN;
 7068	if (ethflow & RXH_IP_SRC)
 7069		key |= FLOW_KEY_IPSA;
 7070	if (ethflow & RXH_IP_DST)
 7071		key |= FLOW_KEY_IPDA;
 7072	if (ethflow & RXH_L3_PROTO)
 7073		key |= FLOW_KEY_PROTO;
 7074	if (ethflow & RXH_L4_B_0_1)
 7075		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
 7076	if (ethflow & RXH_L4_B_2_3)
 7077		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
 7078
 7079	*flow_key = key;
 7080
 7081	return 1;
 7082
 7083}
 7084
 7085static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 7086{
 7087	u64 class;
 7088
 7089	nfc->data = 0;
 7090
 7091	if (!niu_ethflow_to_class(nfc->flow_type, &class))
 7092		return -EINVAL;
 7093
 7094	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
 7095	    TCAM_KEY_DISC)
 7096		nfc->data = RXH_DISCARD;
 7097	else
 7098		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
 7099						      CLASS_CODE_USER_PROG1]);
 7100	return 0;
 7101}
 7102
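/* Unpack a TCAM key/mask pair into an ethtool IPv4 flow spec: the
 * addresses live in key word 3, the TOS, ports/SPI and protocol in
 * key word 2.
 */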
 7103static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
 7104					struct ethtool_rx_flow_spec *fsp)
 7105{
 7106	u32 tmp;
 7107	u16 prt;
 7108
 7109	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
 7110	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
 7111
 7112	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
 7113	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 7114
 7115	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
 7116	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
 7117
 7118	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
 7119	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 7120
 7121	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
 7122		TCAM_V4KEY2_TOS_SHIFT;
 7123	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
 7124		TCAM_V4KEY2_TOS_SHIFT;
 7125
 7126	switch (fsp->flow_type) {
 7127	case TCP_V4_FLOW:
 7128	case UDP_V4_FLOW:
 7129	case SCTP_V4_FLOW:
 7130		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7131			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
 7132		fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
 7133
 7134		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7135			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
 7136		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 7137
 7138		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7139			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
 7140		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
 7141
 7142		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7143			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
 7144		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 7145		break;
 7146	case AH_V4_FLOW:
 7147	case ESP_V4_FLOW:
 7148		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7149			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7150		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 7151
 7152		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7153			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7154		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 7155		break;
 7156	case IP_USER_FLOW:
 7157		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7158			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7159		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 7160
 7161		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7162			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7163		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 7164
 7165		fsp->h_u.usr_ip4_spec.proto =
 7166			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
 7167			TCAM_V4KEY2_PROTO_SHIFT;
 7168		fsp->m_u.usr_ip4_spec.proto =
 7169			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
 7170			TCAM_V4KEY2_PROTO_SHIFT;
 7171
 7172		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
 7173		break;
 7174	default:
 7175		break;
 7176	}
 7177}
 7178
 7179static int niu_get_ethtool_tcam_entry(struct niu *np,
 7180				      struct ethtool_rxnfc *nfc)
 7181{
 7182	struct niu_parent *parent = np->parent;
 7183	struct niu_tcam_entry *tp;
 7184	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
 7185	u16 idx;
 7186	u64 class;
 7187	int ret = 0;
 7188
  7189	if (nfc->fs.location >= tcam_get_size(np))
		return -EINVAL;

	idx = tcam_get_index(np, (u16)nfc->fs.location);
 7190
 7191	tp = &parent->tcam[idx];
 7192	if (!tp->valid) {
 7193		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
 7194			    parent->index, (u16)nfc->fs.location, idx);
 7195		return -EINVAL;
 7196	}
 7197
 7198	/* fill the flow spec entry */
 7199	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 7200		TCAM_V4KEY0_CLASS_CODE_SHIFT;
  7201	/* niu_class_to_ethflow() returns 1 on success and 0 on failure */
  7202	if (!niu_class_to_ethflow(class, &fsp->flow_type)) {
  7203		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
  7204			    parent->index);
  7205		ret = -EINVAL;
  7206		goto out;
  7207	}
  7208
 7209
 7210	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
 7211		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
 7212			TCAM_V4KEY2_PROTO_SHIFT;
 7213		if (proto == IPPROTO_ESP) {
 7214			if (fsp->flow_type == AH_V4_FLOW)
 7215				fsp->flow_type = ESP_V4_FLOW;
 7216			else
 7217				fsp->flow_type = ESP_V6_FLOW;
 7218		}
 7219	}
 7220
 7221	switch (fsp->flow_type) {
 7222	case TCP_V4_FLOW:
 7223	case UDP_V4_FLOW:
 7224	case SCTP_V4_FLOW:
 7225	case AH_V4_FLOW:
 7226	case ESP_V4_FLOW:
 7227		niu_get_ip4fs_from_tcam_key(tp, fsp);
 7228		break;
 7229	case TCP_V6_FLOW:
 7230	case UDP_V6_FLOW:
 7231	case SCTP_V6_FLOW:
 7232	case AH_V6_FLOW:
 7233	case ESP_V6_FLOW:
 7234		/* Not yet implemented */
 7235		ret = -EINVAL;
 7236		break;
 7237	case IP_USER_FLOW:
 7238		niu_get_ip4fs_from_tcam_key(tp, fsp);
 7239		break;
 7240	default:
 7241		ret = -EINVAL;
 7242		break;
 7243	}
 7244
 7245	if (ret < 0)
 7246		goto out;
 7247
 7248	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
 7249		fsp->ring_cookie = RX_CLS_FLOW_DISC;
 7250	else
 7251		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
 7252			TCAM_ASSOCDATA_OFFSET_SHIFT;
 7253
 7254	/* put the tcam size here */
 7255	nfc->data = tcam_get_size(np);
 7256out:
 7257	return ret;
 7258}
 7259
 7260static int niu_get_ethtool_tcam_all(struct niu *np,
 7261				    struct ethtool_rxnfc *nfc,
 7262				    u32 *rule_locs)
 7263{
 7264	struct niu_parent *parent = np->parent;
 7265	struct niu_tcam_entry *tp;
 7266	int i, idx, cnt;
 7267	unsigned long flags;
 7268	int ret = 0;
 7269
 7270	/* put the tcam size here */
 7271	nfc->data = tcam_get_size(np);
 7272
 7273	niu_lock_parent(np, flags);
 7274	for (cnt = 0, i = 0; i < nfc->data; i++) {
 7275		idx = tcam_get_index(np, i);
 7276		tp = &parent->tcam[idx];
 7277		if (!tp->valid)
 7278			continue;
 7279		if (cnt == nfc->rule_cnt) {
 7280			ret = -EMSGSIZE;
 7281			break;
 7282		}
 7283		rule_locs[cnt] = i;
 7284		cnt++;
 7285	}
 7286	niu_unlock_parent(np, flags);
 7287
 7288	nfc->rule_cnt = cnt;
 7289
 7290	return ret;
 7291}
 7292
 7293static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 7294		       u32 *rule_locs)
 7295{
 7296	struct niu *np = netdev_priv(dev);
 7297	int ret = 0;
 7298
 7299	switch (cmd->cmd) {
 7300	case ETHTOOL_GRXFH:
 7301		ret = niu_get_hash_opts(np, cmd);
 7302		break;
 7303	case ETHTOOL_GRXRINGS:
 7304		cmd->data = np->num_rx_rings;
 7305		break;
 7306	case ETHTOOL_GRXCLSRLCNT:
 7307		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
 7308		break;
 7309	case ETHTOOL_GRXCLSRULE:
 7310		ret = niu_get_ethtool_tcam_entry(np, cmd);
 7311		break;
 7312	case ETHTOOL_GRXCLSRLALL:
 7313		ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
 7314		break;
 7315	default:
 7316		ret = -EINVAL;
 7317		break;
 7318	}
 7319
 7320	return ret;
 7321}
 7322
 7323static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 7324{
 7325	u64 class;
 7326	u64 flow_key = 0;
 7327	unsigned long flags;
 7328
 7329	if (!niu_ethflow_to_class(nfc->flow_type, &class))
 7330		return -EINVAL;
 7331
 7332	if (class < CLASS_CODE_USER_PROG1 ||
 7333	    class > CLASS_CODE_SCTP_IPV6)
 7334		return -EINVAL;
 7335
 7336	if (nfc->data & RXH_DISCARD) {
 7337		niu_lock_parent(np, flags);
 7338		flow_key = np->parent->tcam_key[class -
 7339					       CLASS_CODE_USER_PROG1];
 7340		flow_key |= TCAM_KEY_DISC;
 7341		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
 7342		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
 7343		niu_unlock_parent(np, flags);
 7344		return 0;
 7345	} else {
 7346		/* Discard was set before, but is not set now */
 7347		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
 7348		    TCAM_KEY_DISC) {
 7349			niu_lock_parent(np, flags);
 7350			flow_key = np->parent->tcam_key[class -
 7351					       CLASS_CODE_USER_PROG1];
 7352			flow_key &= ~TCAM_KEY_DISC;
 7353			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
 7354			     flow_key);
 7355			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
 7356				flow_key;
 7357			niu_unlock_parent(np, flags);
 7358		}
 7359	}
 7360
 7361	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
 7362		return -EINVAL;
 7363
 7364	niu_lock_parent(np, flags);
 7365	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
 7366	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
 7367	niu_unlock_parent(np, flags);
 7368
 7369	return 0;
 7370}
 7371
 7372static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
 7373				       struct niu_tcam_entry *tp,
 7374				       int l2_rdc_tab, u64 class)
 7375{
 7376	u8 pid = 0;
 7377	u32 sip, dip, sipm, dipm, spi, spim;
 7378	u16 sport, dport, spm, dpm;
 7379
 7380	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
 7381	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
 7382	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
 7383	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
 7384
 7385	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7386	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
 7387	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
 7388	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
 7389
 7390	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
 7391	tp->key[3] |= dip;
 7392
 7393	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
 7394	tp->key_mask[3] |= dipm;
 7395
 7396	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
 7397		       TCAM_V4KEY2_TOS_SHIFT);
 7398	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
 7399			    TCAM_V4KEY2_TOS_SHIFT);
 7400	switch (fsp->flow_type) {
 7401	case TCP_V4_FLOW:
 7402	case UDP_V4_FLOW:
 7403	case SCTP_V4_FLOW:
 7404		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
 7405		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
 7406		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
 7407		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
 7408
 7409		tp->key[2] |= (((u64)sport << 16) | dport);
 7410		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
 7411		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
 7412		break;
 7413	case AH_V4_FLOW:
 7414	case ESP_V4_FLOW:
 7415		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
 7416		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
 7417
 7418		tp->key[2] |= spi;
 7419		tp->key_mask[2] |= spim;
 7420		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
 7421		break;
 7422	case IP_USER_FLOW:
 7423		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
 7424		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
 7425
 7426		tp->key[2] |= spi;
 7427		tp->key_mask[2] |= spim;
 7428		pid = fsp->h_u.usr_ip4_spec.proto;
 7429		break;
 7430	default:
 7431		break;
 7432	}
 7433
 7434	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
 7435	if (pid) {
 7436		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
 7437	}
 7438}
 7439
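/* ETHTOOL_SRXCLSRLINS: for IP_USER_FLOW, first find or program one of
 * the four user IP classes (refcounted in the parent); then build the
 * TCAM key/mask for the flow, point the assoc data at the chosen RX
 * ring (or mark the flow for discard) and write the entry to hardware.
 */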
 7440static int niu_add_ethtool_tcam_entry(struct niu *np,
 7441				      struct ethtool_rxnfc *nfc)
 7442{
 7443	struct niu_parent *parent = np->parent;
 7444	struct niu_tcam_entry *tp;
 7445	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
 7446	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
 7447	int l2_rdc_table = rdc_table->first_table_num;
 7448	u16 idx;
 7449	u64 class;
 7450	unsigned long flags;
 7451	int err, ret;
 7452
 7453	ret = 0;
 7454
 7455	idx = nfc->fs.location;
 7456	if (idx >= tcam_get_size(np))
 7457		return -EINVAL;
 7458
 7459	if (fsp->flow_type == IP_USER_FLOW) {
 7460		int i;
 7461		int add_usr_cls = 0;
 7462		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
 7463		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
 7464
 7465		if (uspec->ip_ver != ETH_RX_NFC_IP4)
 7466			return -EINVAL;
 7467
 7468		niu_lock_parent(np, flags);
 7469
 7470		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
 7471			if (parent->l3_cls[i]) {
 7472				if (uspec->proto == parent->l3_cls_pid[i]) {
 7473					class = parent->l3_cls[i];
 7474					parent->l3_cls_refcnt[i]++;
 7475					add_usr_cls = 1;
 7476					break;
 7477				}
 7478			} else {
 7479				/* Program new user IP class */
 7480				switch (i) {
 7481				case 0:
 7482					class = CLASS_CODE_USER_PROG1;
 7483					break;
 7484				case 1:
 7485					class = CLASS_CODE_USER_PROG2;
 7486					break;
 7487				case 2:
 7488					class = CLASS_CODE_USER_PROG3;
 7489					break;
 7490				case 3:
 7491					class = CLASS_CODE_USER_PROG4;
 7492					break;
 7493				default:
 7494					break;
 7495				}
 7496				ret = tcam_user_ip_class_set(np, class, 0,
 7497							     uspec->proto,
 7498							     uspec->tos,
 7499							     umask->tos);
 7500				if (ret)
 7501					goto out;
 7502
 7503				ret = tcam_user_ip_class_enable(np, class, 1);
 7504				if (ret)
 7505					goto out;
 7506				parent->l3_cls[i] = class;
 7507				parent->l3_cls_pid[i] = uspec->proto;
 7508				parent->l3_cls_refcnt[i]++;
 7509				add_usr_cls = 1;
 7510				break;
 7511			}
 7512		}
 7513		if (!add_usr_cls) {
 7514			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
 7515				    parent->index, __func__, uspec->proto);
 7516			ret = -EINVAL;
 7517			goto out;
 7518		}
 7519		niu_unlock_parent(np, flags);
 7520	} else {
 7521		if (!niu_ethflow_to_class(fsp->flow_type, &class)) {
 7522			return -EINVAL;
 7523		}
 7524	}
 7525
 7526	niu_lock_parent(np, flags);
 7527
 7528	idx = tcam_get_index(np, idx);
 7529	tp = &parent->tcam[idx];
 7530
 7531	memset(tp, 0, sizeof(*tp));
 7532
 7533	/* fill in the tcam key and mask */
 7534	switch (fsp->flow_type) {
 7535	case TCP_V4_FLOW:
 7536	case UDP_V4_FLOW:
 7537	case SCTP_V4_FLOW:
 7538	case AH_V4_FLOW:
 7539	case ESP_V4_FLOW:
 7540		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
 7541		break;
 7542	case TCP_V6_FLOW:
 7543	case UDP_V6_FLOW:
 7544	case SCTP_V6_FLOW:
 7545	case AH_V6_FLOW:
 7546	case ESP_V6_FLOW:
 7547		/* Not yet implemented */
 7548		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
 7549			    parent->index, __func__, fsp->flow_type);
 7550		ret = -EINVAL;
 7551		goto out;
 7552	case IP_USER_FLOW:
 7553		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
 7554		break;
 7555	default:
 7556		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
 7557			    parent->index, __func__, fsp->flow_type);
 7558		ret = -EINVAL;
 7559		goto out;
 7560	}
 7561
 7562	/* fill in the assoc data */
 7563	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
 7564		tp->assoc_data = TCAM_ASSOCDATA_DISC;
 7565	} else {
 7566		if (fsp->ring_cookie >= np->num_rx_rings) {
 7567			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
 7568				    parent->index, __func__,
 7569				    (long long)fsp->ring_cookie);
 7570			ret = -EINVAL;
 7571			goto out;
 7572		}
 7573		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
 7574				  (fsp->ring_cookie <<
 7575				   TCAM_ASSOCDATA_OFFSET_SHIFT));
 7576	}
 7577
 7578	err = tcam_write(np, idx, tp->key, tp->key_mask);
 7579	if (err) {
 7580		ret = -EINVAL;
 7581		goto out;
 7582	}
 7583	err = tcam_assoc_write(np, idx, tp->assoc_data);
 7584	if (err) {
 7585		ret = -EINVAL;
 7586		goto out;
 7587	}
 7588
 7589	/* validate the entry */
 7590	tp->valid = 1;
 7591	np->clas.tcam_valid_entries++;
 7592out:
 7593	niu_unlock_parent(np, flags);
 7594
 7595	return ret;
 7596}
 7597
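/* ETHTOOL_SRXCLSRLDEL: drop the refcount on a user-programmed class,
 * disabling the class when it reaches zero, then flush and invalidate
 * the TCAM entry.
 */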
 7598static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
 7599{
 7600	struct niu_parent *parent = np->parent;
 7601	struct niu_tcam_entry *tp;
 7602	u16 idx;
 7603	unsigned long flags;
 7604	u64 class;
 7605	int ret = 0;
 7606
 7607	if (loc >= tcam_get_size(np))
 7608		return -EINVAL;
 7609
 7610	niu_lock_parent(np, flags);
 7611
 7612	idx = tcam_get_index(np, loc);
 7613	tp = &parent->tcam[idx];
 7614
  7615	/* If the entry is of a user-defined class, update its refcount */
 7616	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 7617		TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7618
 7619	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
 7620		int i;
 7621		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
 7622			if (parent->l3_cls[i] == class) {
 7623				parent->l3_cls_refcnt[i]--;
 7624				if (!parent->l3_cls_refcnt[i]) {
 7625					/* disable class */
 7626					ret = tcam_user_ip_class_enable(np,
 7627									class,
 7628									0);
 7629					if (ret)
 7630						goto out;
 7631					parent->l3_cls[i] = 0;
 7632					parent->l3_cls_pid[i] = 0;
 7633				}
 7634				break;
 7635			}
 7636		}
 7637		if (i == NIU_L3_PROG_CLS) {
 7638			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
 7639				    parent->index, __func__,
 7640				    (unsigned long long)class);
 7641			ret = -EINVAL;
 7642			goto out;
 7643		}
 7644	}
 7645
 7646	ret = tcam_flush(np, idx);
 7647	if (ret)
 7648		goto out;
 7649
 7650	/* invalidate the entry */
 7651	tp->valid = 0;
 7652	np->clas.tcam_valid_entries--;
 7653out:
 7654	niu_unlock_parent(np, flags);
 7655
 7656	return ret;
 7657}
 7658
 7659static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 7660{
 7661	struct niu *np = netdev_priv(dev);
 7662	int ret = 0;
 7663
 7664	switch (cmd->cmd) {
 7665	case ETHTOOL_SRXFH:
 7666		ret = niu_set_hash_opts(np, cmd);
 7667		break;
 7668	case ETHTOOL_SRXCLSRLINS:
 7669		ret = niu_add_ethtool_tcam_entry(np, cmd);
 7670		break;
 7671	case ETHTOOL_SRXCLSRLDEL:
 7672		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
 7673		break;
 7674	default:
 7675		ret = -EINVAL;
 7676		break;
 7677	}
 7678
 7679	return ret;
 7680}
 7681
 7682static const struct {
 7683	const char string[ETH_GSTRING_LEN];
 7684} niu_xmac_stat_keys[] = {
 7685	{ "tx_frames" },
 7686	{ "tx_bytes" },
 7687	{ "tx_fifo_errors" },
 7688	{ "tx_overflow_errors" },
 7689	{ "tx_max_pkt_size_errors" },
 7690	{ "tx_underflow_errors" },
 7691	{ "rx_local_faults" },
 7692	{ "rx_remote_faults" },
 7693	{ "rx_link_faults" },
 7694	{ "rx_align_errors" },
 7695	{ "rx_frags" },
 7696	{ "rx_mcasts" },
 7697	{ "rx_bcasts" },
 7698	{ "rx_hist_cnt1" },
 7699	{ "rx_hist_cnt2" },
 7700	{ "rx_hist_cnt3" },
 7701	{ "rx_hist_cnt4" },
 7702	{ "rx_hist_cnt5" },
 7703	{ "rx_hist_cnt6" },
 7704	{ "rx_hist_cnt7" },
 7705	{ "rx_octets" },
 7706	{ "rx_code_violations" },
 7707	{ "rx_len_errors" },
 7708	{ "rx_crc_errors" },
 7709	{ "rx_underflows" },
 7710	{ "rx_overflows" },
 7711	{ "pause_off_state" },
 7712	{ "pause_on_state" },
 7713	{ "pause_received" },
 7714};
 7715
 7716#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
 7717
 7718static const struct {
 7719	const char string[ETH_GSTRING_LEN];
 7720} niu_bmac_stat_keys[] = {
 7721	{ "tx_underflow_errors" },
 7722	{ "tx_max_pkt_size_errors" },
 7723	{ "tx_bytes" },
 7724	{ "tx_frames" },
 7725	{ "rx_overflows" },
 7726	{ "rx_frames" },
 7727	{ "rx_align_errors" },
 7728	{ "rx_crc_errors" },
 7729	{ "rx_len_errors" },
 7730	{ "pause_off_state" },
 7731	{ "pause_on_state" },
 7732	{ "pause_received" },
 7733};
 7734
 7735#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
 7736
 7737static const struct {
 7738	const char string[ETH_GSTRING_LEN];
 7739} niu_rxchan_stat_keys[] = {
 7740	{ "rx_channel" },
 7741	{ "rx_packets" },
 7742	{ "rx_bytes" },
 7743	{ "rx_dropped" },
 7744	{ "rx_errors" },
 7745};
 7746
 7747#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
 7748
 7749static const struct {
 7750	const char string[ETH_GSTRING_LEN];
 7751} niu_txchan_stat_keys[] = {
 7752	{ "tx_channel" },
 7753	{ "tx_packets" },
 7754	{ "tx_bytes" },
 7755	{ "tx_errors" },
 7756};
 7757
 7758#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
 7759
 7760static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 7761{
 7762	struct niu *np = netdev_priv(dev);
 7763	int i;
 7764
 7765	if (stringset != ETH_SS_STATS)
 7766		return;
 7767
 7768	if (np->flags & NIU_FLAGS_XMAC) {
 7769		memcpy(data, niu_xmac_stat_keys,
 7770		       sizeof(niu_xmac_stat_keys));
 7771		data += sizeof(niu_xmac_stat_keys);
 7772	} else {
 7773		memcpy(data, niu_bmac_stat_keys,
 7774		       sizeof(niu_bmac_stat_keys));
 7775		data += sizeof(niu_bmac_stat_keys);
 7776	}
 7777	for (i = 0; i < np->num_rx_rings; i++) {
 7778		memcpy(data, niu_rxchan_stat_keys,
 7779		       sizeof(niu_rxchan_stat_keys));
 7780		data += sizeof(niu_rxchan_stat_keys);
 7781	}
 7782	for (i = 0; i < np->num_tx_rings; i++) {
 7783		memcpy(data, niu_txchan_stat_keys,
 7784		       sizeof(niu_txchan_stat_keys));
 7785		data += sizeof(niu_txchan_stat_keys);
 7786	}
 7787}
 7788
 7789static int niu_get_sset_count(struct net_device *dev, int stringset)
 7790{
 7791	struct niu *np = netdev_priv(dev);
 7792
 7793	if (stringset != ETH_SS_STATS)
 7794		return -EINVAL;
 7795
 7796	return (np->flags & NIU_FLAGS_XMAC ?
 7797		 NUM_XMAC_STAT_KEYS :
 7798		 NUM_BMAC_STAT_KEYS) +
 7799		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
 7800		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
 7801}
 7802
 7803static void niu_get_ethtool_stats(struct net_device *dev,
 7804				  struct ethtool_stats *stats, u64 *data)
 7805{
 7806	struct niu *np = netdev_priv(dev);
 7807	int i;
 7808
 7809	niu_sync_mac_stats(np);
 7810	if (np->flags & NIU_FLAGS_XMAC) {
 7811		memcpy(data, &np->mac_stats.xmac,
 7812		       sizeof(struct niu_xmac_stats));
 7813		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
 7814	} else {
 7815		memcpy(data, &np->mac_stats.bmac,
 7816		       sizeof(struct niu_bmac_stats));
 7817		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
 7818	}
 7819	for (i = 0; i < np->num_rx_rings; i++) {
 7820		struct rx_ring_info *rp = &np->rx_rings[i];
 7821
 7822		niu_sync_rx_discard_stats(np, rp, 0);
 7823
 7824		data[0] = rp->rx_channel;
 7825		data[1] = rp->rx_packets;
 7826		data[2] = rp->rx_bytes;
 7827		data[3] = rp->rx_dropped;
 7828		data[4] = rp->rx_errors;
 7829		data += 5;
 7830	}
 7831	for (i = 0; i < np->num_tx_rings; i++) {
 7832		struct tx_ring_info *rp = &np->tx_rings[i];
 7833
 7834		data[0] = rp->tx_channel;
 7835		data[1] = rp->tx_packets;
 7836		data[2] = rp->tx_bytes;
 7837		data[3] = rp->tx_errors;
 7838		data += 4;
 7839	}
 7840}
 7841
 7842static u64 niu_led_state_save(struct niu *np)
 7843{
 7844	if (np->flags & NIU_FLAGS_XMAC)
 7845		return nr64_mac(XMAC_CONFIG);
 7846	else
 7847		return nr64_mac(BMAC_XIF_CONFIG);
 7848}
 7849
 7850static void niu_led_state_restore(struct niu *np, u64 val)
 7851{
 7852	if (np->flags & NIU_FLAGS_XMAC)
 7853		nw64_mac(XMAC_CONFIG, val);
 7854	else
 7855		nw64_mac(BMAC_XIF_CONFIG, val);
 7856}
 7857
 7858static void niu_force_led(struct niu *np, int on)
 7859{
 7860	u64 val, reg, bit;
 7861
 7862	if (np->flags & NIU_FLAGS_XMAC) {
 7863		reg = XMAC_CONFIG;
 7864		bit = XMAC_CONFIG_FORCE_LED_ON;
 7865	} else {
 7866		reg = BMAC_XIF_CONFIG;
 7867		bit = BMAC_XIF_CONFIG_LINK_LED;
 7868	}
 7869
 7870	val = nr64_mac(reg);
 7871	if (on)
 7872		val |= bit;
 7873	else
 7874		val &= ~bit;
 7875	nw64_mac(reg, val);
 7876}
 7877
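/* ethtool LED identify: save the LED state on ACTIVE, force the LED
 * on/off as the core blinks, and restore the saved state on INACTIVE.
 */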
 7878static int niu_set_phys_id(struct net_device *dev,
 7879			   enum ethtool_phys_id_state state)
 7881{
 7882	struct niu *np = netdev_priv(dev);
 7883
 7884	if (!netif_running(dev))
 7885		return -EAGAIN;
 7886
 7887	switch (state) {
 7888	case ETHTOOL_ID_ACTIVE:
 7889		np->orig_led_state = niu_led_state_save(np);
 7890		return 1;	/* cycle on/off once per second */
 7891
 7892	case ETHTOOL_ID_ON:
 7893		niu_force_led(np, 1);
 7894		break;
 7895
 7896	case ETHTOOL_ID_OFF:
 7897		niu_force_led(np, 0);
 7898		break;
 7899
 7900	case ETHTOOL_ID_INACTIVE:
 7901		niu_led_state_restore(np, np->orig_led_state);
 7902	}
 7903
 7904	return 0;
 7905}
 7906
 7907static const struct ethtool_ops niu_ethtool_ops = {
 7908	.get_drvinfo		= niu_get_drvinfo,
 7909	.get_link		= ethtool_op_get_link,
 7910	.get_msglevel		= niu_get_msglevel,
 7911	.set_msglevel		= niu_set_msglevel,
 7912	.nway_reset		= niu_nway_reset,
 7913	.get_eeprom_len		= niu_get_eeprom_len,
 7914	.get_eeprom		= niu_get_eeprom,
 7915	.get_settings		= niu_get_settings,
 7916	.set_settings		= niu_set_settings,
 7917	.get_strings		= niu_get_strings,
 7918	.get_sset_count		= niu_get_sset_count,
 7919	.get_ethtool_stats	= niu_get_ethtool_stats,
 7920	.set_phys_id		= niu_set_phys_id,
 7921	.get_rxnfc		= niu_get_nfc,
 7922	.set_rxnfc		= niu_set_nfc,
 7923};
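/* Illustrative userspace entry points for the hooks above:
 *   ethtool -S ethX       -> get_sset_count/get_strings/get_ethtool_stats
 *   ethtool -p ethX 5     -> set_phys_id (blink the LED for 5 seconds)
 *   ethtool -N ethX ...   -> set_rxnfc (hash opts / TCAM flow rules)
 */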
 7924
 7925static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
 7926			      int ldg, int ldn)
 7927{
 7928	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
 7929		return -EINVAL;
 7930	if (ldn < 0 || ldn > LDN_MAX)
 7931		return -EINVAL;
 7932
 7933	parent->ldg_map[ldn] = ldg;
 7934
 7935	if (np->parent->plat_type == PLAT_TYPE_NIU) {
 7936		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
 7937		 * the firmware, and we're not supposed to change them.
 7938		 * Validate the mapping, because if it's wrong we probably
 7939		 * won't get any interrupts and that's painful to debug.
 7940		 */
 7941		if (nr64(LDG_NUM(ldn)) != ldg) {
  6942			dev_err(np->device, "Port %u, mismatched LDG assignment for ldn %d: should be %d, is %llu\n",
 7943				np->port, ldn, ldg,
 7944				(unsigned long long) nr64(LDG_NUM(ldn)));
 7945			return -EINVAL;
 7946		}
 7947	} else
 7948		nw64(LDG_NUM(ldn), ldg);
 7949
 7950	return 0;
 7951}
 7952
 7953static int niu_set_ldg_timer_res(struct niu *np, int res)
 7954{
 7955	if (res < 0 || res > LDG_TIMER_RES_VAL)
 7956		return -EINVAL;
 7957
 7958
 7959	nw64(LDG_TIMER_RES, res);
 7960
 7961	return 0;
 7962}
 7963
 7964static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
 7965{
 7966	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
 7967	    (func < 0 || func > 3) ||
 7968	    (vector < 0 || vector > 0x1f))
 7969		return -EINVAL;
 7970
 7971	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
 7972
 7973	return 0;
 7974}
 7975
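/* Read one byte of the expansion ROM/VPD EEPROM through the ESPC PIO
 * window.  The read frame is issued twice, polling up to 64 x 5us for
 * READ_END each time, before the data byte is pulled from the frame.
 */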
 7976static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
 7977{
 7978	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
 7979				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
 7980	int limit;
 7981
 7982	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
 7983		return -EINVAL;
 7984
 7985	frame = frame_base;
 7986	nw64(ESPC_PIO_STAT, frame);
 7987	limit = 64;
 7988	do {
 7989		udelay(5);
 7990		frame = nr64(ESPC_PIO_STAT);
 7991		if (frame & ESPC_PIO_STAT_READ_END)
 7992			break;
 7993	} while (limit--);
 7994	if (!(frame & ESPC_PIO_STAT_READ_END)) {
 7995		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
 7996			(unsigned long long) frame);
 7997		return -ENODEV;
 7998	}
 7999
 8000	frame = frame_base;
 8001	nw64(ESPC_PIO_STAT, frame);
 8002	limit = 64;
 8003	do {
 8004		udelay(5);
 8005		frame = nr64(ESPC_PIO_STAT);
 8006		if (frame & ESPC_PIO_STAT_READ_END)
 8007			break;
 8008	} while (limit--);
 8009	if (!(frame & ESPC_PIO_STAT_READ_END)) {
 8010		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
 8011			(unsigned long long) frame);
 8012		return -ENODEV;
 8013	}
 8014
 8015	frame = nr64(ESPC_PIO_STAT);
 8016	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
 8017}
 8018
 8019static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
 8020{
 8021	int err = niu_pci_eeprom_read(np, off);
 8022	u16 val;
 8023
 8024	if (err < 0)
 8025		return err;
 8026	val = (err << 8);
 8027	err = niu_pci_eeprom_read(np, off + 1);
 8028	if (err < 0)
 8029		return err;
 8030	val |= (err & 0xff);
 8031
 8032	return val;
 8033}
 8034
 8035static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
 8036{
 8037	int err = niu_pci_eeprom_read(np, off);
 8038	u16 val;
 8039
 8040	if (err < 0)
 8041		return err;
 8042
 8043	val = (err & 0xff);
 8044	err = niu_pci_eeprom_read(np, off + 1);
 8045	if (err < 0)
 8046		return err;
 8047
 8048	val |= (err & 0xff) << 8;
 8049
 8050	return val;
 8051}
 8052
 8053static int __devinit niu_pci_vpd_get_propname(struct niu *np,
 8054					      u32 off,
 8055					      char *namebuf,
 8056					      int namebuf_len)
 8057{
 8058	int i;
 8059
 8060	for (i = 0; i < namebuf_len; i++) {
 8061		int err = niu_pci_eeprom_read(np, off + i);
 8062		if (err < 0)
 8063			return err;
 8064		*namebuf++ = err;
 8065		if (!err)
 8066			break;
 8067	}
 8068	if (i >= namebuf_len)
 8069		return -EINVAL;
 8070
 8071	return i + 1;
 8072}
 8073
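/* Scan the VPD version string for "FCode x.y"; if the firmware is at
 * least NIU_VPD_MIN_MAJOR.NIU_VPD_MIN_MINOR, mark the VPD as valid.
 */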
 8074static void __devinit niu_vpd_parse_version(struct niu *np)
 8075{
 8076	struct niu_vpd *vpd = &np->vpd;
 8077	int len = strlen(vpd->version) + 1;
 8078	const char *s = vpd->version;
 8079	int i;
 8080
 8081	for (i = 0; i < len - 5; i++) {
 8082		if (!strncmp(s + i, "FCode ", 6))
 8083			break;
 8084	}
 8085	if (i >= len - 5)
 8086		return;
 8087
 8088	s += i + 5;
 8089	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
 8090
 8091	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8092		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
 8093		     vpd->fcode_major, vpd->fcode_minor);
 8094	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
 8095	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
 8096	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
 8097		np->flags |= NIU_FLAGS_VPD_VALID;
 8098}
 8099
 8100/* ESPC_PIO_EN_ENABLE must be set */
 8101static int __devinit niu_pci_vpd_scan_props(struct niu *np,
 8102					    u32 start, u32 end)
 8103{
 8104	unsigned int found_mask = 0;
 8105#define FOUND_MASK_MODEL	0x00000001
 8106#define FOUND_MASK_BMODEL	0x00000002
 8107#define FOUND_MASK_VERS		0x00000004
 8108#define FOUND_MASK_MAC		0x00000008
 8109#define FOUND_MASK_NMAC		0x00000010
 8110#define FOUND_MASK_PHY		0x00000020
 8111#define FOUND_MASK_ALL		0x0000003f
 8112
 8113	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8114		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
 8115	while (start < end) {
 8116		int len, err, prop_len;
 8117		char namebuf[64];
 8118		u8 *prop_buf;
 8119		int max_len;
 8120
 8121		if (found_mask == FOUND_MASK_ALL) {
 8122			niu_vpd_parse_version(np);
 8123			return 1;
 8124		}
 8125
 8126		err = niu_pci_eeprom_read(np, start + 2);
 8127		if (err < 0)
 8128			return err;
 8129		len = err;
 8130		start += 3;
 8131
  8132		prop_len = niu_pci_eeprom_read(np, start + 4);
		if (prop_len < 0)
			return prop_len;
 8133		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
 8134		if (err < 0)
 8135			return err;
 8136
 8137		prop_buf = NULL;
 8138		max_len = 0;
 8139		if (!strcmp(namebuf, "model")) {
 8140			prop_buf = np->vpd.model;
 8141			max_len = NIU_VPD_MODEL_MAX;
 8142			found_mask |= FOUND_MASK_MODEL;
 8143		} else if (!strcmp(namebuf, "board-model")) {
 8144			prop_buf = np->vpd.board_model;
 8145			max_len = NIU_VPD_BD_MODEL_MAX;
 8146			found_mask |= FOUND_MASK_BMODEL;
 8147		} else if (!strcmp(namebuf, "version")) {
 8148			prop_buf = np->vpd.version;
 8149			max_len = NIU_VPD_VERSION_MAX;
 8150			found_mask |= FOUND_MASK_VERS;
 8151		} else if (!strcmp(namebuf, "local-mac-address")) {
 8152			prop_buf = np->vpd.local_mac;
 8153			max_len = ETH_ALEN;
 8154			found_mask |= FOUND_MASK_MAC;
 8155		} else if (!strcmp(namebuf, "num-mac-addresses")) {
 8156			prop_buf = &np->vpd.mac_num;
 8157			max_len = 1;
 8158			found_mask |= FOUND_MASK_NMAC;
 8159		} else if (!strcmp(namebuf, "phy-type")) {
 8160			prop_buf = np->vpd.phy_type;
 8161			max_len = NIU_VPD_PHY_TYPE_MAX;
 8162			found_mask |= FOUND_MASK_PHY;
 8163		}
 8164
 8165		if (max_len && prop_len > max_len) {
  8166			dev_err(np->device, "Property '%s' length (%d) is too long\n",
      				namebuf, prop_len);
 8167			return -EINVAL;
 8168		}
 8169
 8170		if (prop_buf) {
 8171			u32 off = start + 5 + err;
 8172			int i;
 8173
 8174			netif_printk(np, probe, KERN_DEBUG, np->dev,
 8175				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
 8176				     namebuf, prop_len);
  8177			for (i = 0; i < prop_len; i++) {
      				err = niu_pci_eeprom_read(np, off + i);
      				if (err < 0)
      					return err;
      				*prop_buf++ = err;
      			}
 8179		}
 8180
 8181		start += len;
 8182	}
 8183
 8184	return 0;
 8185}
 8186
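      /* Walk the VPD large-resource blocks: 0x90 is the standard VPD-R
       * (read-only data) resource tag, followed by a 16-bit little-endian
       * length, which is why the byte-swapped _swp reader is used here.
       */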
 8187/* ESPC_PIO_EN_ENABLE must be set */
 8188static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
 8189{
 8190	u32 offset;
 8191	int err;
 8192
 8193	err = niu_pci_eeprom_read16_swp(np, start + 1);
 8194	if (err < 0)
 8195		return;
 8196
 8197	offset = err + 3;
 8198
 8199	while (start + offset < ESPC_EEPROM_SIZE) {
 8200		u32 here = start + offset;
 8201		u32 end;
 8202
 8203		err = niu_pci_eeprom_read(np, here);
 8204		if (err != 0x90)
 8205			return;
 8206
 8207		err = niu_pci_eeprom_read16_swp(np, here + 1);
 8208		if (err < 0)
 8209			return;
 8210
 8211		here = start + offset + 3;
 8212		end = start + offset + err;
 8213
 8214		offset += err;
 8215
 8216		err = niu_pci_vpd_scan_props(np, here, end);
 8217		if (err < 0 || err == 1)
 8218			return;
 8219	}
 8220}
 8221
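      /* Find the VPD inside the expansion ROM: walk the ROM images (0x55aa
       * header signature, "PCIR" data structure) until one with code type
       * 0x01 (Open Firmware/FCode) turns up, skipping other images via the
       * ROM-header size byte at offset 2 (in 512-byte units).  0x82 is the
       * VPD "Identifier String" large-resource tag expected at the
       * computed offset.
       */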
 8222/* ESPC_PIO_EN_ENABLE must be set */
 8223static u32 __devinit niu_pci_vpd_offset(struct niu *np)
 8224{
 8225	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
 8226	int err;
 8227
 8228	while (start < end) {
 8229		ret = start;
 8230
 8231		/* ROM header signature?  */
 8232		err = niu_pci_eeprom_read16(np, start +  0);
 8233		if (err != 0x55aa)
 8234			return 0;
 8235
 8236		/* Apply offset to PCI data structure.  */
 8237		err = niu_pci_eeprom_read16(np, start + 23);
 8238		if (err < 0)
 8239			return 0;
 8240		start += err;
 8241
 8242		/* Check for "PCIR" signature.  */
 8243		err = niu_pci_eeprom_read16(np, start +  0);
 8244		if (err != 0x5043)
 8245			return 0;
 8246		err = niu_pci_eeprom_read16(np, start +  2);
 8247		if (err != 0x4952)
 8248			return 0;
 8249
 8250		/* Check for OBP image type.  */
 8251		err = niu_pci_eeprom_read(np, start + 20);
 8252		if (err < 0)
 8253			return 0;
 8254		if (err != 0x01) {
 8255			err = niu_pci_eeprom_read(np, ret + 2);
 8256			if (err < 0)
 8257				return 0;
 8258
 8259			start = ret + (err * 512);
 8260			continue;
 8261		}
 8262
  8263		err = niu_pci_eeprom_read16_swp(np, start + 8);
  8264		if (err < 0)
      			return 0;	/* not an errno: callers expect a u32 offset, 0 = none */
 8266		ret += err;
 8267
 8268		err = niu_pci_eeprom_read(np, ret + 0);
 8269		if (err != 0x82)
 8270			return 0;
 8271
 8272		return ret;
 8273	}
 8274
 8275	return 0;
 8276}
 8277
 8278static int __devinit niu_phy_type_prop_decode(struct niu *np,
 8279					      const char *phy_prop)
 8280{
 8281	if (!strcmp(phy_prop, "mif")) {
 8282		/* 1G copper, MII */
 8283		np->flags &= ~(NIU_FLAGS_FIBER |
 8284			       NIU_FLAGS_10G);
 8285		np->mac_xcvr = MAC_XCVR_MII;
 8286	} else if (!strcmp(phy_prop, "xgf")) {
 8287		/* 10G fiber, XPCS */
 8288		np->flags |= (NIU_FLAGS_10G |
 8289			      NIU_FLAGS_FIBER);
 8290		np->mac_xcvr = MAC_XCVR_XPCS;
 8291	} else if (!strcmp(phy_prop, "pcs")) {
 8292		/* 1G fiber, PCS */
 8293		np->flags &= ~NIU_FLAGS_10G;
 8294		np->flags |= NIU_FLAGS_FIBER;
 8295		np->mac_xcvr = MAC_XCVR_PCS;
 8296	} else if (!strcmp(phy_prop, "xgc")) {
 8297		/* 10G copper, XPCS */
 8298		np->flags |= NIU_FLAGS_10G;
 8299		np->flags &= ~NIU_FLAGS_FIBER;
 8300		np->mac_xcvr = MAC_XCVR_XPCS;
 8301	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
 8302		/* 10G Serdes or 1G Serdes, default to 10G */
 8303		np->flags |= NIU_FLAGS_10G;
 8304		np->flags &= ~NIU_FLAGS_FIBER;
 8305		np->flags |= NIU_FLAGS_XCVR_SERDES;
 8306		np->mac_xcvr = MAC_XCVR_XPCS;
 8307	} else {
 8308		return -EINVAL;
 8309	}
 8310	return 0;
 8311}
 8312
 8313static int niu_pci_vpd_get_nports(struct niu *np)
 8314{
 8315	int ports = 0;
 8316
 8317	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
 8318	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
 8319	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
 8320	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
 8321	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
 8322		ports = 4;
 8323	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
 8324		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
 8325		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
 8326		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
 8327		ports = 2;
 8328	}
 8329
 8330	return ports;
 8331}
 8332
 8333static void __devinit niu_pci_vpd_validate(struct niu *np)
 8334{
 8335	struct net_device *dev = np->dev;
 8336	struct niu_vpd *vpd = &np->vpd;
 8337	u8 val8;
 8338
 8339	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
 8340		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
 8341
 8342		np->flags &= ~NIU_FLAGS_VPD_VALID;
 8343		return;
 8344	}
 8345
 8346	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
 8347	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 8348		np->flags |= NIU_FLAGS_10G;
 8349		np->flags &= ~NIU_FLAGS_FIBER;
 8350		np->flags |= NIU_FLAGS_XCVR_SERDES;
 8351		np->mac_xcvr = MAC_XCVR_PCS;
 8352		if (np->port > 1) {
 8353			np->flags |= NIU_FLAGS_FIBER;
 8354			np->flags &= ~NIU_FLAGS_10G;
 8355		}
 8356		if (np->flags & NIU_FLAGS_10G)
 8357			np->mac_xcvr = MAC_XCVR_XPCS;
 8358	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 8359		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 8360			      NIU_FLAGS_HOTPLUG_PHY);
 8361	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
 8362		dev_err(np->device, "Illegal phy string [%s]\n",
 8363			np->vpd.phy_type);
 8364		dev_err(np->device, "Falling back to SPROM\n");
 8365		np->flags &= ~NIU_FLAGS_VPD_VALID;
 8366		return;
 8367	}
 8368
 8369	memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);
 8370
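      	/* The VPD holds the board's base MAC address; make it unique per
      	 * port by adding the port number to the last byte, carrying into
      	 * byte 4 on wrap.
      	 */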
 8371	val8 = dev->perm_addr[5];
 8372	dev->perm_addr[5] += np->port;
 8373	if (dev->perm_addr[5] < val8)
 8374		dev->perm_addr[4]++;
 8375
 8376	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
 8377}
 8378
 8379static int __devinit niu_pci_probe_sprom(struct niu *np)
 8380{
 8381	struct net_device *dev = np->dev;
 8382	int len, i;
 8383	u64 val, sum;
 8384	u8 val8;
 8385
 8386	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
 8387	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
 8388	len = val / 4;
 8389
 8390	np->eeprom_len = len;
 8391
 8392	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8393		     "SPROM: Image size %llu\n", (unsigned long long)val);
 8394
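      	/* The SPROM checksum is the byte-wise sum of the entire image,
      	 * modulo 256, which must come out to 0xab.
      	 */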
 8395	sum = 0;
 8396	for (i = 0; i < len; i++) {
 8397		val = nr64(ESPC_NCR(i));
 8398		sum += (val >>  0) & 0xff;
 8399		sum += (val >>  8) & 0xff;
 8400		sum += (val >> 16) & 0xff;
 8401		sum += (val >> 24) & 0xff;
 8402	}
 8403	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8404		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
 8405	if ((sum & 0xff) != 0xab) {
 8406		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
 8407		return -EINVAL;
 8408	}
 8409
 8410	val = nr64(ESPC_PHY_TYPE);
 8411	switch (np->port) {
 8412	case 0:
 8413		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
 8414			ESPC_PHY_TYPE_PORT0_SHIFT;
 8415		break;
 8416	case 1:
 8417		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
 8418			ESPC_PHY_TYPE_PORT1_SHIFT;
 8419		break;
 8420	case 2:
 8421		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
 8422			ESPC_PHY_TYPE_PORT2_SHIFT;
 8423		break;
 8424	case 3:
 8425		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
 8426			ESPC_PHY_TYPE_PORT3_SHIFT;
 8427		break;
 8428	default:
 8429		dev_err(np->device, "Bogus port number %u\n",
 8430			np->port);
 8431		return -EINVAL;
 8432	}
 8433	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8434		     "SPROM: PHY type %x\n", val8);
 8435
 8436	switch (val8) {
 8437	case ESPC_PHY_TYPE_1G_COPPER:
 8438		/* 1G copper, MII */
 8439		np->flags &= ~(NIU_FLAGS_FIBER |
 8440			       NIU_FLAGS_10G);
 8441		np->mac_xcvr = MAC_XCVR_MII;
 8442		break;
 8443
 8444	case ESPC_PHY_TYPE_1G_FIBER:
 8445		/* 1G fiber, PCS */
 8446		np->flags &= ~NIU_FLAGS_10G;
 8447		np->flags |= NIU_FLAGS_FIBER;
 8448		np->mac_xcvr = MAC_XCVR_PCS;
 8449		break;
 8450
 8451	case ESPC_PHY_TYPE_10G_COPPER:
 8452		/* 10G copper, XPCS */
 8453		np->flags |= NIU_FLAGS_10G;
 8454		np->flags &= ~NIU_FLAGS_FIBER;
 8455		np->mac_xcvr = MAC_XCVR_XPCS;
 8456		break;
 8457
 8458	case ESPC_PHY_TYPE_10G_FIBER:
 8459		/* 10G fiber, XPCS */
 8460		np->flags |= (NIU_FLAGS_10G |
 8461			      NIU_FLAGS_FIBER);
 8462		np->mac_xcvr = MAC_XCVR_XPCS;
 8463		break;
 8464
 8465	default:
 8466		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
 8467		return -EINVAL;
 8468	}
 8469
 8470	val = nr64(ESPC_MAC_ADDR0);
 8471	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8472		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
 8473	dev->perm_addr[0] = (val >>  0) & 0xff;
 8474	dev->perm_addr[1] = (val >>  8) & 0xff;
 8475	dev->perm_addr[2] = (val >> 16) & 0xff;
 8476	dev->perm_addr[3] = (val >> 24) & 0xff;
 8477
 8478	val = nr64(ESPC_MAC_ADDR1);
 8479	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8480		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
 8481	dev->perm_addr[4] = (val >>  0) & 0xff;
 8482	dev->perm_addr[5] = (val >>  8) & 0xff;
 8483
 8484	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
 8485		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
 8486			dev->perm_addr);
 8487		return -EINVAL;
 8488	}
 8489
 8490	val8 = dev->perm_addr[5];
 8491	dev->perm_addr[5] += np->port;
 8492	if (dev->perm_addr[5] < val8)
 8493		dev->perm_addr[4]++;
 8494
 8495	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
 8496
 8497	val = nr64(ESPC_MOD_STR_LEN);
 8498	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8499		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
 8500	if (val >= 8 * 4)
 8501		return -EINVAL;
 8502
 8503	for (i = 0; i < val; i += 4) {
 8504		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
 8505
 8506		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
 8507		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
 8508		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
 8509		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
 8510	}
 8511	np->vpd.model[val] = '\0';
 8512
 8513	val = nr64(ESPC_BD_MOD_STR_LEN);
 8514	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8515		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
 8516	if (val >= 4 * 4)
 8517		return -EINVAL;
 8518
 8519	for (i = 0; i < val; i += 4) {
 8520		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
 8521
 8522		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
 8523		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
 8524		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
 8525		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
 8526	}
 8527	np->vpd.board_model[val] = '\0';
 8528
 8529	np->vpd.mac_num =
 8530		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
 8531	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8532		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
 8533
 8534	return 0;
 8535}
 8536
 8537static int __devinit niu_get_and_validate_port(struct niu *np)
 8538{
 8539	struct niu_parent *parent = np->parent;
 8540
 8541	if (np->port <= 1)
 8542		np->flags |= NIU_FLAGS_XMAC;
 8543
 8544	if (!parent->num_ports) {
 8545		if (parent->plat_type == PLAT_TYPE_NIU) {
 8546			parent->num_ports = 2;
 8547		} else {
 8548			parent->num_ports = niu_pci_vpd_get_nports(np);
 8549			if (!parent->num_ports) {
 8550				/* Fall back to SPROM as last resort.
 8551				 * This will fail on most cards.
 8552				 */
 8553				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
 8554					ESPC_NUM_PORTS_MACS_VAL;
 8555
 8556				/* All of the current probing methods fail on
 8557				 * Maramba on-board parts.
 8558				 */
 8559				if (!parent->num_ports)
 8560					parent->num_ports = 4;
 8561			}
 8562		}
 8563	}
 8564
 8565	if (np->port >= parent->num_ports)
 8566		return -ENODEV;
 8567
 8568	return 0;
 8569}
 8570
 8571static int __devinit phy_record(struct niu_parent *parent,
 8572				struct phy_probe_info *p,
 8573				int dev_id_1, int dev_id_2, u8 phy_port,
 8574				int type)
 8575{
 8576	u32 id = (dev_id_1 << 16) | dev_id_2;
 8577	u8 idx;
 8578
 8579	if (dev_id_1 < 0 || dev_id_2 < 0)
 8580		return 0;
 8581	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
 8582		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
 8583		 * test covers the 8706 as well.
 8584		 */
 8585		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
 8586		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
 8587			return 0;
 8588	} else {
 8589		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
 8590			return 0;
 8591	}
 8592
 8593	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
 8594		parent->index, id,
 8595		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
 8596		type == PHY_TYPE_PCS ? "PCS" : "MII",
 8597		phy_port);
 8598
 8599	if (p->cur[type] >= NIU_MAX_PORTS) {
 8600		pr_err("Too many PHY ports\n");
 8601		return -EINVAL;
 8602	}
 8603	idx = p->cur[type];
 8604	p->phy_id[type][idx] = id;
 8605	p->phy_port[type][idx] = phy_port;
 8606	p->cur[type] = idx + 1;
 8607	return 0;
 8608}
 8609
 8610static int __devinit port_has_10g(struct phy_probe_info *p, int port)
 8611{
 8612	int i;
 8613
 8614	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
 8615		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
 8616			return 1;
 8617	}
 8618	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
 8619		if (p->phy_port[PHY_TYPE_PCS][i] == port)
 8620			return 1;
 8621	}
 8622
 8623	return 0;
 8624}
 8625
 8626static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
 8627{
 8628	int port, cnt;
 8629
 8630	cnt = 0;
 8631	*lowest = 32;
 8632	for (port = 8; port < 32; port++) {
 8633		if (port_has_10g(p, port)) {
 8634			if (!cnt)
 8635				*lowest = port;
 8636			cnt++;
 8637		}
 8638	}
 8639
 8640	return cnt;
 8641}
 8642
 8643static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
 8644{
 8645	*lowest = 32;
 8646	if (p->cur[PHY_TYPE_MII])
 8647		*lowest = p->phy_port[PHY_TYPE_MII][0];
 8648
 8649	return p->cur[PHY_TYPE_MII];
 8650}
 8651
 8652static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
 8653{
 8654	int num_ports = parent->num_ports;
 8655	int i;
 8656
 8657	for (i = 0; i < num_ports; i++) {
 8658		parent->rxchan_per_port[i] = (16 / num_ports);
 8659		parent->txchan_per_port[i] = (16 / num_ports);
 8660
 8661		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
 8662			parent->index, i,
 8663			parent->rxchan_per_port[i],
 8664			parent->txchan_per_port[i]);
 8665	}
 8666}
 8667
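      /* Split the RX and TX DMA channels across the ports, biasing toward
       * the 10G ports in mixed configurations.  For example, assuming the
       * niu.h channel counts of 16 RX (NIU_NUM_RXCHAN) and 24 TX
       * (NIU_NUM_TXCHAN), a 2 x 10G + 2 x 1G config gives each 1G port
       * 16/8 = 2 RX and 24/6 = 4 TX channels, and each 10G port
       * (16 - 4)/2 = 6 RX and (24 - 8)/2 = 8 TX channels.
       */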
 8668static void __devinit niu_divide_channels(struct niu_parent *parent,
 8669					  int num_10g, int num_1g)
 8670{
 8671	int num_ports = parent->num_ports;
 8672	int rx_chans_per_10g, rx_chans_per_1g;
 8673	int tx_chans_per_10g, tx_chans_per_1g;
 8674	int i, tot_rx, tot_tx;
 8675
 8676	if (!num_10g || !num_1g) {
 8677		rx_chans_per_10g = rx_chans_per_1g =
 8678			(NIU_NUM_RXCHAN / num_ports);
 8679		tx_chans_per_10g = tx_chans_per_1g =
 8680			(NIU_NUM_TXCHAN / num_ports);
 8681	} else {
 8682		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
 8683		rx_chans_per_10g = (NIU_NUM_RXCHAN -
 8684				    (rx_chans_per_1g * num_1g)) /
 8685			num_10g;
 8686
 8687		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
 8688		tx_chans_per_10g = (NIU_NUM_TXCHAN -
 8689				    (tx_chans_per_1g * num_1g)) /
 8690			num_10g;
 8691	}
 8692
 8693	tot_rx = tot_tx = 0;
 8694	for (i = 0; i < num_ports; i++) {
 8695		int type = phy_decode(parent->port_phy, i);
 8696
 8697		if (type == PORT_TYPE_10G) {
 8698			parent->rxchan_per_port[i] = rx_chans_per_10g;
 8699			parent->txchan_per_port[i] = tx_chans_per_10g;
 8700		} else {
 8701			parent->rxchan_per_port[i] = rx_chans_per_1g;
 8702			parent->txchan_per_port[i] = tx_chans_per_1g;
 8703		}
 8704		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
 8705			parent->index, i,
 8706			parent->rxchan_per_port[i],
 8707			parent->txchan_per_port[i]);
 8708		tot_rx += parent->rxchan_per_port[i];
 8709		tot_tx += parent->txchan_per_port[i];
 8710	}
 8711
 8712	if (tot_rx > NIU_NUM_RXCHAN) {
 8713		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
 8714		       parent->index, tot_rx);
 8715		for (i = 0; i < num_ports; i++)
 8716			parent->rxchan_per_port[i] = 1;
 8717	}
 8718	if (tot_tx > NIU_NUM_TXCHAN) {
 8719		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
 8720		       parent->index, tot_tx);
 8721		for (i = 0; i < num_ports; i++)
 8722			parent->txchan_per_port[i] = 1;
 8723	}
 8724	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
 8725		pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
 8726			   parent->index, tot_rx, tot_tx);
 8727	}
 8728}
 8729
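      /* Divide the RDC tables evenly between the ports and fill each
       * table's slots round-robin with that port's RX channels, so that a
       * table lookup spreads received flows across all of the port's
       * channels.
       */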
 8730static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
 8731					    int num_10g, int num_1g)
 8732{
 8733	int i, num_ports = parent->num_ports;
 8734	int rdc_group, rdc_groups_per_port;
 8735	int rdc_channel_base;
 8736
 8737	rdc_group = 0;
 8738	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
 8739
 8740	rdc_channel_base = 0;
 8741
 8742	for (i = 0; i < num_ports; i++) {
 8743		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
 8744		int grp, num_channels = parent->rxchan_per_port[i];
 8745		int this_channel_offset;
 8746
 8747		tp->first_table_num = rdc_group;
 8748		tp->num_tables = rdc_groups_per_port;
 8749		this_channel_offset = 0;
 8750		for (grp = 0; grp < tp->num_tables; grp++) {
 8751			struct rdc_table *rt = &tp->tables[grp];
 8752			int slot;
 8753
 8754			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
 8755				parent->index, i, tp->first_table_num + grp);
 8756			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
 8757				rt->rxdma_channel[slot] =
 8758					rdc_channel_base + this_channel_offset;
 8759
 8760				pr_cont("%d ", rt->rxdma_channel[slot]);
 8761
 8762				if (++this_channel_offset == num_channels)
 8763					this_channel_offset = 0;
 8764			}
 8765			pr_cont("]\n");
 8766		}
 8767
 8768		parent->rdc_default[i] = rdc_channel_base;
 8769
 8770		rdc_channel_base += num_channels;
 8771		rdc_group += rdc_groups_per_port;
 8772	}
 8773}
 8774
 8775static int __devinit fill_phy_probe_info(struct niu *np,
 8776					 struct niu_parent *parent,
 8777					 struct phy_probe_info *info)
 8778{
 8779	unsigned long flags;
 8780	int port, err;
 8781
 8782	memset(info, 0, sizeof(*info));
 8783
  8784	/* Ports 0 through 7 are reserved for the on-board SERDES; probe the rest.  */
 8785	niu_lock_parent(np, flags);
 8786	err = 0;
 8787	for (port = 8; port < 32; port++) {
 8788		int dev_id_1, dev_id_2;
 8789
 8790		dev_id_1 = mdio_read(np, port,
 8791				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
 8792		dev_id_2 = mdio_read(np, port,
 8793				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
 8794		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8795				 PHY_TYPE_PMA_PMD);
 8796		if (err)
 8797			break;
 8798		dev_id_1 = mdio_read(np, port,
 8799				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
 8800		dev_id_2 = mdio_read(np, port,
 8801				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
 8802		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8803				 PHY_TYPE_PCS);
 8804		if (err)
 8805			break;
 8806		dev_id_1 = mii_read(np, port, MII_PHYSID1);
 8807		dev_id_2 = mii_read(np, port, MII_PHYSID2);
 8808		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8809				 PHY_TYPE_MII);
 8810		if (err)
 8811			break;
 8812	}
 8813	niu_unlock_parent(np, flags);
 8814
 8815	return err;
 8816}
 8817
 8818static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
 8819{
 8820	struct phy_probe_info *info = &parent->phy_probe_info;
 8821	int lowest_10g, lowest_1g;
 8822	int num_10g, num_1g;
 8823	u32 val;
 8824	int err;
 8825
 8826	num_10g = num_1g = 0;
 8827
 8828	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
 8829	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 8830		num_10g = 0;
 8831		num_1g = 2;
 8832		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
 8833		parent->num_ports = 4;
 8834		val = (phy_encode(PORT_TYPE_1G, 0) |
 8835		       phy_encode(PORT_TYPE_1G, 1) |
 8836		       phy_encode(PORT_TYPE_1G, 2) |
 8837		       phy_encode(PORT_TYPE_1G, 3));
 8838	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 8839		num_10g = 2;
 8840		num_1g = 0;
 8841		parent->num_ports = 2;
 8842		val = (phy_encode(PORT_TYPE_10G, 0) |
 8843		       phy_encode(PORT_TYPE_10G, 1));
 8844	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
 8845		   (parent->plat_type == PLAT_TYPE_NIU)) {
 8846		/* this is the Monza case */
 8847		if (np->flags & NIU_FLAGS_10G) {
 8848			val = (phy_encode(PORT_TYPE_10G, 0) |
 8849			       phy_encode(PORT_TYPE_10G, 1));
 8850		} else {
 8851			val = (phy_encode(PORT_TYPE_1G, 0) |
 8852			       phy_encode(PORT_TYPE_1G, 1));
 8853		}
 8854	} else {
 8855		err = fill_phy_probe_info(np, parent, info);
 8856		if (err)
 8857			return err;
 8858
 8859		num_10g = count_10g_ports(info, &lowest_10g);
 8860		num_1g = count_1g_ports(info, &lowest_1g);
 8861
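      		/* Encode the pair as one byte: high nibble = number of
      		 * 10G ports, low nibble = number of 1G ports, so e.g.
      		 * 0x24 below means 2 x 10G plus 4 x 1G.
      		 */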
 8862		switch ((num_10g << 4) | num_1g) {
 8863		case 0x24:
 8864			if (lowest_1g == 10)
 8865				parent->plat_type = PLAT_TYPE_VF_P0;
 8866			else if (lowest_1g == 26)
 8867				parent->plat_type = PLAT_TYPE_VF_P1;
 8868			else
 8869				goto unknown_vg_1g_port;
 8870
 8871			/* fallthru */
 8872		case 0x22:
 8873			val = (phy_encode(PORT_TYPE_10G, 0) |
 8874			       phy_encode(PORT_TYPE_10G, 1) |
 8875			       phy_encode(PORT_TYPE_1G, 2) |
 8876			       phy_encode(PORT_TYPE_1G, 3));
 8877			break;
 8878
 8879		case 0x20:
 8880			val = (phy_encode(PORT_TYPE_10G, 0) |
 8881			       phy_encode(PORT_TYPE_10G, 1));
 8882			break;
 8883
 8884		case 0x10:
 8885			val = phy_encode(PORT_TYPE_10G, np->port);
 8886			break;
 8887
 8888		case 0x14:
 8889			if (lowest_1g == 10)
 8890				parent->plat_type = PLAT_TYPE_VF_P0;
 8891			else if (lowest_1g == 26)
 8892				parent->plat_type = PLAT_TYPE_VF_P1;
 8893			else
 8894				goto unknown_vg_1g_port;
 8895
 8896			/* fallthru */
 8897		case 0x13:
 8898			if ((lowest_10g & 0x7) == 0)
 8899				val = (phy_encode(PORT_TYPE_10G, 0) |
 8900				       phy_encode(PORT_TYPE_1G, 1) |
 8901				       phy_encode(PORT_TYPE_1G, 2) |
 8902				       phy_encode(PORT_TYPE_1G, 3));
 8903			else
 8904				val = (phy_encode(PORT_TYPE_1G, 0) |
 8905				       phy_encode(PORT_TYPE_10G, 1) |
 8906				       phy_encode(PORT_TYPE_1G, 2) |
 8907				       phy_encode(PORT_TYPE_1G, 3));
 8908			break;
 8909
 8910		case 0x04:
 8911			if (lowest_1g == 10)
 8912				parent->plat_type = PLAT_TYPE_VF_P0;
 8913			else if (lowest_1g == 26)
 8914				parent->plat_type = PLAT_TYPE_VF_P1;
 8915			else
 8916				goto unknown_vg_1g_port;
 8917
 8918			val = (phy_encode(PORT_TYPE_1G, 0) |
 8919			       phy_encode(PORT_TYPE_1G, 1) |
 8920			       phy_encode(PORT_TYPE_1G, 2) |
 8921			       phy_encode(PORT_TYPE_1G, 3));
 8922			break;
 8923
 8924		default:
 8925			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
 8926			       num_10g, num_1g);
 8927			return -EINVAL;
 8928		}
 8929	}
 8930
 8931	parent->port_phy = val;
 8932
 8933	if (parent->plat_type == PLAT_TYPE_NIU)
 8934		niu_n2_divide_channels(parent);
 8935	else
 8936		niu_divide_channels(parent, num_10g, num_1g);
 8937
 8938	niu_divide_rdc_groups(parent, num_10g, num_1g);
 8939
 8940	return 0;
 8941
 8942unknown_vg_1g_port:
 8943	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
 8944	return -EINVAL;
 8945}
 8946
 8947static int __devinit niu_probe_ports(struct niu *np)
 8948{
 8949	struct niu_parent *parent = np->parent;
 8950	int err, i;
 8951
 8952	if (parent->port_phy == PORT_PHY_UNKNOWN) {
 8953		err = walk_phys(np, parent);
 8954		if (err)
 8955			return err;
 8956
 8957		niu_set_ldg_timer_res(np, 2);
 8958		for (i = 0; i <= LDN_MAX; i++)
 8959			niu_ldn_irq_enable(np, i, 0);
 8960	}
 8961
 8962	if (parent->port_phy == PORT_PHY_INVALID)
 8963		return -EINVAL;
 8964
 8965	return 0;
 8966}
 8967
 8968static int __devinit niu_classifier_swstate_init(struct niu *np)
 8969{
 8970	struct niu_classifier *cp = &np->clas;
 8971
 8972	cp->tcam_top = (u16) np->port;
 8973	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
 8974	cp->h1_init = 0xffffffff;
 8975	cp->h2_init = 0xffff;
 8976
 8977	return fflp_early_init(np);
 8978}
 8979
 8980static void __devinit niu_link_config_init(struct niu *np)
 8981{
 8982	struct niu_link_config *lp = &np->link_config;
 8983
 8984	lp->advertising = (ADVERTISED_10baseT_Half |
 8985			   ADVERTISED_10baseT_Full |
 8986			   ADVERTISED_100baseT_Half |
 8987			   ADVERTISED_100baseT_Full |
 8988			   ADVERTISED_1000baseT_Half |
 8989			   ADVERTISED_1000baseT_Full |
 8990			   ADVERTISED_10000baseT_Full |
 8991			   ADVERTISED_Autoneg);
 8992	lp->speed = lp->active_speed = SPEED_INVALID;
 8993	lp->duplex = DUPLEX_FULL;
 8994	lp->active_duplex = DUPLEX_INVALID;
 8995	lp->autoneg = 1;
 8996#if 0
 8997	lp->loopback_mode = LOOPBACK_MAC;
 8998	lp->active_speed = SPEED_10000;
 8999	lp->active_duplex = DUPLEX_FULL;
 9000#else
 9001	lp->loopback_mode = LOOPBACK_DISABLED;
 9002#endif
 9003}
 9004
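      /* Ports 0 and 1 are served by the 10G-capable XMACs, ports 2 and 3
       * by the 1G BMACs.  The BMAC ports carry no XPCS block, so their
       * xpcs_off is poisoned with ~0UL.
       */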
 9005static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
 9006{
 9007	switch (np->port) {
 9008	case 0:
 9009		np->mac_regs = np->regs + XMAC_PORT0_OFF;
 9010		np->ipp_off  = 0x00000;
 9011		np->pcs_off  = 0x04000;
 9012		np->xpcs_off = 0x02000;
 9013		break;
 9014
 9015	case 1:
 9016		np->mac_regs = np->regs + XMAC_PORT1_OFF;
 9017		np->ipp_off  = 0x08000;
 9018		np->pcs_off  = 0x0a000;
 9019		np->xpcs_off = 0x08000;
 9020		break;
 9021
 9022	case 2:
 9023		np->mac_regs = np->regs + BMAC_PORT2_OFF;
 9024		np->ipp_off  = 0x04000;
 9025		np->pcs_off  = 0x0e000;
 9026		np->xpcs_off = ~0UL;
 9027		break;
 9028
 9029	case 3:
 9030		np->mac_regs = np->regs + BMAC_PORT3_OFF;
 9031		np->ipp_off  = 0x0c000;
 9032		np->pcs_off  = 0x12000;
 9033		np->xpcs_off = ~0UL;
 9034		break;
 9035
 9036	default:
 9037		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
 9038		return -EINVAL;
 9039	}
 9040
 9041	return 0;
 9042}
 9043
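      /* The legacy pci_enable_msix() interface returns a positive value
       * to mean "only this many vectors are available", so the loop below
       * retries with that smaller count until the allocation succeeds or
       * fails outright (in which case we fall back to INTx).
       */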
 9044static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
 9045{
 9046	struct msix_entry msi_vec[NIU_NUM_LDG];
 9047	struct niu_parent *parent = np->parent;
 9048	struct pci_dev *pdev = np->pdev;
 9049	int i, num_irqs, err;
 9050	u8 first_ldg;
 9051
 9052	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
 9053	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
 9054		ldg_num_map[i] = first_ldg + i;
 9055
 9056	num_irqs = (parent->rxchan_per_port[np->port] +
 9057		    parent->txchan_per_port[np->port] +
 9058		    (np->port == 0 ? 3 : 1));
 9059	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
 9060
 9061retry:
 9062	for (i = 0; i < num_irqs; i++) {
 9063		msi_vec[i].vector = 0;
 9064		msi_vec[i].entry = i;
 9065	}
 9066
 9067	err = pci_enable_msix(pdev, msi_vec, num_irqs);
 9068	if (err < 0) {
 9069		np->flags &= ~NIU_FLAGS_MSIX;
 9070		return;
 9071	}
 9072	if (err > 0) {
 9073		num_irqs = err;
 9074		goto retry;
 9075	}
 9076
 9077	np->flags |= NIU_FLAGS_MSIX;
 9078	for (i = 0; i < num_irqs; i++)
 9079		np->ldg[i].irq = msi_vec[i].vector;
 9080	np->num_ldg = num_irqs;
 9081}
 9082
 9083static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
 9084{
 9085#ifdef CONFIG_SPARC64
 9086	struct platform_device *op = np->op;
 9087	const u32 *int_prop;
 9088	int i;
 9089
 9090	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
 9091	if (!int_prop)
 9092		return -ENODEV;
 9093
 9094	for (i = 0; i < op->archdata.num_irqs; i++) {
 9095		ldg_num_map[i] = int_prop[i];
 9096		np->ldg[i].irq = op->archdata.irqs[i];
 9097	}
 9098
 9099	np->num_ldg = op->archdata.num_irqs;
 9100
 9101	return 0;
 9102#else
 9103	return -EINVAL;
 9104#endif
 9105}
 9106
 9107static int __devinit niu_ldg_init(struct niu *np)
 9108{
 9109	struct niu_parent *parent = np->parent;
 9110	u8 ldg_num_map[NIU_NUM_LDG];
 9111	int first_chan, num_chan;
 9112	int i, err, ldg_rotor;
 9113	u8 port;
 9114
 9115	np->num_ldg = 1;
 9116	np->ldg[0].irq = np->dev->irq;
 9117	if (parent->plat_type == PLAT_TYPE_NIU) {
 9118		err = niu_n2_irq_init(np, ldg_num_map);
 9119		if (err)
 9120			return err;
 9121	} else
 9122		niu_try_msix(np, ldg_num_map);
 9123
 9124	port = np->port;
 9125	for (i = 0; i < np->num_ldg; i++) {
 9126		struct niu_ldg *lp = &np->ldg[i];
 9127
 9128		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
 9129
 9130		lp->np = np;
 9131		lp->ldg_num = ldg_num_map[i];
 9132		lp->timer = 2; /* XXX */
 9133
  9134		/* On N2 NIU the firmware has set up the SID mappings so they go
  9135		 * to the correct values that will route the LDG to the proper
  9136		 * interrupt in the NCU interrupt table.
  9137		 */
 9138		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 9139			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
 9140			if (err)
 9141				return err;
 9142		}
 9143	}
 9144
 9145	/* We adopt the LDG assignment ordering used by the N2 NIU
 9146	 * 'interrupt' properties because that simplifies a lot of
 9147	 * things.  This ordering is:
 9148	 *
 9149	 *	MAC
 9150	 *	MIF	(if port zero)
 9151	 *	SYSERR	(if port zero)
 9152	 *	RX channels
 9153	 *	TX channels
 9154	 */
 9155
 9156	ldg_rotor = 0;
 9157
 9158	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
 9159				  LDN_MAC(port));
 9160	if (err)
 9161		return err;
 9162
 9163	ldg_rotor++;
 9164	if (ldg_rotor == np->num_ldg)
 9165		ldg_rotor = 0;
 9166
 9167	if (port == 0) {
 9168		err = niu_ldg_assign_ldn(np, parent,
 9169					 ldg_num_map[ldg_rotor],
 9170					 LDN_MIF);
 9171		if (err)
 9172			return err;
 9173
 9174		ldg_rotor++;
 9175		if (ldg_rotor == np->num_ldg)
 9176			ldg_rotor = 0;
 9177
 9178		err = niu_ldg_assign_ldn(np, parent,
 9179					 ldg_num_map[ldg_rotor],
 9180					 LDN_DEVICE_ERROR);
 9181		if (err)
 9182			return err;
 9183
 9184		ldg_rotor++;
 9185		if (ldg_rotor == np->num_ldg)
 9186			ldg_rotor = 0;
 9187
 9188	}
 9189
 9190	first_chan = 0;
 9191	for (i = 0; i < port; i++)
 9192		first_chan += parent->rxchan_per_port[i];
 9193	num_chan = parent->rxchan_per_port[port];
 9194
 9195	for (i = first_chan; i < (first_chan + num_chan); i++) {
 9196		err = niu_ldg_assign_ldn(np, parent,
 9197					 ldg_num_map[ldg_rotor],
 9198					 LDN_RXDMA(i));
 9199		if (err)
 9200			return err;
 9201		ldg_rotor++;
 9202		if (ldg_rotor == np->num_ldg)
 9203			ldg_rotor = 0;
 9204	}
 9205
 9206	first_chan = 0;
 9207	for (i = 0; i < port; i++)
 9208		first_chan += parent->txchan_per_port[i];
 9209	num_chan = parent->txchan_per_port[port];
 9210	for (i = first_chan; i < (first_chan + num_chan); i++) {
 9211		err = niu_ldg_assign_ldn(np, parent,
 9212					 ldg_num_map[ldg_rotor],
 9213					 LDN_TXDMA(i));
 9214		if (err)
 9215			return err;
 9216		ldg_rotor++;
 9217		if (ldg_rotor == np->num_ldg)
 9218			ldg_rotor = 0;
 9219	}
 9220
 9221	return 0;
 9222}
 9223
 9224static void __devexit niu_ldg_free(struct niu *np)
 9225{
 9226	if (np->flags & NIU_FLAGS_MSIX)
 9227		pci_disable_msix(np->pdev);
 9228}
 9229
 9230static int __devinit niu_get_of_props(struct niu *np)
 9231{
 9232#ifdef CONFIG_SPARC64
 9233	struct net_device *dev = np->dev;
 9234	struct device_node *dp;
 9235	const char *phy_type;
 9236	const u8 *mac_addr;
 9237	const char *model;
 9238	int prop_len;
 9239
 9240	if (np->parent->plat_type == PLAT_TYPE_NIU)
 9241		dp = np->op->dev.of_node;
 9242	else
 9243		dp = pci_device_to_OF_node(np->pdev);
 9244
 9245	phy_type = of_get_property(dp, "phy-type", &prop_len);
 9246	if (!phy_type) {
 9247		netdev_err(dev, "%s: OF node lacks phy-type property\n",
 9248			   dp->full_name);
 9249		return -EINVAL;
 9250	}
 9251
 9252	if (!strcmp(phy_type, "none"))
 9253		return -ENODEV;
 9254
 9255	strcpy(np->vpd.phy_type, phy_type);
 9256
 9257	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
 9258		netdev_err(dev, "%s: Illegal phy string [%s]\n",
 9259			   dp->full_name, np->vpd.phy_type);
 9260		return -EINVAL;
 9261	}
 9262
 9263	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
 9264	if (!mac_addr) {
 9265		netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
 9266			   dp->full_name);
 9267		return -EINVAL;
 9268	}
 9269	if (prop_len != dev->addr_len) {
 9270		netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
 9271			   dp->full_name, prop_len);
 9272	}
 9273	memcpy(dev->perm_addr, mac_addr, dev->addr_len);
 9274	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
 9275		netdev_err(dev, "%s: OF MAC address is invalid\n",
 9276			   dp->full_name);
 9277		netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
 9278		return -EINVAL;
 9279	}
 9280
 9281	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
 9282
 9283	model = of_get_property(dp, "model", &prop_len);
 9284
 9285	if (model)
 9286		strcpy(np->vpd.model, model);
 9287
 9288	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
 9289		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 9290			NIU_FLAGS_HOTPLUG_PHY);
 9291	}
 9292
 9293	return 0;
 9294#else
 9295	return -EINVAL;
 9296#endif
 9297}
 9298
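      /* Probe order: OF properties first (mandatory on the N2 NIU), then
       * the VPD in the expansion ROM via ESPC PIO, and finally the raw
       * SPROM layout as a last resort.
       */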
 9299static int __devinit niu_get_invariants(struct niu *np)
 9300{
 9301	int err, have_props;
 9302	u32 offset;
 9303
 9304	err = niu_get_of_props(np);
 9305	if (err == -ENODEV)
 9306		return err;
 9307
 9308	have_props = !err;
 9309
 9310	err = niu_init_mac_ipp_pcs_base(np);
 9311	if (err)
 9312		return err;
 9313
 9314	if (have_props) {
 9315		err = niu_get_and_validate_port(np);
 9316		if (err)
 9317			return err;
 9318
 9319	} else  {
 9320		if (np->parent->plat_type == PLAT_TYPE_NIU)
 9321			return -EINVAL;
 9322
 9323		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
 9324		offset = niu_pci_vpd_offset(np);
 9325		netif_printk(np, probe, KERN_DEBUG, np->dev,
 9326			     "%s() VPD offset [%08x]\n", __func__, offset);
 9327		if (offset)
 9328			niu_pci_vpd_fetch(np, offset);
 9329		nw64(ESPC_PIO_EN, 0);
 9330
 9331		if (np->flags & NIU_FLAGS_VPD_VALID) {
 9332			niu_pci_vpd_validate(np);
 9333			err = niu_get_and_validate_port(np);
 9334			if (err)
 9335				return err;
 9336		}
 9337
 9338		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
 9339			err = niu_get_and_validate_port(np);
 9340			if (err)
 9341				return err;
 9342			err = niu_pci_probe_sprom(np);
 9343			if (err)
 9344				return err;
 9345		}
 9346	}
 9347
 9348	err = niu_probe_ports(np);
 9349	if (err)
 9350		return err;
 9351
 9352	niu_ldg_init(np);
 9353
 9354	niu_classifier_swstate_init(np);
 9355	niu_link_config_init(np);
 9356
 9357	err = niu_determine_phy_disposition(np);
 9358	if (!err)
 9359		err = niu_init_link(np);
 9360
 9361	return err;
 9362}
 9363
 9364static LIST_HEAD(niu_parent_list);
 9365static DEFINE_MUTEX(niu_parent_lock);
 9366static int niu_parent_index;
 9367
 9368static ssize_t show_port_phy(struct device *dev,
 9369			     struct device_attribute *attr, char *buf)
 9370{
 9371	struct platform_device *plat_dev = to_platform_device(dev);
 9372	struct niu_parent *p = plat_dev->dev.platform_data;
 9373	u32 port_phy = p->port_phy;
 9374	char *orig_buf = buf;
 9375	int i;
 9376
 9377	if (port_phy == PORT_PHY_UNKNOWN ||
 9378	    port_phy == PORT_PHY_INVALID)
 9379		return 0;
 9380
 9381	for (i = 0; i < p->num_ports; i++) {
 9382		const char *type_str;
 9383		int type;
 9384
 9385		type = phy_decode(port_phy, i);
 9386		if (type == PORT_TYPE_10G)
 9387			type_str = "10G";
 9388		else
 9389			type_str = "1G";
 9390		buf += sprintf(buf,
 9391			       (i == 0) ? "%s" : " %s",
 9392			       type_str);
 9393	}
 9394	buf += sprintf(buf, "\n");
 9395	return buf - orig_buf;
 9396}
 9397
 9398static ssize_t show_plat_type(struct device *dev,
 9399			      struct device_attribute *attr, char *buf)
 9400{
 9401	struct platform_device *plat_dev = to_platform_device(dev);
 9402	struct niu_parent *p = plat_dev->dev.platform_data;
 9403	const char *type_str;
 9404
 9405	switch (p->plat_type) {
 9406	case PLAT_TYPE_ATLAS:
 9407		type_str = "atlas";
 9408		break;
 9409	case PLAT_TYPE_NIU:
 9410		type_str = "niu";
 9411		break;
 9412	case PLAT_TYPE_VF_P0:
 9413		type_str = "vf_p0";
 9414		break;
 9415	case PLAT_TYPE_VF_P1:
 9416		type_str = "vf_p1";
 9417		break;
 9418	default:
 9419		type_str = "unknown";
 9420		break;
 9421	}
 9422
 9423	return sprintf(buf, "%s\n", type_str);
 9424}
 9425
 9426static ssize_t __show_chan_per_port(struct device *dev,
 9427				    struct device_attribute *attr, char *buf,
 9428				    int rx)
 9429{
 9430	struct platform_device *plat_dev = to_platform_device(dev);
 9431	struct niu_parent *p = plat_dev->dev.platform_data;
 9432	char *orig_buf = buf;
 9433	u8 *arr;
 9434	int i;
 9435
 9436	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
 9437
 9438	for (i = 0; i < p->num_ports; i++) {
 9439		buf += sprintf(buf,
 9440			       (i == 0) ? "%d" : " %d",
 9441			       arr[i]);
 9442	}
 9443	buf += sprintf(buf, "\n");
 9444
 9445	return buf - orig_buf;
 9446}
 9447
 9448static ssize_t show_rxchan_per_port(struct device *dev,
 9449				    struct device_attribute *attr, char *buf)
 9450{
 9451	return __show_chan_per_port(dev, attr, buf, 1);
 9452}
 9453
 9454static ssize_t show_txchan_per_port(struct device *dev,
 9455				    struct device_attribute *attr, char *buf)
 9456{
  9457	return __show_chan_per_port(dev, attr, buf, 0);
 9458}
 9459
 9460static ssize_t show_num_ports(struct device *dev,
 9461			      struct device_attribute *attr, char *buf)
 9462{
 9463	struct platform_device *plat_dev = to_platform_device(dev);
 9464	struct niu_parent *p = plat_dev->dev.platform_data;
 9465
 9466	return sprintf(buf, "%d\n", p->num_ports);
 9467}
 9468
 9469static struct device_attribute niu_parent_attributes[] = {
 9470	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
 9471	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
 9472	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
 9473	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
 9474	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
 9475	{}
 9476};
 9477
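      /* All ports of one chip share a single "niu-board" platform device
       * that carries the chip-wide state (channel layout, TCAM sizing,
       * classifier keys).  Ports locate an existing parent by matching
       * the niu_parent_id, and only the last port to detach unregisters it.
       */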
 9478static struct niu_parent * __devinit niu_new_parent(struct niu *np,
 9479						    union niu_parent_id *id,
 9480						    u8 ptype)
 9481{
 9482	struct platform_device *plat_dev;
 9483	struct niu_parent *p;
 9484	int i;
 9485
 9486	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
 9487						   NULL, 0);
 9488	if (IS_ERR(plat_dev))
 9489		return NULL;
 9490
 9491	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
 9492		int err = device_create_file(&plat_dev->dev,
 9493					     &niu_parent_attributes[i]);
 9494		if (err)
 9495			goto fail_unregister;
 9496	}
 9497
 9498	p = kzalloc(sizeof(*p), GFP_KERNEL);
 9499	if (!p)
 9500		goto fail_unregister;
 9501
 9502	p->index = niu_parent_index++;
 9503
 9504	plat_dev->dev.platform_data = p;
 9505	p->plat_dev = plat_dev;
 9506
 9507	memcpy(&p->id, id, sizeof(*id));
 9508	p->plat_type = ptype;
 9509	INIT_LIST_HEAD(&p->list);
 9510	atomic_set(&p->refcnt, 0);
 9511	list_add(&p->list, &niu_parent_list);
 9512	spin_lock_init(&p->lock);
 9513
 9514	p->rxdma_clock_divider = 7500;
 9515
 9516	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
 9517	if (p->plat_type == PLAT_TYPE_NIU)
 9518		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
 9519
 9520	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
 9521		int index = i - CLASS_CODE_USER_PROG1;
 9522
 9523		p->tcam_key[index] = TCAM_KEY_TSEL;
 9524		p->flow_key[index] = (FLOW_KEY_IPSA |
 9525				      FLOW_KEY_IPDA |
 9526				      FLOW_KEY_PROTO |
 9527				      (FLOW_KEY_L4_BYTE12 <<
 9528				       FLOW_KEY_L4_0_SHIFT) |
 9529				      (FLOW_KEY_L4_BYTE12 <<
 9530				       FLOW_KEY_L4_1_SHIFT));
 9531	}
 9532
 9533	for (i = 0; i < LDN_MAX + 1; i++)
 9534		p->ldg_map[i] = LDG_INVALID;
 9535
 9536	return p;
 9537
 9538fail_unregister:
 9539	platform_device_unregister(plat_dev);
 9540	return NULL;
 9541}
 9542
 9543static struct niu_parent * __devinit niu_get_parent(struct niu *np,
 9544						    union niu_parent_id *id,
 9545						    u8 ptype)
 9546{
 9547	struct niu_parent *p, *tmp;
 9548	int port = np->port;
 9549
 9550	mutex_lock(&niu_parent_lock);
 9551	p = NULL;
 9552	list_for_each_entry(tmp, &niu_parent_list, list) {
 9553		if (!memcmp(id, &tmp->id, sizeof(*id))) {
 9554			p = tmp;
 9555			break;
 9556		}
 9557	}
 9558	if (!p)
 9559		p = niu_new_parent(np, id, ptype);
 9560
 9561	if (p) {
 9562		char port_name[6];
 9563		int err;
 9564
 9565		sprintf(port_name, "port%d", port);
 9566		err = sysfs_create_link(&p->plat_dev->dev.kobj,
 9567					&np->device->kobj,
 9568					port_name);
 9569		if (!err) {
 9570			p->ports[port] = np;
 9571			atomic_inc(&p->refcnt);
 9572		}
 9573	}
 9574	mutex_unlock(&niu_parent_lock);
 9575
 9576	return p;
 9577}
 9578
 9579static void niu_put_parent(struct niu *np)
 9580{
 9581	struct niu_parent *p = np->parent;
 9582	u8 port = np->port;
 9583	char port_name[6];
 9584
 9585	BUG_ON(!p || p->ports[port] != np);
 9586
 9587	netif_printk(np, probe, KERN_DEBUG, np->dev,
 9588		     "%s() port[%u]\n", __func__, port);
 9589
 9590	sprintf(port_name, "port%d", port);
 9591
 9592	mutex_lock(&niu_parent_lock);
 9593
 9594	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
 9595
 9596	p->ports[port] = NULL;
 9597	np->parent = NULL;
 9598
 9599	if (atomic_dec_and_test(&p->refcnt)) {
 9600		list_del(&p->list);
 9601		platform_device_unregister(p->plat_dev);
 9602	}
 9603
 9604	mutex_unlock(&niu_parent_lock);
 9605}
 9606
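      /* The niu_ops hooks abstract DMA mapping so the same core code can
       * drive both PCI cards (the dma_* API below) and the N2 NIU, where
       * niu_phys_ops further down maps buffers by raw physical address.
       */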
 9607static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
 9608				    u64 *handle, gfp_t flag)
 9609{
 9610	dma_addr_t dh;
 9611	void *ret;
 9612
 9613	ret = dma_alloc_coherent(dev, size, &dh, flag);
 9614	if (ret)
 9615		*handle = dh;
 9616	return ret;
 9617}
 9618
 9619static void niu_pci_free_coherent(struct device *dev, size_t size,
 9620				  void *cpu_addr, u64 handle)
 9621{
 9622	dma_free_coherent(dev, size, cpu_addr, handle);
 9623}
 9624
 9625static u64 niu_pci_map_page(struct device *dev, struct page *page,
 9626			    unsigned long offset, size_t size,
 9627			    enum dma_data_direction direction)
 9628{
 9629	return dma_map_page(dev, page, offset, size, direction);
 9630}
 9631
 9632static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
 9633			       size_t size, enum dma_data_direction direction)
 9634{
 9635	dma_unmap_page(dev, dma_address, size, direction);
 9636}
 9637
 9638static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
 9639			      size_t size,
 9640			      enum dma_data_direction direction)
 9641{
 9642	return dma_map_single(dev, cpu_addr, size, direction);
 9643}
 9644
 9645static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
 9646				 size_t size,
 9647				 enum dma_data_direction direction)
 9648{
 9649	dma_unmap_single(dev, dma_address, size, direction);
 9650}
 9651
 9652static const struct niu_ops niu_pci_ops = {
 9653	.alloc_coherent	= niu_pci_alloc_coherent,
 9654	.free_coherent	= niu_pci_free_coherent,
 9655	.map_page	= niu_pci_map_page,
 9656	.unmap_page	= niu_pci_unmap_page,
 9657	.map_single	= niu_pci_map_single,
 9658	.unmap_single	= niu_pci_unmap_single,
 9659};
 9660
 9661static void __devinit niu_driver_version(void)
 9662{
 9663	static int niu_version_printed;
 9664
 9665	if (niu_version_printed++ == 0)
 9666		pr_info("%s", version);
 9667}
 9668
 9669static struct net_device * __devinit niu_alloc_and_init(
 9670	struct device *gen_dev, struct pci_dev *pdev,
 9671	struct platform_device *op, const struct niu_ops *ops,
 9672	u8 port)
 9673{
 9674	struct net_device *dev;
 9675	struct niu *np;
 9676
 9677	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
 9678	if (!dev)
 9679		return NULL;
 9680
 9681	SET_NETDEV_DEV(dev, gen_dev);
 9682
 9683	np = netdev_priv(dev);
 9684	np->dev = dev;
 9685	np->pdev = pdev;
 9686	np->op = op;
 9687	np->device = gen_dev;
 9688	np->ops = ops;
 9689
 9690	np->msg_enable = niu_debug;
 9691
 9692	spin_lock_init(&np->lock);
 9693	INIT_WORK(&np->reset_task, niu_reset_task);
 9694
 9695	np->port = port;
 9696
 9697	return dev;
 9698}
 9699
 9700static const struct net_device_ops niu_netdev_ops = {
 9701	.ndo_open		= niu_open,
 9702	.ndo_stop		= niu_close,
 9703	.ndo_start_xmit		= niu_start_xmit,
 9704	.ndo_get_stats64	= niu_get_stats,
 9705	.ndo_set_rx_mode	= niu_set_rx_mode,
 9706	.ndo_validate_addr	= eth_validate_addr,
 9707	.ndo_set_mac_address	= niu_set_mac_addr,
 9708	.ndo_do_ioctl		= niu_ioctl,
 9709	.ndo_tx_timeout		= niu_tx_timeout,
 9710	.ndo_change_mtu		= niu_change_mtu,
 9711};
 9712
 9713static void __devinit niu_assign_netdev_ops(struct net_device *dev)
 9714{
 9715	dev->netdev_ops = &niu_netdev_ops;
 9716	dev->ethtool_ops = &niu_ethtool_ops;
 9717	dev->watchdog_timeo = NIU_TX_TIMEOUT;
 9718}
 9719
 9720static void __devinit niu_device_announce(struct niu *np)
 9721{
 9722	struct net_device *dev = np->dev;
 9723
 9724	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
 9725
 9726	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
 9727		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
 9728				dev->name,
 9729				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
 9730				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
 9731				(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
 9732				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
 9733				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
 9734				np->vpd.phy_type);
 9735	} else {
 9736		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
 9737				dev->name,
 9738				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
 9739				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
 9740				(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
 9741				 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
 9742				  "COPPER")),
 9743				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
 9744				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
 9745				np->vpd.phy_type);
 9746	}
 9747}
 9748
 9749static void __devinit niu_set_basic_features(struct net_device *dev)
 9750{
 9751	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
 9752	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
 9753}
 9754
 9755static int __devinit niu_pci_init_one(struct pci_dev *pdev,
 9756				      const struct pci_device_id *ent)
 9757{
 9758	union niu_parent_id parent_id;
 9759	struct net_device *dev;
 9760	struct niu *np;
 9761	int err, pos;
 9762	u64 dma_mask;
 9763	u16 val16;
 9764
 9765	niu_driver_version();
 9766
 9767	err = pci_enable_device(pdev);
 9768	if (err) {
 9769		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 9770		return err;
 9771	}
 9772
 9773	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
 9774	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 9775		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
 9776		err = -ENODEV;
 9777		goto err_out_disable_pdev;
 9778	}
 9779
 9780	err = pci_request_regions(pdev, DRV_MODULE_NAME);
 9781	if (err) {
 9782		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
 9783		goto err_out_disable_pdev;
 9784	}
 9785
  9786	pos = pci_pcie_cap(pdev);
  9787	if (pos <= 0) {
  9788		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
      		err = -ENODEV;
  9789		goto err_out_free_res;
  9790	}
 9791
 9792	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
 9793				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
 9794	if (!dev) {
 9795		err = -ENOMEM;
 9796		goto err_out_free_res;
 9797	}
 9798	np = netdev_priv(dev);
 9799
 9800	memset(&parent_id, 0, sizeof(parent_id));
 9801	parent_id.pci.domain = pci_domain_nr(pdev->bus);
 9802	parent_id.pci.bus = pdev->bus->number;
 9803	parent_id.pci.device = PCI_SLOT(pdev->devfn);
 9804
 9805	np->parent = niu_get_parent(np, &parent_id,
 9806				    PLAT_TYPE_ATLAS);
 9807	if (!np->parent) {
 9808		err = -ENOMEM;
 9809		goto err_out_free_dev;
 9810	}
 9811
 9812	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
 9813	val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
 9814	val16 |= (PCI_EXP_DEVCTL_CERE |
 9815		  PCI_EXP_DEVCTL_NFERE |
 9816		  PCI_EXP_DEVCTL_FERE |
 9817		  PCI_EXP_DEVCTL_URRE |
 9818		  PCI_EXP_DEVCTL_RELAX_EN);
 9819	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
 9820
 9821	dma_mask = DMA_BIT_MASK(44);
 9822	err = pci_set_dma_mask(pdev, dma_mask);
 9823	if (!err) {
 9824		dev->features |= NETIF_F_HIGHDMA;
 9825		err = pci_set_consistent_dma_mask(pdev, dma_mask);
 9826		if (err) {
 9827			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
 9828			goto err_out_release_parent;
 9829		}
 9830	}
 9831	if (err) {
 9832		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 9833		if (err) {
 9834			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
 9835			goto err_out_release_parent;
 9836		}
 9837	}
 9838
 9839	niu_set_basic_features(dev);
 9840
 9841	dev->priv_flags |= IFF_UNICAST_FLT;
 9842
 9843	np->regs = pci_ioremap_bar(pdev, 0);
 9844	if (!np->regs) {
 9845		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
 9846		err = -ENOMEM;
 9847		goto err_out_release_parent;
 9848	}
 9849
 9850	pci_set_master(pdev);
 9851	pci_save_state(pdev);
 9852
 9853	dev->irq = pdev->irq;
 9854
 9855	niu_assign_netdev_ops(dev);
 9856
 9857	err = niu_get_invariants(np);
 9858	if (err) {
 9859		if (err != -ENODEV)
 9860			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
 9861		goto err_out_iounmap;
 9862	}
 9863
 9864	err = register_netdev(dev);
 9865	if (err) {
 9866		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
 9867		goto err_out_iounmap;
 9868	}
 9869
 9870	pci_set_drvdata(pdev, dev);
 9871
 9872	niu_device_announce(np);
 9873
 9874	return 0;
 9875
 9876err_out_iounmap:
 9877	if (np->regs) {
 9878		iounmap(np->regs);
 9879		np->regs = NULL;
 9880	}
 9881
 9882err_out_release_parent:
 9883	niu_put_parent(np);
 9884
 9885err_out_free_dev:
 9886	free_netdev(dev);
 9887
 9888err_out_free_res:
 9889	pci_release_regions(pdev);
 9890
 9891err_out_disable_pdev:
 9892	pci_disable_device(pdev);
 9893	pci_set_drvdata(pdev, NULL);
 9894
 9895	return err;
 9896}
 9897
 9898static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
 9899{
 9900	struct net_device *dev = pci_get_drvdata(pdev);
 9901
 9902	if (dev) {
 9903		struct niu *np = netdev_priv(dev);
 9904
 9905		unregister_netdev(dev);
 9906		if (np->regs) {
 9907			iounmap(np->regs);
 9908			np->regs = NULL;
 9909		}
 9910
 9911		niu_ldg_free(np);
 9912
 9913		niu_put_parent(np);
 9914
 9915		free_netdev(dev);
 9916		pci_release_regions(pdev);
 9917		pci_disable_device(pdev);
 9918		pci_set_drvdata(pdev, NULL);
 9919	}
 9920}
 9921
 9922static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
 9923{
 9924	struct net_device *dev = pci_get_drvdata(pdev);
 9925	struct niu *np = netdev_priv(dev);
 9926	unsigned long flags;
 9927
 9928	if (!netif_running(dev))
 9929		return 0;
 9930
 9931	flush_work_sync(&np->reset_task);
 9932	niu_netif_stop(np);
 9933
 9934	del_timer_sync(&np->timer);
 9935
 9936	spin_lock_irqsave(&np->lock, flags);
 9937	niu_enable_interrupts(np, 0);
 9938	spin_unlock_irqrestore(&np->lock, flags);
 9939
 9940	netif_device_detach(dev);
 9941
 9942	spin_lock_irqsave(&np->lock, flags);
 9943	niu_stop_hw(np);
 9944	spin_unlock_irqrestore(&np->lock, flags);
 9945
 9946	pci_save_state(pdev);
 9947
 9948	return 0;
 9949}
 9950
 9951static int niu_resume(struct pci_dev *pdev)
 9952{
 9953	struct net_device *dev = pci_get_drvdata(pdev);
 9954	struct niu *np = netdev_priv(dev);
 9955	unsigned long flags;
 9956	int err;
 9957
 9958	if (!netif_running(dev))
 9959		return 0;
 9960
 9961	pci_restore_state(pdev);
 9962
 9963	netif_device_attach(dev);
 9964
 9965	spin_lock_irqsave(&np->lock, flags);
 9966
 9967	err = niu_init_hw(np);
 9968	if (!err) {
 9969		np->timer.expires = jiffies + HZ;
 9970		add_timer(&np->timer);
 9971		niu_netif_start(np);
 9972	}
 9973
 9974	spin_unlock_irqrestore(&np->lock, flags);
 9975
 9976	return err;
 9977}
 9978
 9979static struct pci_driver niu_pci_driver = {
 9980	.name		= DRV_MODULE_NAME,
 9981	.id_table	= niu_pci_tbl,
 9982	.probe		= niu_pci_init_one,
 9983	.remove		= __devexit_p(niu_pci_remove_one),
 9984	.suspend	= niu_suspend,
 9985	.resume		= niu_resume,
 9986};
 9987
 9988#ifdef CONFIG_SPARC64
 9989static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
 9990				     u64 *dma_addr, gfp_t flag)
 9991{
 9992	unsigned long order = get_order(size);
 9993	unsigned long page = __get_free_pages(flag, order);
 9994
 9995	if (page == 0UL)
 9996		return NULL;
 9997	memset((char *)page, 0, PAGE_SIZE << order);
 9998	*dma_addr = __pa(page);
 9999
10000	return (void *) page;
10001}
10002
10003static void niu_phys_free_coherent(struct device *dev, size_t size,
10004				   void *cpu_addr, u64 handle)
10005{
10006	unsigned long order = get_order(size);
10007
10008	free_pages((unsigned long) cpu_addr, order);
10009}
10010
10011static u64 niu_phys_map_page(struct device *dev, struct page *page,
10012			     unsigned long offset, size_t size,
10013			     enum dma_data_direction direction)
10014{
10015	return page_to_phys(page) + offset;
10016}
10017
10018static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
10019				size_t size, enum dma_data_direction direction)
10020{
10021	/* Nothing to do.  */
10022}
10023
10024static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
10025			       size_t size,
10026			       enum dma_data_direction direction)
10027{
10028	return __pa(cpu_addr);
10029}
10030
10031static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
10032				  size_t size,
10033				  enum dma_data_direction direction)
10034{
10035	/* Nothing to do.  */
10036}
10037
10038static const struct niu_ops niu_phys_ops = {
10039	.alloc_coherent	= niu_phys_alloc_coherent,
10040	.free_coherent	= niu_phys_free_coherent,
10041	.map_page	= niu_phys_map_page,
10042	.unmap_page	= niu_phys_unmap_page,
10043	.map_single	= niu_phys_map_single,
10044	.unmap_single	= niu_phys_unmap_single,
10045};
10046
10047static int __devinit niu_of_probe(struct platform_device *op)
10048{
10049	union niu_parent_id parent_id;
10050	struct net_device *dev;
10051	struct niu *np;
10052	const u32 *reg;
10053	int err;
10054
10055	niu_driver_version();
10056
10057	reg = of_get_property(op->dev.of_node, "reg", NULL);
10058	if (!reg) {
10059		dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
10060			op->dev.of_node->full_name);
10061		return -ENODEV;
10062	}
10063
10064	dev = niu_alloc_and_init(&op->dev, NULL, op,
10065				 &niu_phys_ops, reg[0] & 0x1);
10066	if (!dev) {
10067		err = -ENOMEM;
10068		goto err_out;
10069	}
10070	np = netdev_priv(dev);
10071
10072	memset(&parent_id, 0, sizeof(parent_id));
10073	parent_id.of = of_get_parent(op->dev.of_node);
10074
10075	np->parent = niu_get_parent(np, &parent_id,
10076				    PLAT_TYPE_NIU);
10077	if (!np->parent) {
10078		err = -ENOMEM;
10079		goto err_out_free_dev;
10080	}
10081
10082	niu_set_basic_features(dev);
10083
10084	np->regs = of_ioremap(&op->resource[1], 0,
10085			      resource_size(&op->resource[1]),
10086			      "niu regs");
10087	if (!np->regs) {
10088		dev_err(&op->dev, "Cannot map device registers, aborting\n");
10089		err = -ENOMEM;
10090		goto err_out_release_parent;
10091	}
10092
10093	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
10094				    resource_size(&op->resource[2]),
10095				    "niu vregs-1");
10096	if (!np->vir_regs_1) {
10097		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
10098		err = -ENOMEM;
10099		goto err_out_iounmap;
10100	}
10101
10102	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
10103				    resource_size(&op->resource[3]),
10104				    "niu vregs-2");
10105	if (!np->vir_regs_2) {
10106		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
10107		err = -ENOMEM;
10108		goto err_out_iounmap;
10109	}
10110
10111	niu_assign_netdev_ops(dev);
10112
10113	err = niu_get_invariants(np);
10114	if (err) {
10115		if (err != -ENODEV)
10116			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
10117		goto err_out_iounmap;
10118	}
10119
10120	err = register_netdev(dev);
10121	if (err) {
10122		dev_err(&op->dev, "Cannot register net device, aborting\n");
10123		goto err_out_iounmap;
10124	}
10125
10126	dev_set_drvdata(&op->dev, dev);
10127
10128	niu_device_announce(np);
10129
10130	return 0;
10131
10132err_out_iounmap:
10133	if (np->vir_regs_1) {
10134		of_iounmap(&op->resource[2], np->vir_regs_1,
10135			   resource_size(&op->resource[2]));
10136		np->vir_regs_1 = NULL;
10137	}
10138
10139	if (np->vir_regs_2) {
10140		of_iounmap(&op->resource[3], np->vir_regs_2,
10141			   resource_size(&op->resource[3]));
10142		np->vir_regs_2 = NULL;
10143	}
10144
10145	if (np->regs) {
10146		of_iounmap(&op->resource[1], np->regs,
10147			   resource_size(&op->resource[1]));
10148		np->regs = NULL;
10149	}
10150
10151err_out_release_parent:
10152	niu_put_parent(np);
10153
10154err_out_free_dev:
10155	free_netdev(dev);
10156
10157err_out:
10158	return err;
10159}
10160
10161static int __devexit niu_of_remove(struct platform_device *op)
10162{
10163	struct net_device *dev = dev_get_drvdata(&op->dev);
10164
10165	if (dev) {
10166		struct niu *np = netdev_priv(dev);
10167
10168		unregister_netdev(dev);
10169
10170		if (np->vir_regs_1) {
10171			of_iounmap(&op->resource[2], np->vir_regs_1,
10172				   resource_size(&op->resource[2]));
10173			np->vir_regs_1 = NULL;
10174		}
10175
10176		if (np->vir_regs_2) {
10177			of_iounmap(&op->resource[3], np->vir_regs_2,
10178				   resource_size(&op->resource[3]));
10179			np->vir_regs_2 = NULL;
10180		}
10181
10182		if (np->regs) {
10183			of_iounmap(&op->resource[1], np->regs,
10184				   resource_size(&op->resource[1]));
10185			np->regs = NULL;
10186		}
10187
10188		niu_ldg_free(np);
10189
10190		niu_put_parent(np);
10191
10192		free_netdev(dev);
10193		dev_set_drvdata(&op->dev, NULL);
10194	}
10195	return 0;
10196}
10197
10198static const struct of_device_id niu_match[] = {
10199	{
10200		.name = "network",
10201		.compatible = "SUNW,niusl",
10202	},
10203	{},
10204};
10205MODULE_DEVICE_TABLE(of, niu_match);
10206
10207static struct platform_driver niu_of_driver = {
10208	.driver = {
10209		.name = "niu",
10210		.owner = THIS_MODULE,
10211		.of_match_table = niu_match,
10212	},
10213	.probe		= niu_of_probe,
10214	.remove		= __devexit_p(niu_of_remove),
10215};
10216
10217#endif /* CONFIG_SPARC64 */
10218
10219static int __init niu_init(void)
10220{
10221	int err = 0;
10222
10223	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
10224
10225	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
10226
10227#ifdef CONFIG_SPARC64
10228	err = platform_driver_register(&niu_of_driver);
10229#endif
10230
10231	if (!err) {
10232		err = pci_register_driver(&niu_pci_driver);
10233#ifdef CONFIG_SPARC64
10234		if (err)
10235			platform_driver_unregister(&niu_of_driver);
10236#endif
10237	}
10238
10239	return err;
10240}
10241
10242static void __exit niu_exit(void)
10243{
10244	pci_unregister_driver(&niu_pci_driver);
10245#ifdef CONFIG_SPARC64
10246	platform_driver_unregister(&niu_of_driver);
10247#endif
10248}
10249
10250module_init(niu_init);
10251module_exit(niu_exit);