/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/io.h>
#include <linux/of_device.h>

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif
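
/* A quick illustration of the fallback above (a sketch only; the offset 0
 * is an arbitrary example, not a real NIU register):
 *
 *	u64 v = readq(np->regs + 0);
 *	writeq(v | 1, np->regs + 0);
 *
 * Each 64-bit access is synthesized from two 32-bit ones, low word first,
 * so it is not atomic; callers must not depend on these helpers for
 * registers whose halves can change between the two accesses.
 */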

static const struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_NEPTUNE)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

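/* Usage sketch for the accessor macros above (all of them expect an
 * in-scope 'struct niu *np'; XMAC_CONFIG and its FORCE_LED_ON bit are
 * used the same way later in this file):
 *
 *	u64 val = nr64_mac(XMAC_CONFIG);
 *	nw64_mac(XMAC_CONFIG, val | XMAC_CONFIG_FORCE_LED_ON);
 */
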
#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

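/* Parent-lock usage sketch; 'flags' must be a local unsigned long, as
 * spin_lock_irqsave() requires:
 *
 *	unsigned long flags;
 *
 *	niu_lock_parent(np, flags);
 *	// ... touch state shared by all ports of this NIU ...
 *	niu_unlock_parent(np, flags);
 */
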
static int serdes_init_10g_serdes(struct niu *np);

static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

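/* Example use of the wrapper above, a sketch modeled on the MAC reset
 * paths elsewhere in this driver (XTXMAC_SW_RST and XTXMAC_SW_RST_REG_RS
 * are assumed to come from niu.h):
 *
 *	err = niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
 *					 XTXMAC_SW_RST_REG_RS, 1000, 100,
 *					 "XTXMAC_SW_RST");
 *
 * The BUILD_BUG_ON() requires LIMIT and DELAY to be compile-time
 * constants, rejecting non-positive limits and negative delays.
 */
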
static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

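/* Interrupt sources (logical device numbers, LDNs) are grouped into
 * logical device groups (LDGs), each with its own interrupt and timer.
 * Enabling interrupts is therefore a two-step dance, as above: unmask
 * every LDN mapped to each LDG, then re-arm the LDG via niu_ldg_rearm().
 */
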
static u32 phy_encode(u32 type, int port)
{
	return type << (port * 2);
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}

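/* phy_encode()/phy_decode() pack one 2-bit port type per port into a
 * single u32 map.  Round-trip sketch (PORT_TYPE_10G is one of the
 * PORT_TYPE_* values covered by PORT_TYPE_MASK):
 *
 *	u32 map = phy_encode(PORT_TYPE_10G, 2);
 *	u32 type = phy_decode(map, 2);	// yields PORT_TYPE_10G
 */
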
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

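/* mdio_read()/mdio_write() above speak clause-45 MDIO: an address frame
 * first selects the register within the device, then a separate frame
 * transfers the data.  A sketch using names from elsewhere in this file:
 *
 *	int v = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
 *			  ESR_RXTX_RESET_CTRL_L);
 *	if (v < 0)
 *		return v;	// negative means -errno, else 16-bit data
 */
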
static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}

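/* The ESR2 PLL lane-configuration registers are only 16 bits wide, so the
 * two helpers above split the 32-bit value across an _L/_H register pair,
 * writing the low half first.
 */
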
/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}

static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 sig, mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 sig, mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}
	return 0;
}

static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0)
			*val |= ((err & 0xffff) << 16);
		err = 0;
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}

static int esr_reset(struct niu *np)
{
	u32 reset;
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
			   np->port, reset);
		return -ENODEV;
	}

	return 0;
}

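/* esr_reset() above walks the SERDES lanes through a reset handshake:
 * each half of the reset-control register pair is toggled in turn with a
 * 200 usec settle after every step, and the register is then read back
 * and required to be all-zero before the lanes are trusted.
 */
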
static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}

static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}

static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}

static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}

static int link_status_mii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int err;
	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
	int supported, advertising, active_speed, active_duplex;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (unlikely(err < 0))
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (unlikely(err < 0))
		return err;
	bmsr = err;

	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
	if (unlikely(err < 0))
		return err;
	advert = err;

	err = mii_read(np, np->phy_addr, MII_LPA);
	if (unlikely(err < 0))
		return err;
	lpa = err;

	if (likely(bmsr & BMSR_ESTATEN)) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (unlikely(err < 0))
			return err;
		estatus = err;

		err = mii_read(np, np->phy_addr, MII_CTRL1000);
		if (unlikely(err < 0))
			return err;
		ctrl1000 = err;

		err = mii_read(np, np->phy_addr, MII_STAT1000);
		if (unlikely(err < 0))
			return err;
		stat1000 = err;
	} else
		estatus = ctrl1000 = stat1000 = 0;

	supported = 0;
	if (bmsr & BMSR_ANEGCAPABLE)
		supported |= SUPPORTED_Autoneg;
	if (bmsr & BMSR_10HALF)
		supported |= SUPPORTED_10baseT_Half;
	if (bmsr & BMSR_10FULL)
		supported |= SUPPORTED_10baseT_Full;
	if (bmsr & BMSR_100HALF)
		supported |= SUPPORTED_100baseT_Half;
	if (bmsr & BMSR_100FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (estatus & ESTATUS_1000_THALF)
		supported |= SUPPORTED_1000baseT_Half;
	if (estatus & ESTATUS_1000_TFULL)
		supported |= SUPPORTED_1000baseT_Full;
	lp->supported = supported;

	advertising = mii_adv_to_ethtool_adv_t(advert);
	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);

	if (bmcr & BMCR_ANENABLE) {
		int neg, neg1000;

		lp->active_autoneg = 1;
		advertising |= ADVERTISED_Autoneg;

		neg = advert & lpa;
		neg1000 = (ctrl1000 << 2) & stat1000;

		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
			active_speed = SPEED_1000;
		else if (neg & LPA_100)
			active_speed = SPEED_100;
		else if (neg & (LPA_10HALF | LPA_10FULL))
			active_speed = SPEED_10;
		else
			active_speed = SPEED_INVALID;

		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
			active_duplex = DUPLEX_FULL;
		else if (active_speed != SPEED_INVALID)
			active_duplex = DUPLEX_HALF;
		else
			active_duplex = DUPLEX_INVALID;
	} else {
		lp->active_autoneg = 0;

		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
			active_speed = SPEED_1000;
		else if (bmcr & BMCR_SPEED100)
			active_speed = SPEED_100;
		else
			active_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			active_duplex = DUPLEX_FULL;
		else
			active_duplex = DUPLEX_HALF;
	}

	lp->active_advertising = advertising;
	lp->active_speed = active_speed;
	lp->active_duplex = active_duplex;
	*link_up_p = !!(bmsr & BMSR_LSTATUS);

	return 0;
}

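/* A note on the autoneg math in link_status_mii() above: the local
 * 1000BASE-T advertisement from MII_CTRL1000 is shifted left by two so
 * that ADVERTISE_1000HALF/FULL line up with LPA_1000HALF/FULL from
 * MII_STAT1000, letting "(ctrl1000 << 2) & stat1000" compute the
 * negotiated gigabit abilities just as "advert & lpa" does for 10/100.
 */
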
static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;
		adv = err;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;
		lpa = err;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);

	err = link_status_mii(np, link_up_p);
	lp->supported |= SUPPORTED_TP;
	lp->active_advertising |= ADVERTISED_TP;

	spin_unlock_irqrestore(&np->lock, flags);
	return err;
}

static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0 || err == 0xffff)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
			   np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}

/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}

static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
		MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}

static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int	err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD  */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}

static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info("Port %u cable not connected or bad cable\n",
				np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info("Port %u optical module is bad or missing\n",
				np->port);
		}
	}

	return 0;
}

static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return err;
}

static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
			   np->port, err);
		return -ENODEV;
	}

	return 0;
}

static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}

static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	if (lp->autoneg) {
		u16 ctrl1000;

		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
		if ((bmsr & BMSR_10HALF) &&
			(lp->advertising & ADVERTISED_10baseT_Half))
			adv |= ADVERTISE_10HALF;
		if ((bmsr & BMSR_10FULL) &&
			(lp->advertising & ADVERTISED_10baseT_Full))
			adv |= ADVERTISE_10FULL;
		if ((bmsr & BMSR_100HALF) &&
			(lp->advertising & ADVERTISED_100baseT_Half))
			adv |= ADVERTISE_100HALF;
		if ((bmsr & BMSR_100FULL) &&
			(lp->advertising & ADVERTISED_100baseT_Full))
			adv |= ADVERTISE_100FULL;
		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
		if (err)
			return err;

		if (likely(bmsr & BMSR_ESTATEN)) {
			ctrl1000 = 0;
			if ((estat & ESTATUS_1000_THALF) &&
				(lp->advertising & ADVERTISED_1000baseT_Half))
				ctrl1000 |= ADVERTISE_1000HALF;
			if ((estat & ESTATUS_1000_TFULL) &&
				(lp->advertising & ADVERTISED_1000baseT_Full))
				ctrl1000 |= ADVERTISE_1000FULL;
			err = mii_write(np, np->phy_addr,
					MII_CTRL1000, ctrl1000);
			if (err)
				return err;
		}

		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		/* !lp->autoneg */
		int fulldpx;

		if (lp->duplex == DUPLEX_FULL) {
			bmcr |= BMCR_FULLDPLX;
			fulldpx = 1;
		} else if (lp->duplex == DUPLEX_HALF)
			fulldpx = 0;
		else
			return -EINVAL;

		if (lp->speed == SPEED_1000) {
			/* if X-full requested while not supported, or
			   X-half requested while not supported... */
			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED1000;
		} else if (lp->speed == SPEED_100) {
			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
				(!fulldpx && !(bmsr & BMSR_100HALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED100;
		} else if (lp->speed == SPEED_10) {
			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
				(!fulldpx && !(bmsr & BMSR_10HALF)))
				return -EINVAL;
		} else
			return -EINVAL;
	}

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

#if 0
	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);
#endif

	return 0;
}

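/* A note on the forced-speed path in mii_init_common() above: 10 Mb/s is
 * selected by leaving both BMCR_SPEED100 and BMCR_SPEED1000 clear, so the
 * SPEED_10 branch only validates the PHY's abilities and sets no bit.
 */
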
static int xcvr_init_1g(struct niu *np)
{
	u64 val;

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
}

static int niu_xcvr_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->xcvr_init)
		err = ops->xcvr_init(np);

	return err;
}

static int niu_serdes_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

	return err;
}

static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);

static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
			   "10Mbit/sec",
			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		netif_warn(np, link, dev, "Link is down\n");
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}

static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}

static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;
	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0 || err == 0xffff)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}

static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}

static int link_status_10g(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		int phy_id;

		phy_id = phy_decode(np->parent->port_phy, np->port);
		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

		/* handle different phy types */
		switch (phy_id & NIU_PHY_ID_MASK) {
		case NIU_PHY_ID_MRVL88X2011:
			err = link_status_10g_mrvl(np, link_up_p);
			break;

		default: /* bcom 8704 */
			err = link_status_10g_bcom(np, link_up_p);
			break;
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

static int niu_10g_phy_present(struct niu *np)
{
	u64 sig, mask, val;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return 0;
	}

	if ((sig & mask) != val)
		return 0;
	return 1;
}

static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = 0;
	int phy_present;
	int phy_present_prev;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
			1 : 0;
		phy_present = niu_10g_phy_present(np);
		if (phy_present != phy_present_prev) {
			/* state change */
			if (phy_present) {
				/* A NEM was just plugged in */
				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				if (np->phy_ops->xcvr_init)
					err = np->phy_ops->xcvr_init(np);
				if (err) {
					err = mdio_read(np, np->phy_addr,
						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
					if (err == 0xffff) {
						/* No mdio, back-to-back XAUI */
						goto out;
					}
					/* debounce */
					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				}
			} else {
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				*link_up_p = 0;
				netif_warn(np, link, np->dev,
					   "Hotplug PHY Removed\n");
 2193			}
 2194		}
 2195out:
 2196		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
 2197			err = link_status_10g_bcm8706(np, link_up_p);
 2198			if (err == 0xffff) {
 2199				/* No mdio, back-to-back XAUI: it is C10NEM */
 2200				*link_up_p = 1;
 2201				np->link_config.active_speed = SPEED_10000;
 2202				np->link_config.active_duplex = DUPLEX_FULL;
 2203			}
 2204		}
 2205	}
 2206
 2207	spin_unlock_irqrestore(&np->lock, flags);
 2208
 2209	return 0;
 2210}
 2211
 2212static int niu_link_status(struct niu *np, int *link_up_p)
 2213{
 2214	const struct niu_phy_ops *ops = np->phy_ops;
 2215	int err;
 2216
 2217	err = 0;
 2218	if (ops->link_status)
 2219		err = ops->link_status(np, link_up_p);
 2220
 2221	return err;
 2222}
 2223
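      /* Periodic link state poll: when the carrier is up the check is
       * rescheduled every 5 seconds, otherwise every second.
       */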
 2224static void niu_timer(unsigned long __opaque)
 2225{
 2226	struct niu *np = (struct niu *) __opaque;
 2227	unsigned long off;
 2228	int err, link_up;
 2229
 2230	err = niu_link_status(np, &link_up);
 2231	if (!err)
 2232		niu_link_status_common(np, link_up);
 2233
 2234	if (netif_carrier_ok(np->dev))
 2235		off = 5 * HZ;
 2236	else
 2237		off = 1 * HZ;
 2238	np->timer.expires = jiffies + off;
 2239
 2240	add_timer(&np->timer);
 2241}
 2242
 2243static const struct niu_phy_ops phy_ops_10g_serdes = {
 2244	.serdes_init		= serdes_init_10g_serdes,
 2245	.link_status		= link_status_10g_serdes,
 2246};
 2247
 2248static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
 2249	.serdes_init		= serdes_init_niu_10g_serdes,
 2250	.link_status		= link_status_10g_serdes,
 2251};
 2252
 2253static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
 2254	.serdes_init		= serdes_init_niu_1g_serdes,
 2255	.link_status		= link_status_1g_serdes,
 2256};
 2257
 2258static const struct niu_phy_ops phy_ops_1g_rgmii = {
 2259	.xcvr_init		= xcvr_init_1g_rgmii,
 2260	.link_status		= link_status_1g_rgmii,
 2261};
 2262
 2263static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
 2264	.serdes_init		= serdes_init_niu_10g_fiber,
 2265	.xcvr_init		= xcvr_init_10g,
 2266	.link_status		= link_status_10g,
 2267};
 2268
 2269static const struct niu_phy_ops phy_ops_10g_fiber = {
 2270	.serdes_init		= serdes_init_10g,
 2271	.xcvr_init		= xcvr_init_10g,
 2272	.link_status		= link_status_10g,
 2273};
 2274
 2275static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
 2276	.serdes_init		= serdes_init_10g,
 2277	.xcvr_init		= xcvr_init_10g_bcm8706,
 2278	.link_status		= link_status_10g_hotplug,
 2279};
 2280
 2281static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
 2282	.serdes_init		= serdes_init_niu_10g_fiber,
 2283	.xcvr_init		= xcvr_init_10g_bcm8706,
 2284	.link_status		= link_status_10g_hotplug,
 2285};
 2286
 2287static const struct niu_phy_ops phy_ops_10g_copper = {
 2288	.serdes_init		= serdes_init_10g,
 2289	.link_status		= link_status_10g, /* XXX */
 2290};
 2291
 2292static const struct niu_phy_ops phy_ops_1g_fiber = {
 2293	.serdes_init		= serdes_init_1g,
 2294	.xcvr_init		= xcvr_init_1g,
 2295	.link_status		= link_status_1g,
 2296};
 2297
 2298static const struct niu_phy_ops phy_ops_1g_copper = {
 2299	.xcvr_init		= xcvr_init_1g,
 2300	.link_status		= link_status_1g,
 2301};
 2302
 2303struct niu_phy_template {
 2304	const struct niu_phy_ops	*ops;
 2305	u32				phy_addr_base;
 2306};
 2307
 2308static const struct niu_phy_template phy_template_niu_10g_fiber = {
 2309	.ops		= &phy_ops_10g_fiber_niu,
 2310	.phy_addr_base	= 16,
 2311};
 2312
 2313static const struct niu_phy_template phy_template_niu_10g_serdes = {
 2314	.ops		= &phy_ops_10g_serdes_niu,
 2315	.phy_addr_base	= 0,
 2316};
 2317
 2318static const struct niu_phy_template phy_template_niu_1g_serdes = {
 2319	.ops		= &phy_ops_1g_serdes_niu,
 2320	.phy_addr_base	= 0,
 2321};
 2322
 2323static const struct niu_phy_template phy_template_10g_fiber = {
 2324	.ops		= &phy_ops_10g_fiber,
 2325	.phy_addr_base	= 8,
 2326};
 2327
 2328static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
 2329	.ops		= &phy_ops_10g_fiber_hotplug,
 2330	.phy_addr_base	= 8,
 2331};
 2332
 2333static const struct niu_phy_template phy_template_niu_10g_hotplug = {
 2334	.ops		= &phy_ops_niu_10g_hotplug,
 2335	.phy_addr_base	= 8,
 2336};
 2337
 2338static const struct niu_phy_template phy_template_10g_copper = {
 2339	.ops		= &phy_ops_10g_copper,
 2340	.phy_addr_base	= 10,
 2341};
 2342
 2343static const struct niu_phy_template phy_template_1g_fiber = {
 2344	.ops		= &phy_ops_1g_fiber,
 2345	.phy_addr_base	= 0,
 2346};
 2347
 2348static const struct niu_phy_template phy_template_1g_copper = {
 2349	.ops		= &phy_ops_1g_copper,
 2350	.phy_addr_base	= 0,
 2351};
 2352
 2353static const struct niu_phy_template phy_template_1g_rgmii = {
 2354	.ops		= &phy_ops_1g_rgmii,
 2355	.phy_addr_base	= 0,
 2356};
 2357
 2358static const struct niu_phy_template phy_template_10g_serdes = {
 2359	.ops		= &phy_ops_10g_serdes,
 2360	.phy_addr_base	= 0,
 2361};
 2362
 2363static int niu_atca_port_num[4] = {
  2364	0, 0, 11, 10
 2365};
 2366
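      /* Bring up the 10G SERDES on port 0 or 1.  If the expected
       * ready/detect pattern never appears in ESR_INT_SIGNALS, fall
       * back to 1G SERDES mode and switch the MAC over to the PCS
       * transceiver.
       */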
 2367static int serdes_init_10g_serdes(struct niu *np)
 2368{
 2369	struct niu_link_config *lp = &np->link_config;
 2370	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
 2371	u64 ctrl_val, test_cfg_val, sig, mask, val;
 2372
 2373	switch (np->port) {
 2374	case 0:
 2375		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
 2376		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
 2377		pll_cfg = ENET_SERDES_0_PLL_CFG;
 2378		break;
 2379	case 1:
 2380		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
 2381		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
 2382		pll_cfg = ENET_SERDES_1_PLL_CFG;
 2383		break;
 2384
 2385	default:
 2386		return -EINVAL;
 2387	}
 2388	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
 2389		    ENET_SERDES_CTRL_SDET_1 |
 2390		    ENET_SERDES_CTRL_SDET_2 |
 2391		    ENET_SERDES_CTRL_SDET_3 |
 2392		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
 2393		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
 2394		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
 2395		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
 2396		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
 2397		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
 2398		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
 2399		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
 2400	test_cfg_val = 0;
 2401
 2402	if (lp->loopback_mode == LOOPBACK_PHY) {
 2403		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
 2404				  ENET_SERDES_TEST_MD_0_SHIFT) |
 2405				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2406				  ENET_SERDES_TEST_MD_1_SHIFT) |
 2407				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2408				  ENET_SERDES_TEST_MD_2_SHIFT) |
 2409				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2410				  ENET_SERDES_TEST_MD_3_SHIFT));
 2411	}
 2412
 2413	esr_reset(np);
 2414	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
 2415	nw64(ctrl_reg, ctrl_val);
 2416	nw64(test_cfg_reg, test_cfg_val);
 2417
 2418	/* Initialize all 4 lanes of the SERDES.  */
 2419	for (i = 0; i < 4; i++) {
 2420		u32 rxtx_ctrl, glue0;
 2421		int err;
 2422
 2423		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
 2424		if (err)
 2425			return err;
 2426		err = esr_read_glue0(np, i, &glue0);
 2427		if (err)
 2428			return err;
 2429
 2430		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
 2431		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
 2432			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
 2433
 2434		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
 2435			   ESR_GLUE_CTRL0_THCNT |
 2436			   ESR_GLUE_CTRL0_BLTIME);
 2437		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
 2438			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
 2439			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
 2440			  (BLTIME_300_CYCLES <<
 2441			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
 2442
 2443		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
 2444		if (err)
 2445			return err;
 2446		err = esr_write_glue0(np, i, glue0);
 2447		if (err)
 2448			return err;
 2449	}
  2450
 2452	sig = nr64(ESR_INT_SIGNALS);
 2453	switch (np->port) {
 2454	case 0:
 2455		mask = ESR_INT_SIGNALS_P0_BITS;
 2456		val = (ESR_INT_SRDY0_P0 |
 2457		       ESR_INT_DET0_P0 |
 2458		       ESR_INT_XSRDY_P0 |
 2459		       ESR_INT_XDP_P0_CH3 |
 2460		       ESR_INT_XDP_P0_CH2 |
 2461		       ESR_INT_XDP_P0_CH1 |
 2462		       ESR_INT_XDP_P0_CH0);
 2463		break;
 2464
 2465	case 1:
 2466		mask = ESR_INT_SIGNALS_P1_BITS;
 2467		val = (ESR_INT_SRDY0_P1 |
 2468		       ESR_INT_DET0_P1 |
 2469		       ESR_INT_XSRDY_P1 |
 2470		       ESR_INT_XDP_P1_CH3 |
 2471		       ESR_INT_XDP_P1_CH2 |
 2472		       ESR_INT_XDP_P1_CH1 |
 2473		       ESR_INT_XDP_P1_CH0);
 2474		break;
 2475
 2476	default:
 2477		return -EINVAL;
 2478	}
 2479
 2480	if ((sig & mask) != val) {
 2481		int err;
 2482		err = serdes_init_1g_serdes(np);
 2483		if (!err) {
 2484			np->flags &= ~NIU_FLAGS_10G;
 2485			np->mac_xcvr = MAC_XCVR_PCS;
  2486		} else {
 2487			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
 2488				   np->port);
 2489			return -ENODEV;
 2490		}
 2491	}
 2492
 2493	return 0;
 2494}
 2495
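      /* Select a PHY template (ops + base MDIO address) from the
       * platform type and the 10G/fiber/serdes flag combination, then
       * derive the final phy_addr by adding a per-port offset.
       */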
 2496static int niu_determine_phy_disposition(struct niu *np)
 2497{
 2498	struct niu_parent *parent = np->parent;
 2499	u8 plat_type = parent->plat_type;
 2500	const struct niu_phy_template *tp;
 2501	u32 phy_addr_off = 0;
 2502
 2503	if (plat_type == PLAT_TYPE_NIU) {
 2504		switch (np->flags &
 2505			(NIU_FLAGS_10G |
 2506			 NIU_FLAGS_FIBER |
 2507			 NIU_FLAGS_XCVR_SERDES)) {
 2508		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 2509			/* 10G Serdes */
 2510			tp = &phy_template_niu_10g_serdes;
 2511			break;
 2512		case NIU_FLAGS_XCVR_SERDES:
 2513			/* 1G Serdes */
 2514			tp = &phy_template_niu_1g_serdes;
 2515			break;
 2516		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 2517			/* 10G Fiber */
 2518		default:
 2519			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
 2520				tp = &phy_template_niu_10g_hotplug;
 2521				if (np->port == 0)
 2522					phy_addr_off = 8;
 2523				if (np->port == 1)
 2524					phy_addr_off = 12;
 2525			} else {
 2526				tp = &phy_template_niu_10g_fiber;
 2527				phy_addr_off += np->port;
 2528			}
 2529			break;
 2530		}
 2531	} else {
 2532		switch (np->flags &
 2533			(NIU_FLAGS_10G |
 2534			 NIU_FLAGS_FIBER |
 2535			 NIU_FLAGS_XCVR_SERDES)) {
 2536		case 0:
 2537			/* 1G copper */
 2538			tp = &phy_template_1g_copper;
 2539			if (plat_type == PLAT_TYPE_VF_P0)
 2540				phy_addr_off = 10;
 2541			else if (plat_type == PLAT_TYPE_VF_P1)
 2542				phy_addr_off = 26;
 2543
 2544			phy_addr_off += (np->port ^ 0x3);
 2545			break;
 2546
 2547		case NIU_FLAGS_10G:
 2548			/* 10G copper */
 2549			tp = &phy_template_10g_copper;
 2550			break;
 2551
 2552		case NIU_FLAGS_FIBER:
 2553			/* 1G fiber */
 2554			tp = &phy_template_1g_fiber;
 2555			break;
 2556
 2557		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 2558			/* 10G fiber */
 2559			tp = &phy_template_10g_fiber;
 2560			if (plat_type == PLAT_TYPE_VF_P0 ||
 2561			    plat_type == PLAT_TYPE_VF_P1)
 2562				phy_addr_off = 8;
 2563			phy_addr_off += np->port;
 2564			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
 2565				tp = &phy_template_10g_fiber_hotplug;
 2566				if (np->port == 0)
 2567					phy_addr_off = 8;
 2568				if (np->port == 1)
 2569					phy_addr_off = 12;
 2570			}
 2571			break;
 2572
 2573		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 2574		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
 2575		case NIU_FLAGS_XCVR_SERDES:
  2576			switch (np->port) {
 2577			case 0:
 2578			case 1:
 2579				tp = &phy_template_10g_serdes;
 2580				break;
 2581			case 2:
 2582			case 3:
 2583				tp = &phy_template_1g_rgmii;
 2584				break;
 2585			default:
  2586				return -EINVAL;
 2588			}
 2589			phy_addr_off = niu_atca_port_num[np->port];
 2590			break;
 2591
 2592		default:
 2593			return -EINVAL;
 2594		}
 2595	}
 2596
 2597	np->phy_ops = tp->ops;
 2598	np->phy_addr = tp->phy_addr_base + phy_addr_off;
 2599
 2600	return 0;
 2601}
 2602
 2603static int niu_init_link(struct niu *np)
 2604{
 2605	struct niu_parent *parent = np->parent;
 2606	int err, ignore;
 2607
 2608	if (parent->plat_type == PLAT_TYPE_NIU) {
 2609		err = niu_xcvr_init(np);
 2610		if (err)
 2611			return err;
 2612		msleep(200);
 2613	}
 2614	err = niu_serdes_init(np);
 2615	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
 2616		return err;
 2617	msleep(200);
 2618	err = niu_xcvr_init(np);
 2619	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
 2620		niu_link_status(np, &ignore);
 2621	return 0;
 2622}
 2623
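      /* The unicast MAC address is programmed 16 bits at a time:
       * ADDR0 holds the two least significant octets and ADDR2 the
       * two most significant.
       */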
 2624static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
 2625{
 2626	u16 reg0 = addr[4] << 8 | addr[5];
 2627	u16 reg1 = addr[2] << 8 | addr[3];
 2628	u16 reg2 = addr[0] << 8 | addr[1];
 2629
 2630	if (np->flags & NIU_FLAGS_XMAC) {
 2631		nw64_mac(XMAC_ADDR0, reg0);
 2632		nw64_mac(XMAC_ADDR1, reg1);
 2633		nw64_mac(XMAC_ADDR2, reg2);
 2634	} else {
 2635		nw64_mac(BMAC_ADDR0, reg0);
 2636		nw64_mac(BMAC_ADDR1, reg1);
 2637		nw64_mac(BMAC_ADDR2, reg2);
 2638	}
 2639}
 2640
 2641static int niu_num_alt_addr(struct niu *np)
 2642{
 2643	if (np->flags & NIU_FLAGS_XMAC)
 2644		return XMAC_NUM_ALT_ADDR;
 2645	else
 2646		return BMAC_NUM_ALT_ADDR;
 2647}
 2648
 2649static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
 2650{
 2651	u16 reg0 = addr[4] << 8 | addr[5];
 2652	u16 reg1 = addr[2] << 8 | addr[3];
 2653	u16 reg2 = addr[0] << 8 | addr[1];
 2654
 2655	if (index >= niu_num_alt_addr(np))
 2656		return -EINVAL;
 2657
 2658	if (np->flags & NIU_FLAGS_XMAC) {
 2659		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
 2660		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
 2661		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
 2662	} else {
 2663		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
 2664		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
 2665		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
 2666	}
 2667
 2668	return 0;
 2669}
 2670
 2671static int niu_enable_alt_mac(struct niu *np, int index, int on)
 2672{
 2673	unsigned long reg;
 2674	u64 val, mask;
 2675
 2676	if (index >= niu_num_alt_addr(np))
 2677		return -EINVAL;
 2678
 2679	if (np->flags & NIU_FLAGS_XMAC) {
 2680		reg = XMAC_ADDR_CMPEN;
 2681		mask = 1 << index;
 2682	} else {
 2683		reg = BMAC_ADDR_CMPEN;
 2684		mask = 1 << (index + 1);
 2685	}
 2686
 2687	val = nr64_mac(reg);
 2688	if (on)
 2689		val |= mask;
 2690	else
 2691		val &= ~mask;
 2692	nw64_mac(reg, val);
 2693
 2694	return 0;
 2695}
 2696
 2697static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
 2698				   int num, int mac_pref)
 2699{
 2700	u64 val = nr64_mac(reg);
 2701	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
 2702	val |= num;
 2703	if (mac_pref)
 2704		val |= HOST_INFO_MPR;
 2705	nw64_mac(reg, val);
 2706}
 2707
 2708static int __set_rdc_table_num(struct niu *np,
 2709			       int xmac_index, int bmac_index,
 2710			       int rdc_table_num, int mac_pref)
 2711{
 2712	unsigned long reg;
 2713
 2714	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
 2715		return -EINVAL;
 2716	if (np->flags & NIU_FLAGS_XMAC)
 2717		reg = XMAC_HOST_INFO(xmac_index);
 2718	else
 2719		reg = BMAC_HOST_INFO(bmac_index);
 2720	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
 2721	return 0;
 2722}
 2723
 2724static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
 2725					 int mac_pref)
 2726{
 2727	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
 2728}
 2729
 2730static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
 2731					   int mac_pref)
 2732{
 2733	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
 2734}
 2735
 2736static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
 2737				     int table_num, int mac_pref)
 2738{
 2739	if (idx >= niu_num_alt_addr(np))
 2740		return -EINVAL;
 2741	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
 2742}
 2743
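      /* Recompute the two parity bits of a VLAN table entry.  Each
       * parity bit is set so that it and the byte it covers (ports
       * 0/1 in bits 7:0, ports 2/3 in bits 15:8) together contain an
       * even number of ones, which is presumably what the hardware
       * checks.
       */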
 2744static u64 vlan_entry_set_parity(u64 reg_val)
 2745{
 2746	u64 port01_mask;
 2747	u64 port23_mask;
 2748
 2749	port01_mask = 0x00ff;
 2750	port23_mask = 0xff00;
 2751
 2752	if (hweight64(reg_val & port01_mask) & 1)
 2753		reg_val |= ENET_VLAN_TBL_PARITY0;
 2754	else
 2755		reg_val &= ~ENET_VLAN_TBL_PARITY0;
 2756
 2757	if (hweight64(reg_val & port23_mask) & 1)
 2758		reg_val |= ENET_VLAN_TBL_PARITY1;
 2759	else
 2760		reg_val &= ~ENET_VLAN_TBL_PARITY1;
 2761
 2762	return reg_val;
 2763}
 2764
 2765static void vlan_tbl_write(struct niu *np, unsigned long index,
 2766			   int port, int vpr, int rdc_table)
 2767{
 2768	u64 reg_val = nr64(ENET_VLAN_TBL(index));
 2769
 2770	reg_val &= ~((ENET_VLAN_TBL_VPR |
 2771		      ENET_VLAN_TBL_VLANRDCTBLN) <<
 2772		     ENET_VLAN_TBL_SHIFT(port));
 2773	if (vpr)
 2774		reg_val |= (ENET_VLAN_TBL_VPR <<
 2775			    ENET_VLAN_TBL_SHIFT(port));
 2776	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
 2777
 2778	reg_val = vlan_entry_set_parity(reg_val);
 2779
 2780	nw64(ENET_VLAN_TBL(index), reg_val);
 2781}
 2782
 2783static void vlan_tbl_clear(struct niu *np)
 2784{
 2785	int i;
 2786
 2787	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
 2788		nw64(ENET_VLAN_TBL(i), 0);
 2789}
 2790
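      /* Poll for a TCAM_CTL status bit, giving the hardware roughly
       * one millisecond (1000 polls of 1us) before reporting -ENODEV.
       */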
 2791static int tcam_wait_bit(struct niu *np, u64 bit)
 2792{
 2793	int limit = 1000;
 2794
 2795	while (--limit > 0) {
 2796		if (nr64(TCAM_CTL) & bit)
 2797			break;
 2798		udelay(1);
 2799	}
 2800	if (limit <= 0)
 2801		return -ENODEV;
 2802
 2803	return 0;
 2804}
 2805
 2806static int tcam_flush(struct niu *np, int index)
 2807{
 2808	nw64(TCAM_KEY_0, 0x00);
 2809	nw64(TCAM_KEY_MASK_0, 0xff);
 2810	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
 2811
 2812	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2813}
 2814
 2815#if 0
 2816static int tcam_read(struct niu *np, int index,
 2817		     u64 *key, u64 *mask)
 2818{
 2819	int err;
 2820
 2821	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
 2822	err = tcam_wait_bit(np, TCAM_CTL_STAT);
 2823	if (!err) {
 2824		key[0] = nr64(TCAM_KEY_0);
 2825		key[1] = nr64(TCAM_KEY_1);
 2826		key[2] = nr64(TCAM_KEY_2);
 2827		key[3] = nr64(TCAM_KEY_3);
 2828		mask[0] = nr64(TCAM_KEY_MASK_0);
 2829		mask[1] = nr64(TCAM_KEY_MASK_1);
 2830		mask[2] = nr64(TCAM_KEY_MASK_2);
 2831		mask[3] = nr64(TCAM_KEY_MASK_3);
 2832	}
 2833	return err;
 2834}
 2835#endif
 2836
 2837static int tcam_write(struct niu *np, int index,
 2838		      u64 *key, u64 *mask)
 2839{
 2840	nw64(TCAM_KEY_0, key[0]);
 2841	nw64(TCAM_KEY_1, key[1]);
 2842	nw64(TCAM_KEY_2, key[2]);
 2843	nw64(TCAM_KEY_3, key[3]);
 2844	nw64(TCAM_KEY_MASK_0, mask[0]);
 2845	nw64(TCAM_KEY_MASK_1, mask[1]);
 2846	nw64(TCAM_KEY_MASK_2, mask[2]);
 2847	nw64(TCAM_KEY_MASK_3, mask[3]);
 2848	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
 2849
 2850	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2851}
 2852
 2853#if 0
 2854static int tcam_assoc_read(struct niu *np, int index, u64 *data)
 2855{
 2856	int err;
 2857
 2858	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
 2859	err = tcam_wait_bit(np, TCAM_CTL_STAT);
 2860	if (!err)
 2861		*data = nr64(TCAM_KEY_1);
 2862
 2863	return err;
 2864}
 2865#endif
 2866
 2867static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
 2868{
 2869	nw64(TCAM_KEY_1, assoc_data);
 2870	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
 2871
 2872	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2873}
 2874
 2875static void tcam_enable(struct niu *np, int on)
 2876{
 2877	u64 val = nr64(FFLP_CFG_1);
 2878
 2879	if (on)
 2880		val &= ~FFLP_CFG_1_TCAM_DIS;
 2881	else
 2882		val |= FFLP_CFG_1_TCAM_DIS;
 2883	nw64(FFLP_CFG_1, val);
 2884}
 2885
 2886static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
 2887{
 2888	u64 val = nr64(FFLP_CFG_1);
 2889
 2890	val &= ~(FFLP_CFG_1_FFLPINITDONE |
 2891		 FFLP_CFG_1_CAMLAT |
 2892		 FFLP_CFG_1_CAMRATIO);
 2893	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
 2894	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
 2895	nw64(FFLP_CFG_1, val);
 2896
 2897	val = nr64(FFLP_CFG_1);
 2898	val |= FFLP_CFG_1_FFLPINITDONE;
 2899	nw64(FFLP_CFG_1, val);
 2900}
 2901
 2902static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
 2903				      int on)
 2904{
 2905	unsigned long reg;
 2906	u64 val;
 2907
 2908	if (class < CLASS_CODE_ETHERTYPE1 ||
 2909	    class > CLASS_CODE_ETHERTYPE2)
 2910		return -EINVAL;
 2911
 2912	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
 2913	val = nr64(reg);
 2914	if (on)
 2915		val |= L2_CLS_VLD;
 2916	else
 2917		val &= ~L2_CLS_VLD;
 2918	nw64(reg, val);
 2919
 2920	return 0;
 2921}
 2922
 2923#if 0
 2924static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
 2925				   u64 ether_type)
 2926{
 2927	unsigned long reg;
 2928	u64 val;
 2929
 2930	if (class < CLASS_CODE_ETHERTYPE1 ||
 2931	    class > CLASS_CODE_ETHERTYPE2 ||
 2932	    (ether_type & ~(u64)0xffff) != 0)
 2933		return -EINVAL;
 2934
 2935	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
 2936	val = nr64(reg);
 2937	val &= ~L2_CLS_ETYPE;
 2938	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
 2939	nw64(reg, val);
 2940
 2941	return 0;
 2942}
 2943#endif
 2944
 2945static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
 2946				     int on)
 2947{
 2948	unsigned long reg;
 2949	u64 val;
 2950
 2951	if (class < CLASS_CODE_USER_PROG1 ||
 2952	    class > CLASS_CODE_USER_PROG4)
 2953		return -EINVAL;
 2954
 2955	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
 2956	val = nr64(reg);
 2957	if (on)
 2958		val |= L3_CLS_VALID;
 2959	else
 2960		val &= ~L3_CLS_VALID;
 2961	nw64(reg, val);
 2962
 2963	return 0;
 2964}
 2965
 2966static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
 2967				  int ipv6, u64 protocol_id,
 2968				  u64 tos_mask, u64 tos_val)
 2969{
 2970	unsigned long reg;
 2971	u64 val;
 2972
 2973	if (class < CLASS_CODE_USER_PROG1 ||
 2974	    class > CLASS_CODE_USER_PROG4 ||
 2975	    (protocol_id & ~(u64)0xff) != 0 ||
 2976	    (tos_mask & ~(u64)0xff) != 0 ||
 2977	    (tos_val & ~(u64)0xff) != 0)
 2978		return -EINVAL;
 2979
 2980	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
 2981	val = nr64(reg);
 2982	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
 2983		 L3_CLS_TOSMASK | L3_CLS_TOS);
 2984	if (ipv6)
 2985		val |= L3_CLS_IPVER;
 2986	val |= (protocol_id << L3_CLS_PID_SHIFT);
 2987	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
 2988	val |= (tos_val << L3_CLS_TOS_SHIFT);
 2989	nw64(reg, val);
 2990
 2991	return 0;
 2992}
 2993
 2994static int tcam_early_init(struct niu *np)
 2995{
 2996	unsigned long i;
 2997	int err;
 2998
 2999	tcam_enable(np, 0);
 3000	tcam_set_lat_and_ratio(np,
 3001			       DEFAULT_TCAM_LATENCY,
 3002			       DEFAULT_TCAM_ACCESS_RATIO);
 3003	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
 3004		err = tcam_user_eth_class_enable(np, i, 0);
 3005		if (err)
 3006			return err;
 3007	}
 3008	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
 3009		err = tcam_user_ip_class_enable(np, i, 0);
 3010		if (err)
 3011			return err;
 3012	}
 3013
 3014	return 0;
 3015}
 3016
 3017static int tcam_flush_all(struct niu *np)
 3018{
 3019	unsigned long i;
 3020
 3021	for (i = 0; i < np->parent->tcam_num_entries; i++) {
 3022		int err = tcam_flush(np, i);
 3023		if (err)
 3024			return err;
 3025	}
 3026	return 0;
 3027}
 3028
 3029static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
 3030{
 3031	return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
 3032}
 3033
 3034#if 0
 3035static int hash_read(struct niu *np, unsigned long partition,
 3036		     unsigned long index, unsigned long num_entries,
 3037		     u64 *data)
 3038{
 3039	u64 val = hash_addr_regval(index, num_entries);
 3040	unsigned long i;
 3041
 3042	if (partition >= FCRAM_NUM_PARTITIONS ||
  3043	    index + (num_entries * 8) > FCRAM_SIZE)
 3044		return -EINVAL;
 3045
 3046	nw64(HASH_TBL_ADDR(partition), val);
 3047	for (i = 0; i < num_entries; i++)
 3048		data[i] = nr64(HASH_TBL_DATA(partition));
 3049
 3050	return 0;
 3051}
 3052#endif
 3053
 3054static int hash_write(struct niu *np, unsigned long partition,
 3055		      unsigned long index, unsigned long num_entries,
 3056		      u64 *data)
 3057{
 3058	u64 val = hash_addr_regval(index, num_entries);
 3059	unsigned long i;
 3060
 3061	if (partition >= FCRAM_NUM_PARTITIONS ||
 3062	    index + (num_entries * 8) > FCRAM_SIZE)
 3063		return -EINVAL;
 3064
 3065	nw64(HASH_TBL_ADDR(partition), val);
 3066	for (i = 0; i < num_entries; i++)
 3067		nw64(HASH_TBL_DATA(partition), data[i]);
 3068
 3069	return 0;
 3070}
 3071
 3072static void fflp_reset(struct niu *np)
 3073{
 3074	u64 val;
 3075
 3076	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
 3077	udelay(10);
 3078	nw64(FFLP_CFG_1, 0);
 3079
 3080	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
 3081	nw64(FFLP_CFG_1, val);
 3082}
 3083
 3084static void fflp_set_timings(struct niu *np)
 3085{
 3086	u64 val = nr64(FFLP_CFG_1);
 3087
 3088	val &= ~FFLP_CFG_1_FFLPINITDONE;
 3089	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
 3090	nw64(FFLP_CFG_1, val);
 3091
 3092	val = nr64(FFLP_CFG_1);
 3093	val |= FFLP_CFG_1_FFLPINITDONE;
 3094	nw64(FFLP_CFG_1, val);
 3095
 3096	val = nr64(FCRAM_REF_TMR);
 3097	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
 3098	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
 3099	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
 3100	nw64(FCRAM_REF_TMR, val);
 3101}
 3102
 3103static int fflp_set_partition(struct niu *np, u64 partition,
 3104			      u64 mask, u64 base, int enable)
 3105{
 3106	unsigned long reg;
 3107	u64 val;
 3108
 3109	if (partition >= FCRAM_NUM_PARTITIONS ||
 3110	    (mask & ~(u64)0x1f) != 0 ||
 3111	    (base & ~(u64)0x1f) != 0)
 3112		return -EINVAL;
 3113
 3114	reg = FLW_PRT_SEL(partition);
 3115
 3116	val = nr64(reg);
 3117	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
 3118	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
 3119	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
 3120	if (enable)
 3121		val |= FLW_PRT_SEL_EXT;
 3122	nw64(reg, val);
 3123
 3124	return 0;
 3125}
 3126
 3127static int fflp_disable_all_partitions(struct niu *np)
 3128{
 3129	unsigned long i;
 3130
 3131	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
  3132		int err = fflp_set_partition(np, i, 0, 0, 0);
 3133		if (err)
 3134			return err;
 3135	}
 3136	return 0;
 3137}
 3138
 3139static void fflp_llcsnap_enable(struct niu *np, int on)
 3140{
 3141	u64 val = nr64(FFLP_CFG_1);
 3142
 3143	if (on)
 3144		val |= FFLP_CFG_1_LLCSNAP;
 3145	else
 3146		val &= ~FFLP_CFG_1_LLCSNAP;
 3147	nw64(FFLP_CFG_1, val);
 3148}
 3149
 3150static void fflp_errors_enable(struct niu *np, int on)
 3151{
 3152	u64 val = nr64(FFLP_CFG_1);
 3153
 3154	if (on)
 3155		val &= ~FFLP_CFG_1_ERRORDIS;
 3156	else
 3157		val |= FFLP_CFG_1_ERRORDIS;
 3158	nw64(FFLP_CFG_1, val);
 3159}
 3160
 3161static int fflp_hash_clear(struct niu *np)
 3162{
 3163	struct fcram_hash_ipv4 ent;
 3164	unsigned long i;
 3165
 3166	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
 3167	memset(&ent, 0, sizeof(ent));
 3168	ent.header = HASH_HEADER_EXT;
 3169
 3170	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
 3171		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
 3172		if (err)
 3173			return err;
 3174	}
 3175	return 0;
 3176}
 3177
 3178static int fflp_early_init(struct niu *np)
 3179{
 3180	struct niu_parent *parent;
 3181	unsigned long flags;
 3182	int err;
 3183
 3184	niu_lock_parent(np, flags);
 3185
 3186	parent = np->parent;
 3187	err = 0;
 3188	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
 3189		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 3190			fflp_reset(np);
 3191			fflp_set_timings(np);
 3192			err = fflp_disable_all_partitions(np);
 3193			if (err) {
 3194				netif_printk(np, probe, KERN_DEBUG, np->dev,
 3195					     "fflp_disable_all_partitions failed, err=%d\n",
 3196					     err);
 3197				goto out;
 3198			}
 3199		}
 3200
 3201		err = tcam_early_init(np);
 3202		if (err) {
 3203			netif_printk(np, probe, KERN_DEBUG, np->dev,
 3204				     "tcam_early_init failed, err=%d\n", err);
 3205			goto out;
 3206		}
 3207		fflp_llcsnap_enable(np, 1);
 3208		fflp_errors_enable(np, 0);
 3209		nw64(H1POLY, 0);
 3210		nw64(H2POLY, 0);
 3211
 3212		err = tcam_flush_all(np);
 3213		if (err) {
 3214			netif_printk(np, probe, KERN_DEBUG, np->dev,
 3215				     "tcam_flush_all failed, err=%d\n", err);
 3216			goto out;
 3217		}
 3218		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 3219			err = fflp_hash_clear(np);
 3220			if (err) {
 3221				netif_printk(np, probe, KERN_DEBUG, np->dev,
 3222					     "fflp_hash_clear failed, err=%d\n",
 3223					     err);
 3224				goto out;
 3225			}
 3226		}
 3227
 3228		vlan_tbl_clear(np);
 3229
 3230		parent->flags |= PARENT_FLGS_CLS_HWINIT;
 3231	}
 3232out:
 3233	niu_unlock_parent(np, flags);
 3234	return err;
 3235}
 3236
 3237static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
 3238{
 3239	if (class_code < CLASS_CODE_USER_PROG1 ||
 3240	    class_code > CLASS_CODE_SCTP_IPV6)
 3241		return -EINVAL;
 3242
 3243	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
 3244	return 0;
 3245}
 3246
 3247static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
 3248{
 3249	if (class_code < CLASS_CODE_USER_PROG1 ||
 3250	    class_code > CLASS_CODE_SCTP_IPV6)
 3251		return -EINVAL;
 3252
 3253	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
 3254	return 0;
 3255}
 3256
 3257/* Entries for the ports are interleaved in the TCAM */
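      /* With the interleaving, user entry idx of this port lands at
       * slot tcam_top + (idx + 1) * num_ports; the +1 skips over the
       * slots reserved for the IP fragment rule.
       */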
 3258static u16 tcam_get_index(struct niu *np, u16 idx)
 3259{
 3260	/* One entry reserved for IP fragment rule */
 3261	if (idx >= (np->clas.tcam_sz - 1))
 3262		idx = 0;
 3263	return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
 3264}
 3265
 3266static u16 tcam_get_size(struct niu *np)
 3267{
 3268	/* One entry reserved for IP fragment rule */
 3269	return np->clas.tcam_sz - 1;
 3270}
 3271
 3272static u16 tcam_get_valid_entry_cnt(struct niu *np)
 3273{
 3274	/* One entry reserved for IP fragment rule */
 3275	return np->clas.tcam_valid_entries - 1;
 3276}
 3277
 3278static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
 3279			      u32 offset, u32 size, u32 truesize)
 3280{
 3281	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
 3282
 3283	skb->len += size;
 3284	skb->data_len += size;
 3285	skb->truesize += truesize;
 3286}
 3287
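      /* Hash a block's DMA address to a bucket of the RX page hash:
       * discard the in-page offset, fold the higher-order bits down,
       * and mask to the MAX_RBR_RING_SIZE bucket array.
       */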
 3288static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
 3289{
 3290	a >>= PAGE_SHIFT;
 3291	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
 3292
 3293	return a & (MAX_RBR_RING_SIZE - 1);
 3294}
 3295
 3296static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
 3297				    struct page ***link)
 3298{
 3299	unsigned int h = niu_hash_rxaddr(rp, addr);
 3300	struct page *p, **pp;
 3301
 3302	addr &= PAGE_MASK;
 3303	pp = &rp->rxhash[h];
 3304	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
 3305		if (p->index == addr) {
 3306			*link = pp;
 3307			goto found;
 3308		}
 3309	}
 3310	BUG();
 3311
 3312found:
 3313	return p;
 3314}
 3315
 3316static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
 3317{
 3318	unsigned int h = niu_hash_rxaddr(rp, base);
 3319
 3320	page->index = base;
 3321	page->mapping = (struct address_space *) rp->rxhash[h];
 3322	rp->rxhash[h] = page;
 3323}
 3324
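      /* Allocate and DMA-map one page, enter it into the RX page
       * hash, and carve it into rbr_blocks_per_page receive blocks.
       * The page count is raised by blocks_per_page - 1 so that, in
       * effect, every RBR descriptor pointing into the page owns a
       * reference.
       */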
 3325static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
 3326			    gfp_t mask, int start_index)
 3327{
 3328	struct page *page;
 3329	u64 addr;
 3330	int i;
 3331
 3332	page = alloc_page(mask);
 3333	if (!page)
 3334		return -ENOMEM;
 3335
 3336	addr = np->ops->map_page(np->device, page, 0,
 3337				 PAGE_SIZE, DMA_FROM_DEVICE);
 3338	if (!addr) {
 3339		__free_page(page);
 3340		return -ENOMEM;
 3341	}
 3342
 3343	niu_hash_page(rp, page, addr);
 3344	if (rp->rbr_blocks_per_page > 1)
 3345		atomic_add(rp->rbr_blocks_per_page - 1,
 3346			   &compound_head(page)->_count);
 3347
 3348	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
 3349		__le32 *rbr = &rp->rbr[start_index + i];
 3350
 3351		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
 3352		addr += rp->rbr_block_size;
 3353	}
 3354
 3355	return 0;
 3356}
 3357
 3358static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
 3359{
 3360	int index = rp->rbr_index;
 3361
 3362	rp->rbr_pending++;
 3363	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
 3364		int err = niu_rbr_add_page(np, rp, mask, index);
 3365
 3366		if (unlikely(err)) {
 3367			rp->rbr_pending--;
 3368			return;
 3369		}
 3370
 3371		rp->rbr_index += rp->rbr_blocks_per_page;
 3372		BUG_ON(rp->rbr_index > rp->rbr_table_size);
 3373		if (rp->rbr_index == rp->rbr_table_size)
 3374			rp->rbr_index = 0;
 3375
 3376		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
 3377			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
 3378			rp->rbr_pending = 0;
 3379		}
 3380	}
 3381}
 3382
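      /* Discard the packet at the head of the RCR without building an
       * skb: walk its entries (following the MULTI chain) and free
       * each page whose last block has now been consumed.
       */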
 3383static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
 3384{
 3385	unsigned int index = rp->rcr_index;
 3386	int num_rcr = 0;
 3387
 3388	rp->rx_dropped++;
 3389	while (1) {
 3390		struct page *page, **link;
 3391		u64 addr, val;
 3392		u32 rcr_size;
 3393
 3394		num_rcr++;
 3395
 3396		val = le64_to_cpup(&rp->rcr[index]);
 3397		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 3398			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
 3399		page = niu_find_rxpage(rp, addr, &link);
 3400
 3401		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 3402					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 3403		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
 3404			*link = (struct page *) page->mapping;
 3405			np->ops->unmap_page(np->device, page->index,
 3406					    PAGE_SIZE, DMA_FROM_DEVICE);
 3407			page->index = 0;
 3408			page->mapping = NULL;
 3409			__free_page(page);
 3410			rp->rbr_refill_pending++;
 3411		}
 3412
 3413		index = NEXT_RCR(rp, index);
 3414		if (!(val & RCR_ENTRY_MULTI))
 3415			break;
 3416
 3417	}
 3418	rp->rcr_index = index;
 3419
 3420	return num_rcr;
 3421}
 3422
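      /* Assemble one received packet.  Each RCR entry donates one
       * receive block of a mapped page to the skb as a page fragment;
       * checksum status is taken from the first entry and the chain
       * ends when RCR_ENTRY_MULTI is clear.  The rx_pkt_hdr1 prefix
       * is pulled into the linear area so the RSS hash can be
       * extracted before it is stripped.
       */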
 3423static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
 3424			      struct rx_ring_info *rp)
 3425{
 3426	unsigned int index = rp->rcr_index;
 3427	struct rx_pkt_hdr1 *rh;
 3428	struct sk_buff *skb;
 3429	int len, num_rcr;
 3430
 3431	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
 3432	if (unlikely(!skb))
 3433		return niu_rx_pkt_ignore(np, rp);
 3434
 3435	num_rcr = 0;
 3436	while (1) {
 3437		struct page *page, **link;
 3438		u32 rcr_size, append_size;
 3439		u64 addr, val, off;
 3440
 3441		num_rcr++;
 3442
 3443		val = le64_to_cpup(&rp->rcr[index]);
 3444
 3445		len = (val & RCR_ENTRY_L2_LEN) >>
 3446			RCR_ENTRY_L2_LEN_SHIFT;
 3447		len -= ETH_FCS_LEN;
 3448
 3449		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 3450			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
 3451		page = niu_find_rxpage(rp, addr, &link);
 3452
 3453		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 3454					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 3455
 3456		off = addr & ~PAGE_MASK;
 3457		append_size = rcr_size;
 3458		if (num_rcr == 1) {
 3459			int ptype;
 3460
 3461			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
 3462			if ((ptype == RCR_PKT_TYPE_TCP ||
 3463			     ptype == RCR_PKT_TYPE_UDP) &&
 3464			    !(val & (RCR_ENTRY_NOPORT |
 3465				     RCR_ENTRY_ERROR)))
 3466				skb->ip_summed = CHECKSUM_UNNECESSARY;
 3467			else
 3468				skb_checksum_none_assert(skb);
 3469		} else if (!(val & RCR_ENTRY_MULTI))
 3470			append_size = len - skb->len;
 3471
 3472		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
 3473		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
 3474			*link = (struct page *) page->mapping;
 3475			np->ops->unmap_page(np->device, page->index,
 3476					    PAGE_SIZE, DMA_FROM_DEVICE);
 3477			page->index = 0;
 3478			page->mapping = NULL;
 3479			rp->rbr_refill_pending++;
 3480		} else
 3481			get_page(page);
 3482
 3483		index = NEXT_RCR(rp, index);
 3484		if (!(val & RCR_ENTRY_MULTI))
 3485			break;
 3486
 3487	}
 3488	rp->rcr_index = index;
 3489
 3490	len += sizeof(*rh);
 3491	len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
 3492	__pskb_pull_tail(skb, len);
 3493
 3494	rh = (struct rx_pkt_hdr1 *) skb->data;
 3495	if (np->dev->features & NETIF_F_RXHASH)
 3496		skb_set_hash(skb,
 3497			     ((u32)rh->hashval2_0 << 24 |
 3498			      (u32)rh->hashval2_1 << 16 |
 3499			      (u32)rh->hashval1_1 << 8 |
 3500			      (u32)rh->hashval1_2 << 0),
 3501			     PKT_HASH_TYPE_L3);
 3502	skb_pull(skb, sizeof(*rh));
 3503
 3504	rp->rx_packets++;
 3505	rp->rx_bytes += skb->len;
 3506
 3507	skb->protocol = eth_type_trans(skb, np->dev);
 3508	skb_record_rx_queue(skb, rp->rx_channel);
 3509	napi_gro_receive(napi, skb);
 3510
 3511	return num_rcr;
 3512}
 3513
 3514static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
 3515{
 3516	int blocks_per_page = rp->rbr_blocks_per_page;
 3517	int err, index = rp->rbr_index;
 3518
 3519	err = 0;
 3520	while (index < (rp->rbr_table_size - blocks_per_page)) {
 3521		err = niu_rbr_add_page(np, rp, mask, index);
 3522		if (unlikely(err))
 3523			break;
 3524
 3525		index += blocks_per_page;
 3526	}
 3527
 3528	rp->rbr_index = index;
 3529	return err;
 3530}
 3531
 3532static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
 3533{
 3534	int i;
 3535
 3536	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
 3537		struct page *page;
 3538
 3539		page = rp->rxhash[i];
 3540		while (page) {
 3541			struct page *next = (struct page *) page->mapping;
 3542			u64 base = page->index;
 3543
 3544			np->ops->unmap_page(np->device, base, PAGE_SIZE,
 3545					    DMA_FROM_DEVICE);
 3546			page->index = 0;
 3547			page->mapping = NULL;
 3548
 3549			__free_page(page);
 3550
 3551			page = next;
 3552		}
 3553	}
 3554
 3555	for (i = 0; i < rp->rbr_table_size; i++)
 3556		rp->rbr[i] = cpu_to_le32(0);
 3557	rp->rbr_index = 0;
 3558}
 3559
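      /* Reclaim one completed TX packet: recover its byte count from
       * the packet header, unmap the linear area and all fragments,
       * and step idx past every descriptor the packet used (one per
       * MAX_TX_DESC_LEN chunk of the head plus one per fragment).
       */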
 3560static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 3561{
 3562	struct tx_buff_info *tb = &rp->tx_buffs[idx];
 3563	struct sk_buff *skb = tb->skb;
 3564	struct tx_pkt_hdr *tp;
 3565	u64 tx_flags;
 3566	int i, len;
 3567
 3568	tp = (struct tx_pkt_hdr *) skb->data;
 3569	tx_flags = le64_to_cpup(&tp->flags);
 3570
 3571	rp->tx_packets++;
 3572	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
 3573			 ((tx_flags & TXHDR_PAD) / 2));
 3574
 3575	len = skb_headlen(skb);
 3576	np->ops->unmap_single(np->device, tb->mapping,
 3577			      len, DMA_TO_DEVICE);
 3578
 3579	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
 3580		rp->mark_pending--;
 3581
 3582	tb->skb = NULL;
 3583	do {
 3584		idx = NEXT_TX(rp, idx);
 3585		len -= MAX_TX_DESC_LEN;
 3586	} while (len > 0);
 3587
 3588	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 3589		tb = &rp->tx_buffs[idx];
 3590		BUG_ON(tb->skb != NULL);
 3591		np->ops->unmap_page(np->device, tb->mapping,
 3592				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
 3593				    DMA_TO_DEVICE);
 3594		idx = NEXT_TX(rp, idx);
 3595	}
 3596
 3597	dev_kfree_skb(skb);
 3598
 3599	return idx;
 3600}
 3601
 3602#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
 3603
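      /* Service TX completions on one ring.  The hardware packet
       * counter is free-running, so the number of newly completed
       * packets is the masked difference against the last snapshot.
       * A stopped queue is only woken once at least a quarter of the
       * ring is free again, rechecked under the tx lock to avoid
       * racing the xmit path.
       */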
 3604static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 3605{
 3606	struct netdev_queue *txq;
 3607	u16 pkt_cnt, tmp;
 3608	int cons, index;
 3609	u64 cs;
 3610
 3611	index = (rp - np->tx_rings);
 3612	txq = netdev_get_tx_queue(np->dev, index);
 3613
 3614	cs = rp->tx_cs;
 3615	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
 3616		goto out;
 3617
 3618	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
 3619	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
 3620		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
 3621
 3622	rp->last_pkt_cnt = tmp;
 3623
 3624	cons = rp->cons;
 3625
 3626	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
 3627		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 3628
 3629	while (pkt_cnt--)
 3630		cons = release_tx_packet(np, rp, cons);
 3631
 3632	rp->cons = cons;
 3633	smp_mb();
 3634
 3635out:
 3636	if (unlikely(netif_tx_queue_stopped(txq) &&
 3637		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
 3638		__netif_tx_lock(txq, smp_processor_id());
 3639		if (netif_tx_queue_stopped(txq) &&
 3640		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
 3641			netif_tx_wake_queue(txq);
 3642		__netif_tx_unlock(txq);
 3643	}
 3644}
 3645
 3646static inline void niu_sync_rx_discard_stats(struct niu *np,
 3647					     struct rx_ring_info *rp,
 3648					     const int limit)
 3649{
 3650	/* This elaborate scheme is needed for reading the RX discard
 3651	 * counters, as they are only 16-bit and can overflow quickly,
 3652	 * and because the overflow indication bit is not usable as
 3653	 * the counter value does not wrap, but remains at max value
 3654	 * 0xFFFF.
 3655	 *
 3656	 * In theory and in practice counters can be lost in between
 3657	 * reading nr64() and clearing the counter nw64().  For this
 3658	 * reason, the number of counter clearings nw64() is
  3659	 * limited/reduced through the limit parameter.
 3660	 */
 3661	int rx_channel = rp->rx_channel;
 3662	u32 misc, wred;
 3663
 3664	/* RXMISC (Receive Miscellaneous Discard Count), covers the
 3665	 * following discard events: IPP (Input Port Process),
  3666	 * FFLP/TCAM, full RCR (Receive Completion Ring), and an empty
  3667	 * RBR (Receive Block Ring) prefetch buffer.
 3668	 */
 3669	misc = nr64(RXMISC(rx_channel));
 3670	if (unlikely((misc & RXMISC_COUNT) > limit)) {
 3671		nw64(RXMISC(rx_channel), 0);
 3672		rp->rx_errors += misc & RXMISC_COUNT;
 3673
 3674		if (unlikely(misc & RXMISC_OFLOW))
 3675			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
 3676				rx_channel);
 3677
 3678		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
 3679			     "rx-%d: MISC drop=%u over=%u\n",
 3680			     rx_channel, misc, misc-limit);
 3681	}
 3682
 3683	/* WRED (Weighted Random Early Discard) by hardware */
 3684	wred = nr64(RED_DIS_CNT(rx_channel));
 3685	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
 3686		nw64(RED_DIS_CNT(rx_channel), 0);
 3687		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
 3688
 3689		if (unlikely(wred & RED_DIS_CNT_OFLOW))
 3690			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
 3691
 3692		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
 3693			     "rx-%d: WRED drop=%u over=%u\n",
 3694			     rx_channel, wred, wred-limit);
 3695	}
 3696}
 3697
 3698static int niu_rx_work(struct napi_struct *napi, struct niu *np,
 3699		       struct rx_ring_info *rp, int budget)
 3700{
 3701	int qlen, rcr_done = 0, work_done = 0;
 3702	struct rxdma_mailbox *mbox = rp->mbox;
 3703	u64 stat;
 3704
 3705#if 1
 3706	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 3707	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
 3708#else
 3709	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
 3710	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
 3711#endif
 3712	mbox->rx_dma_ctl_stat = 0;
 3713	mbox->rcrstat_a = 0;
 3714
 3715	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
 3716		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
 3717		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
 3718
 3719	rcr_done = work_done = 0;
 3720	qlen = min(qlen, budget);
 3721	while (work_done < qlen) {
 3722		rcr_done += niu_process_rx_pkt(napi, np, rp);
 3723		work_done++;
 3724	}
 3725
 3726	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
 3727		unsigned int i;
 3728
 3729		for (i = 0; i < rp->rbr_refill_pending; i++)
 3730			niu_rbr_refill(np, rp, GFP_ATOMIC);
 3731		rp->rbr_refill_pending = 0;
 3732	}
 3733
 3734	stat = (RX_DMA_CTL_STAT_MEX |
 3735		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
 3736		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
 3737
 3738	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
 3739
  3740	/* Only sync discard stats when qlen indicates potential for drops */
 3741	if (qlen > 10)
 3742		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
 3743
 3744	return work_done;
 3745}
 3746
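      /* NAPI poll core.  The group's v0 status word packs two
       * vectors: the upper 32 bits flag TX channels with work, the
       * lower 32 bits RX channels.  RX processing is bounded by the
       * remaining budget, and each serviced channel has LD_IM0
       * written to 0, which presumably unmasks its logical device
       * interrupt again.
       */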
 3747static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
 3748{
 3749	u64 v0 = lp->v0;
 3750	u32 tx_vec = (v0 >> 32);
 3751	u32 rx_vec = (v0 & 0xffffffff);
 3752	int i, work_done = 0;
 3753
 3754	netif_printk(np, intr, KERN_DEBUG, np->dev,
 3755		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
 3756
 3757	for (i = 0; i < np->num_tx_rings; i++) {
 3758		struct tx_ring_info *rp = &np->tx_rings[i];
 3759		if (tx_vec & (1 << rp->tx_channel))
 3760			niu_tx_work(np, rp);
 3761		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
 3762	}
 3763
 3764	for (i = 0; i < np->num_rx_rings; i++) {
 3765		struct rx_ring_info *rp = &np->rx_rings[i];
 3766
 3767		if (rx_vec & (1 << rp->rx_channel)) {
 3768			int this_work_done;
 3769
 3770			this_work_done = niu_rx_work(&lp->napi, np, rp,
 3771						     budget);
 3772
 3773			budget -= this_work_done;
 3774			work_done += this_work_done;
 3775		}
 3776		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
 3777	}
 3778
 3779	return work_done;
 3780}
 3781
 3782static int niu_poll(struct napi_struct *napi, int budget)
 3783{
 3784	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
 3785	struct niu *np = lp->np;
 3786	int work_done;
 3787
 3788	work_done = niu_poll_core(np, lp, budget);
 3789
 3790	if (work_done < budget) {
 3791		napi_complete(napi);
 3792		niu_ldg_rearm(np, lp, 1);
 3793	}
 3794	return work_done;
 3795}
 3796
 3797static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
 3798				  u64 stat)
 3799{
 3800	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
 3801
 3802	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
 3803		pr_cont("RBR_TMOUT ");
 3804	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
 3805		pr_cont("RSP_CNT ");
 3806	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
 3807		pr_cont("BYTE_EN_BUS ");
 3808	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
 3809		pr_cont("RSP_DAT ");
 3810	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
 3811		pr_cont("RCR_ACK ");
 3812	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
 3813		pr_cont("RCR_SHA_PAR ");
 3814	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
 3815		pr_cont("RBR_PRE_PAR ");
 3816	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
 3817		pr_cont("CONFIG ");
 3818	if (stat & RX_DMA_CTL_STAT_RCRINCON)
 3819		pr_cont("RCRINCON ");
 3820	if (stat & RX_DMA_CTL_STAT_RCRFULL)
 3821		pr_cont("RCRFULL ");
 3822	if (stat & RX_DMA_CTL_STAT_RBRFULL)
 3823		pr_cont("RBRFULL ");
 3824	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
 3825		pr_cont("RBRLOGPAGE ");
 3826	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
 3827		pr_cont("CFIGLOGPAGE ");
 3828	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
  3829		pr_cont("DC_FIFO ");
 3830
 3831	pr_cont(")\n");
 3832}
 3833
 3834static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
 3835{
 3836	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 3837	int err = 0;
  3838
 3840	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
 3841		    RX_DMA_CTL_STAT_PORT_FATAL))
 3842		err = -EINVAL;
 3843
 3844	if (err) {
 3845		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
 3846			   rp->rx_channel,
 3847			   (unsigned long long) stat);
 3848
 3849		niu_log_rxchan_errors(np, rp, stat);
 3850	}
 3851
 3852	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 3853	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
 3854
 3855	return err;
 3856}
 3857
 3858static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
 3859				  u64 cs)
 3860{
 3861	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
 3862
 3863	if (cs & TX_CS_MBOX_ERR)
 3864		pr_cont("MBOX ");
 3865	if (cs & TX_CS_PKT_SIZE_ERR)
 3866		pr_cont("PKT_SIZE ");
 3867	if (cs & TX_CS_TX_RING_OFLOW)
 3868		pr_cont("TX_RING_OFLOW ");
 3869	if (cs & TX_CS_PREF_BUF_PAR_ERR)
 3870		pr_cont("PREF_BUF_PAR ");
 3871	if (cs & TX_CS_NACK_PREF)
 3872		pr_cont("NACK_PREF ");
 3873	if (cs & TX_CS_NACK_PKT_RD)
 3874		pr_cont("NACK_PKT_RD ");
 3875	if (cs & TX_CS_CONF_PART_ERR)
 3876		pr_cont("CONF_PART ");
 3877	if (cs & TX_CS_PKT_PRT_ERR)
 3878		pr_cont("PKT_PTR ");
 3879
 3880	pr_cont(")\n");
 3881}
 3882
 3883static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
 3884{
 3885	u64 cs, logh, logl;
 3886
 3887	cs = nr64(TX_CS(rp->tx_channel));
 3888	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
 3889	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
 3890
 3891	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
 3892		   rp->tx_channel,
 3893		   (unsigned long long)cs,
 3894		   (unsigned long long)logh,
 3895		   (unsigned long long)logl);
 3896
 3897	niu_log_txchan_errors(np, rp, cs);
 3898
 3899	return -ENODEV;
 3900}
 3901
 3902static int niu_mif_interrupt(struct niu *np)
 3903{
 3904	u64 mif_status = nr64(MIF_STATUS);
 3905	int phy_mdint = 0;
 3906
 3907	if (np->flags & NIU_FLAGS_XMAC) {
 3908		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
 3909
 3910		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
 3911			phy_mdint = 1;
 3912	}
 3913
 3914	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
 3915		   (unsigned long long)mif_status, phy_mdint);
 3916
 3917	return -ENODEV;
 3918}
 3919
 3920static void niu_xmac_interrupt(struct niu *np)
 3921{
 3922	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
 3923	u64 val;
 3924
 3925	val = nr64_mac(XTXMAC_STATUS);
 3926	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
 3927		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
 3928	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
 3929		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
 3930	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
 3931		mp->tx_fifo_errors++;
 3932	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
 3933		mp->tx_overflow_errors++;
 3934	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
 3935		mp->tx_max_pkt_size_errors++;
 3936	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
 3937		mp->tx_underflow_errors++;
 3938
 3939	val = nr64_mac(XRXMAC_STATUS);
 3940	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
 3941		mp->rx_local_faults++;
 3942	if (val & XRXMAC_STATUS_RFLT_DET)
 3943		mp->rx_remote_faults++;
 3944	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
 3945		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
 3946	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
 3947		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
 3948	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
 3949		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
 3950	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
 3951		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
 3952	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
 3953		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
 3956	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
 3957		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
 3958	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
 3959		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
 3960	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
 3961		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
 3962	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
 3963		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
 3964	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
 3965		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
 3966	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
 3967		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
 3968	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
 3969		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
 3970	if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
 3971		mp->rx_octets += RXMAC_BT_CNT_COUNT;
 3972	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
 3973		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
 3974	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
 3975		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
 3976	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
 3977		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
 3978	if (val & XRXMAC_STATUS_RXUFLOW)
 3979		mp->rx_underflows++;
 3980	if (val & XRXMAC_STATUS_RXOFLOW)
 3981		mp->rx_overflows++;
 3982
 3983	val = nr64_mac(XMAC_FC_STAT);
 3984	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
 3985		mp->pause_off_state++;
 3986	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
 3987		mp->pause_on_state++;
 3988	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
 3989		mp->pause_received++;
 3990}
 3991
 3992static void niu_bmac_interrupt(struct niu *np)
 3993{
 3994	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
 3995	u64 val;
 3996
 3997	val = nr64_mac(BTXMAC_STATUS);
 3998	if (val & BTXMAC_STATUS_UNDERRUN)
 3999		mp->tx_underflow_errors++;
 4000	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
 4001		mp->tx_max_pkt_size_errors++;
 4002	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
 4003		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
 4004	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
 4005		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
 4006
 4007	val = nr64_mac(BRXMAC_STATUS);
 4008	if (val & BRXMAC_STATUS_OVERFLOW)
 4009		mp->rx_overflows++;
 4010	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
 4011		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
 4012	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
 4013		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
 4014	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
 4015		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
 4016	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
 4017		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
 4018
 4019	val = nr64_mac(BMAC_CTRL_STATUS);
 4020	if (val & BMAC_CTRL_STATUS_NOPAUSE)
 4021		mp->pause_off_state++;
 4022	if (val & BMAC_CTRL_STATUS_PAUSE)
 4023		mp->pause_on_state++;
 4024	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
 4025		mp->pause_received++;
 4026}
 4027
 4028static int niu_mac_interrupt(struct niu *np)
 4029{
 4030	if (np->flags & NIU_FLAGS_XMAC)
 4031		niu_xmac_interrupt(np);
 4032	else
 4033		niu_bmac_interrupt(np);
 4034
 4035	return 0;
 4036}
 4037
 4038static void niu_log_device_error(struct niu *np, u64 stat)
 4039{
 4040	netdev_err(np->dev, "Core device errors ( ");
 4041
 4042	if (stat & SYS_ERR_MASK_META2)
 4043		pr_cont("META2 ");
 4044	if (stat & SYS_ERR_MASK_META1)
 4045		pr_cont("META1 ");
 4046	if (stat & SYS_ERR_MASK_PEU)
 4047		pr_cont("PEU ");
 4048	if (stat & SYS_ERR_MASK_TXC)
 4049		pr_cont("TXC ");
 4050	if (stat & SYS_ERR_MASK_RDMC)
 4051		pr_cont("RDMC ");
 4052	if (stat & SYS_ERR_MASK_TDMC)
 4053		pr_cont("TDMC ");
 4054	if (stat & SYS_ERR_MASK_ZCP)
 4055		pr_cont("ZCP ");
 4056	if (stat & SYS_ERR_MASK_FFLP)
 4057		pr_cont("FFLP ");
 4058	if (stat & SYS_ERR_MASK_IPP)
 4059		pr_cont("IPP ");
 4060	if (stat & SYS_ERR_MASK_MAC)
 4061		pr_cont("MAC ");
 4062	if (stat & SYS_ERR_MASK_SMX)
 4063		pr_cont("SMX ");
 4064
 4065	pr_cont(")\n");
 4066}
 4067
 4068static int niu_device_error(struct niu *np)
 4069{
 4070	u64 stat = nr64(SYS_ERR_STAT);
 4071
 4072	netdev_err(np->dev, "Core device error, stat[%llx]\n",
 4073		   (unsigned long long)stat);
 4074
 4075	niu_log_device_error(np, stat);
 4076
 4077	return -ENODEV;
 4078}
 4079
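/* Slow path servicing for a logical device group: the low 32 bits of
 * v1 flag RX channel errors, bits 32-62 flag TX channel errors, and
 * bit 63 of v0/v1 is the MIF logical device.  v2 carries the MAC
 * (mask 0x01ef) and core device error (mask 0x0210) devices.  If any
 * handler reports an unrecoverable error, all interrupts are disabled
 * and the error is propagated to the caller.
 */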
 4080static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
 4081			      u64 v0, u64 v1, u64 v2)
 4082{
 4084	int i, err = 0;
 4085
 4086	lp->v0 = v0;
 4087	lp->v1 = v1;
 4088	lp->v2 = v2;
 4089
 4090	if (v1 & 0x00000000ffffffffULL) {
 4091		u32 rx_vec = (v1 & 0xffffffff);
 4092
 4093		for (i = 0; i < np->num_rx_rings; i++) {
 4094			struct rx_ring_info *rp = &np->rx_rings[i];
 4095
 4096			if (rx_vec & (1 << rp->rx_channel)) {
 4097				int r = niu_rx_error(np, rp);
 4098				if (r) {
 4099					err = r;
 4100				} else {
 4101					if (!v0)
 4102						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 4103						     RX_DMA_CTL_STAT_MEX);
 4104				}
 4105			}
 4106		}
 4107	}
 4108	if (v1 & 0x7fffffff00000000ULL) {
 4109		u32 tx_vec = (v1 >> 32) & 0x7fffffff;
 4110
 4111		for (i = 0; i < np->num_tx_rings; i++) {
 4112			struct tx_ring_info *rp = &np->tx_rings[i];
 4113
 4114			if (tx_vec & (1 << rp->tx_channel)) {
 4115				int r = niu_tx_error(np, rp);
 4116				if (r)
 4117					err = r;
 4118			}
 4119		}
 4120	}
 4121	if ((v0 | v1) & 0x8000000000000000ULL) {
 4122		int r = niu_mif_interrupt(np);
 4123		if (r)
 4124			err = r;
 4125	}
 4126	if (v2) {
 4127		if (v2 & 0x01ef) {
 4128			int r = niu_mac_interrupt(np);
 4129			if (r)
 4130				err = r;
 4131		}
 4132		if (v2 & 0x0210) {
 4133			int r = niu_device_error(np);
 4134			if (r)
 4135				err = r;
 4136		}
 4137	}
 4138
 4139	if (err)
 4140		niu_enable_interrupts(np, 0);
 4141
 4142	return err;
 4143}
 4144
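/* Acknowledge the RCR threshold/timeout indications for this channel
 * (the status register is presumably write-one-to-clear); the actual
 * packet processing is deferred to the NAPI poll handler.
 */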
 4145static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
 4146			    int ldn)
 4147{
 4148	struct rxdma_mailbox *mbox = rp->mbox;
 4149	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
 4150
 4151	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
 4152		      RX_DMA_CTL_STAT_RCRTO);
 4153	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
 4154
 4155	netif_printk(np, intr, KERN_DEBUG, np->dev,
 4156		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
 4157}
 4158
 4159static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
 4160			    int ldn)
 4161{
 4162	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
 4163
 4164	netif_printk(np, intr, KERN_DEBUG, np->dev,
 4165		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
 4166}
 4167
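/* For each RX/TX channel owned by this LDG, mask further interrupts
 * via LD_IM0 and latch the per-channel status for the NAPI handler.
 * Within v0, the low 32 bits form the RX channel vector and the high
 * 32 bits the TX channel vector.
 */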
 4168static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
 4169{
 4170	struct niu_parent *parent = np->parent;
 4171	u32 rx_vec, tx_vec;
 4172	int i;
 4173
 4174	tx_vec = (v0 >> 32);
 4175	rx_vec = (v0 & 0xffffffff);
 4176
 4177	for (i = 0; i < np->num_rx_rings; i++) {
 4178		struct rx_ring_info *rp = &np->rx_rings[i];
 4179		int ldn = LDN_RXDMA(rp->rx_channel);
 4180
 4181		if (parent->ldg_map[ldn] != ldg)
 4182			continue;
 4183
 4184		nw64(LD_IM0(ldn), LD_IM0_MASK);
 4185		if (rx_vec & (1 << rp->rx_channel))
 4186			niu_rxchan_intr(np, rp, ldn);
 4187	}
 4188
 4189	for (i = 0; i < np->num_tx_rings; i++) {
 4190		struct tx_ring_info *rp = &np->tx_rings[i];
 4191		int ldn = LDN_TXDMA(rp->tx_channel);
 4192
 4193		if (parent->ldg_map[ldn] != ldg)
 4194			continue;
 4195
 4196		nw64(LD_IM0(ldn), LD_IM0_MASK);
 4197		if (tx_vec & (1 << rp->tx_channel))
 4198			niu_txchan_intr(np, rp, ldn);
 4199	}
 4200}
 4201
 4202static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
 4203			      u64 v0, u64 v1, u64 v2)
 4204{
 4205	if (likely(napi_schedule_prep(&lp->napi))) {
 4206		lp->v0 = v0;
 4207		lp->v1 = v1;
 4208		lp->v2 = v2;
 4209		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
 4210		__napi_schedule(&lp->napi);
 4211	}
 4212}
 4213
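/* Top-level hard IRQ handler, one per logical device group.  The
 * three LDSV registers describe every logical device in the group;
 * plain data-path activity is handed off to NAPI, while MIF and any
 * v1/v2 error bits take the slow path above.
 */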
 4214static irqreturn_t niu_interrupt(int irq, void *dev_id)
 4215{
 4216	struct niu_ldg *lp = dev_id;
 4217	struct niu *np = lp->np;
 4218	int ldg = lp->ldg_num;
 4219	unsigned long flags;
 4220	u64 v0, v1, v2;
 4221
 4222	if (netif_msg_intr(np))
 4223		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
 4224		       __func__, lp, ldg);
 4225
 4226	spin_lock_irqsave(&np->lock, flags);
 4227
 4228	v0 = nr64(LDSV0(ldg));
 4229	v1 = nr64(LDSV1(ldg));
 4230	v2 = nr64(LDSV2(ldg));
 4231
 4232	if (netif_msg_intr(np))
 4233		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
 4234		       (unsigned long long) v0,
 4235		       (unsigned long long) v1,
 4236		       (unsigned long long) v2);
 4237
 4238	if (unlikely(!v0 && !v1 && !v2)) {
 4239		spin_unlock_irqrestore(&np->lock, flags);
 4240		return IRQ_NONE;
 4241	}
 4242
 4243	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
 4244		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
 4245		if (err)
 4246			goto out;
 4247	}
 4248	if (likely(v0 & ~((u64)1 << LDN_MIF)))
 4249		niu_schedule_napi(np, lp, v0, v1, v2);
 4250	else
 4251		niu_ldg_rearm(np, lp, 1);
 4252out:
 4253	spin_unlock_irqrestore(&np->lock, flags);
 4254
 4255	return IRQ_HANDLED;
 4256}
 4257
 4258static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
 4259{
 4260	if (rp->mbox) {
 4261		np->ops->free_coherent(np->device,
 4262				       sizeof(struct rxdma_mailbox),
 4263				       rp->mbox, rp->mbox_dma);
 4264		rp->mbox = NULL;
 4265	}
 4266	if (rp->rcr) {
 4267		np->ops->free_coherent(np->device,
 4268				       MAX_RCR_RING_SIZE * sizeof(__le64),
 4269				       rp->rcr, rp->rcr_dma);
 4270		rp->rcr = NULL;
 4271		rp->rcr_table_size = 0;
 4272		rp->rcr_index = 0;
 4273	}
 4274	if (rp->rbr) {
 4275		niu_rbr_free(np, rp);
 4276
 4277		np->ops->free_coherent(np->device,
 4278				       MAX_RBR_RING_SIZE * sizeof(__le32),
 4279				       rp->rbr, rp->rbr_dma);
 4280		rp->rbr = NULL;
 4281		rp->rbr_table_size = 0;
 4282		rp->rbr_index = 0;
 4283	}
 4284	kfree(rp->rxhash);
 4285	rp->rxhash = NULL;
 4286}
 4287
 4288static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
 4289{
 4290	if (rp->mbox) {
 4291		np->ops->free_coherent(np->device,
 4292				       sizeof(struct txdma_mailbox),
 4293				       rp->mbox, rp->mbox_dma);
 4294		rp->mbox = NULL;
 4295	}
 4296	if (rp->descr) {
 4297		int i;
 4298
 4299		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
 4300			if (rp->tx_buffs[i].skb)
 4301				(void) release_tx_packet(np, rp, i);
 4302		}
 4303
 4304		np->ops->free_coherent(np->device,
 4305				       MAX_TX_RING_SIZE * sizeof(__le64),
 4306				       rp->descr, rp->descr_dma);
 4307		rp->descr = NULL;
 4308		rp->pending = 0;
 4309		rp->prod = 0;
 4310		rp->cons = 0;
 4311		rp->wrap_bit = 0;
 4312	}
 4313}
 4314
 4315static void niu_free_channels(struct niu *np)
 4316{
 4317	int i;
 4318
 4319	if (np->rx_rings) {
 4320		for (i = 0; i < np->num_rx_rings; i++) {
 4321			struct rx_ring_info *rp = &np->rx_rings[i];
 4322
 4323			niu_free_rx_ring_info(np, rp);
 4324		}
 4325		kfree(np->rx_rings);
 4326		np->rx_rings = NULL;
 4327		np->num_rx_rings = 0;
 4328	}
 4329
 4330	if (np->tx_rings) {
 4331		for (i = 0; i < np->num_tx_rings; i++) {
 4332			struct tx_ring_info *rp = &np->tx_rings[i];
 4333
 4334			niu_free_tx_ring_info(np, rp);
 4335		}
 4336		kfree(np->tx_rings);
 4337		np->tx_rings = NULL;
 4338		np->num_tx_rings = 0;
 4339	}
 4340}
 4341
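/* Allocate the per-channel RX state: the page hash table, the RXDMA
 * mailbox, the completion ring (RCR) and the buffer ring (RBR).  The
 * hardware requires 64-byte alignment for the DMA blocks, hence the
 * explicit checks after each allocation.
 */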
 4342static int niu_alloc_rx_ring_info(struct niu *np,
 4343				  struct rx_ring_info *rp)
 4344{
 4345	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
 4346
 4347	rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
 4348			     GFP_KERNEL);
 4349	if (!rp->rxhash)
 4350		return -ENOMEM;
 4351
 4352	rp->mbox = np->ops->alloc_coherent(np->device,
 4353					   sizeof(struct rxdma_mailbox),
 4354					   &rp->mbox_dma, GFP_KERNEL);
 4355	if (!rp->mbox)
 4356		return -ENOMEM;
 4357	if ((unsigned long)rp->mbox & (64UL - 1)) {
 4358		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
 4359			   rp->mbox);
 4360		return -EINVAL;
 4361	}
 4362
 4363	rp->rcr = np->ops->alloc_coherent(np->device,
 4364					  MAX_RCR_RING_SIZE * sizeof(__le64),
 4365					  &rp->rcr_dma, GFP_KERNEL);
 4366	if (!rp->rcr)
 4367		return -ENOMEM;
 4368	if ((unsigned long)rp->rcr & (64UL - 1)) {
 4369		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
 4370			   rp->rcr);
 4371		return -EINVAL;
 4372	}
 4373	rp->rcr_table_size = MAX_RCR_RING_SIZE;
 4374	rp->rcr_index = 0;
 4375
 4376	rp->rbr = np->ops->alloc_coherent(np->device,
 4377					  MAX_RBR_RING_SIZE * sizeof(__le32),
 4378					  &rp->rbr_dma, GFP_KERNEL);
 4379	if (!rp->rbr)
 4380		return -ENOMEM;
 4381	if ((unsigned long)rp->rbr & (64UL - 1)) {
 4382		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
 4383			   rp->rbr);
 4384		return -EINVAL;
 4385	}
 4386	rp->rbr_table_size = MAX_RBR_RING_SIZE;
 4387	rp->rbr_index = 0;
 4388	rp->rbr_pending = 0;
 4389
 4390	return 0;
 4391}
 4392
 4393static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
 4394{
 4395	int mtu = np->dev->mtu;
 4396
 4397	/* These values are recommended by the HW designers for fair
 4398	 * utilization of DRR amongst the rings.
 4399	 */
 4400	rp->max_burst = mtu + 32;
 4401	if (rp->max_burst > 4096)
 4402		rp->max_burst = 4096;
 4403}
 4404
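/* Allocate the per-channel TX state (mailbox plus descriptor ring),
 * again enforcing the 64-byte alignment the chip demands.
 */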
 4405static int niu_alloc_tx_ring_info(struct niu *np,
 4406				  struct tx_ring_info *rp)
 4407{
 4408	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
 4409
 4410	rp->mbox = np->ops->alloc_coherent(np->device,
 4411					   sizeof(struct txdma_mailbox),
 4412					   &rp->mbox_dma, GFP_KERNEL);
 4413	if (!rp->mbox)
 4414		return -ENOMEM;
 4415	if ((unsigned long)rp->mbox & (64UL - 1)) {
 4416		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
 4417			   rp->mbox);
 4418		return -EINVAL;
 4419	}
 4420
 4421	rp->descr = np->ops->alloc_coherent(np->device,
 4422					    MAX_TX_RING_SIZE * sizeof(__le64),
 4423					    &rp->descr_dma, GFP_KERNEL);
 4424	if (!rp->descr)
 4425		return -ENOMEM;
 4426	if ((unsigned long)rp->descr & (64UL - 1)) {
 4427		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
 4428			   rp->descr);
 4429		return -EINVAL;
 4430	}
 4431
 4432	rp->pending = MAX_TX_RING_SIZE;
 4433	rp->prod = 0;
 4434	rp->cons = 0;
 4435	rp->wrap_bit = 0;
 4436
 4437	/* XXX make these configurable... XXX */
 4438	rp->mark_freq = rp->pending / 4;
 4439
 4440	niu_set_max_burst(np, rp);
 4441
 4442	return 0;
 4443}
 4444
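/* Choose the four RBR buffer sizes offered for the current MTU.  The
 * block size is one page, capped at 32K, and rbr_sizes[3] is always
 * one full block.
 */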
 4445static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
 4446{
 4447	u16 bss;
 4448
 4449	bss = min(PAGE_SHIFT, 15);
 4450
 4451	rp->rbr_block_size = 1 << bss;
 4452	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
 4453
 4454	rp->rbr_sizes[0] = 256;
 4455	rp->rbr_sizes[1] = 1024;
 4456	if (np->dev->mtu > ETH_DATA_LEN) {
 4457		switch (PAGE_SIZE) {
 4458		case 4 * 1024:
 4459			rp->rbr_sizes[2] = 4096;
 4460			break;
 4461
 4462		default:
 4463			rp->rbr_sizes[2] = 8192;
 4464			break;
 4465		}
 4466	} else {
 4467		rp->rbr_sizes[2] = 2048;
 4468	}
 4469	rp->rbr_sizes[3] = rp->rbr_block_size;
 4470}
 4471
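/* Carve this port's RX/TX channel ranges out of the global channel
 * space by summing the channel counts of the lower-numbered ports
 * (e.g. if ports 0 and 1 each own four RX channels, port 2 starts at
 * channel 8), then allocate and initialize ring state for each.
 */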
 4472static int niu_alloc_channels(struct niu *np)
 4473{
 4474	struct niu_parent *parent = np->parent;
 4475	int first_rx_channel, first_tx_channel;
 4476	int num_rx_rings, num_tx_rings;
 4477	struct rx_ring_info *rx_rings;
 4478	struct tx_ring_info *tx_rings;
 4479	int i, port, err;
 4480
 4481	port = np->port;
 4482	first_rx_channel = first_tx_channel = 0;
 4483	for (i = 0; i < port; i++) {
 4484		first_rx_channel += parent->rxchan_per_port[i];
 4485		first_tx_channel += parent->txchan_per_port[i];
 4486	}
 4487
 4488	num_rx_rings = parent->rxchan_per_port[port];
 4489	num_tx_rings = parent->txchan_per_port[port];
 4490
 4491	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
 4492			   GFP_KERNEL);
 4493	err = -ENOMEM;
 4494	if (!rx_rings)
 4495		goto out_err;
 4496
 4497	np->num_rx_rings = num_rx_rings;
 4498	smp_wmb();
 4499	np->rx_rings = rx_rings;
 4500
 4501	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
 4502
 4503	for (i = 0; i < np->num_rx_rings; i++) {
 4504		struct rx_ring_info *rp = &np->rx_rings[i];
 4505
 4506		rp->np = np;
 4507		rp->rx_channel = first_rx_channel + i;
 4508
 4509		err = niu_alloc_rx_ring_info(np, rp);
 4510		if (err)
 4511			goto out_err;
 4512
 4513		niu_size_rbr(np, rp);
 4514
 4515		/* XXX better defaults, configurable, etc... XXX */
 4516		rp->nonsyn_window = 64;
 4517		rp->nonsyn_threshold = rp->rcr_table_size - 64;
 4518		rp->syn_window = 64;
 4519		rp->syn_threshold = rp->rcr_table_size - 64;
 4520		rp->rcr_pkt_threshold = 16;
 4521		rp->rcr_timeout = 8;
 4522		rp->rbr_kick_thresh = RBR_REFILL_MIN;
 4523		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
 4524			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
 4525
 4526		err = niu_rbr_fill(np, rp, GFP_KERNEL);
 4527		if (err)
  4528				goto out_err;
 4529	}
 4530
 4531	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
 4532			   GFP_KERNEL);
 4533	err = -ENOMEM;
 4534	if (!tx_rings)
 4535		goto out_err;
 4536
 4537	np->num_tx_rings = num_tx_rings;
 4538	smp_wmb();
 4539	np->tx_rings = tx_rings;
 4540
 4541	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
 4542
 4543	for (i = 0; i < np->num_tx_rings; i++) {
 4544		struct tx_ring_info *rp = &np->tx_rings[i];
 4545
 4546		rp->np = np;
 4547		rp->tx_channel = first_tx_channel + i;
 4548
 4549		err = niu_alloc_tx_ring_info(np, rp);
 4550		if (err)
 4551			goto out_err;
 4552	}
 4553
 4554	return 0;
 4555
 4556out_err:
 4557	niu_free_channels(np);
 4558	return err;
 4559}
 4560
 4561static int niu_tx_cs_sng_poll(struct niu *np, int channel)
 4562{
 4563	int limit = 1000;
 4564
 4565	while (--limit > 0) {
 4566		u64 val = nr64(TX_CS(channel));
 4567		if (val & TX_CS_SNG_STATE)
 4568			return 0;
 4569	}
 4570	return -ENODEV;
 4571}
 4572
 4573static int niu_tx_channel_stop(struct niu *np, int channel)
 4574{
 4575	u64 val = nr64(TX_CS(channel));
 4576
 4577	val |= TX_CS_STOP_N_GO;
 4578	nw64(TX_CS(channel), val);
 4579
 4580	return niu_tx_cs_sng_poll(np, channel);
 4581}
 4582
 4583static int niu_tx_cs_reset_poll(struct niu *np, int channel)
 4584{
 4585	int limit = 1000;
 4586
 4587	while (--limit > 0) {
 4588		u64 val = nr64(TX_CS(channel));
 4589		if (!(val & TX_CS_RST))
 4590			return 0;
 4591	}
 4592	return -ENODEV;
 4593}
 4594
 4595static int niu_tx_channel_reset(struct niu *np, int channel)
 4596{
 4597	u64 val = nr64(TX_CS(channel));
 4598	int err;
 4599
 4600	val |= TX_CS_RST;
 4601	nw64(TX_CS(channel), val);
 4602
 4603	err = niu_tx_cs_reset_poll(np, channel);
 4604	if (!err)
 4605		nw64(TX_RING_KICK(channel), 0);
 4606
 4607	return err;
 4608}
 4609
 4610static int niu_tx_channel_lpage_init(struct niu *np, int channel)
 4611{
 4612	u64 val;
 4613
 4614	nw64(TX_LOG_MASK1(channel), 0);
 4615	nw64(TX_LOG_VAL1(channel), 0);
 4616	nw64(TX_LOG_MASK2(channel), 0);
 4617	nw64(TX_LOG_VAL2(channel), 0);
 4618	nw64(TX_LOG_PAGE_RELO1(channel), 0);
 4619	nw64(TX_LOG_PAGE_RELO2(channel), 0);
 4620	nw64(TX_LOG_PAGE_HDL(channel), 0);
 4621
 4622	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
 4623	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
 4624	nw64(TX_LOG_PAGE_VLD(channel), val);
 4625
 4626	/* XXX TXDMA 32bit mode? XXX */
 4627
 4628	return 0;
 4629}
 4630
 4631static void niu_txc_enable_port(struct niu *np, int on)
 4632{
 4633	unsigned long flags;
 4634	u64 val, mask;
 4635
 4636	niu_lock_parent(np, flags);
 4637	val = nr64(TXC_CONTROL);
 4638	mask = (u64)1 << np->port;
 4639	if (on) {
 4640		val |= TXC_CONTROL_ENABLE | mask;
 4641	} else {
 4642		val &= ~mask;
 4643		if ((val & ~TXC_CONTROL_ENABLE) == 0)
 4644			val &= ~TXC_CONTROL_ENABLE;
 4645	}
 4646	nw64(TXC_CONTROL, val);
 4647	niu_unlock_parent(np, flags);
 4648}
 4649
 4650static void niu_txc_set_imask(struct niu *np, u64 imask)
 4651{
 4652	unsigned long flags;
 4653	u64 val;
 4654
 4655	niu_lock_parent(np, flags);
 4656	val = nr64(TXC_INT_MASK);
 4657	val &= ~TXC_INT_MASK_VAL(np->port);
  4658		val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
		nw64(TXC_INT_MASK, val);
  4659		niu_unlock_parent(np, flags);
 4660}
 4661
 4662static void niu_txc_port_dma_enable(struct niu *np, int on)
 4663{
 4664	u64 val = 0;
 4665
 4666	if (on) {
 4667		int i;
 4668
 4669		for (i = 0; i < np->num_tx_rings; i++)
 4670			val |= (1 << np->tx_rings[i].tx_channel);
 4671	}
 4672	nw64(TXC_PORT_DMA(np->port), val);
 4673}
 4674
 4675static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 4676{
 4677	int err, channel = rp->tx_channel;
 4678	u64 val, ring_len;
 4679
 4680	err = niu_tx_channel_stop(np, channel);
 4681	if (err)
 4682		return err;
 4683
 4684	err = niu_tx_channel_reset(np, channel);
 4685	if (err)
 4686		return err;
 4687
 4688	err = niu_tx_channel_lpage_init(np, channel);
 4689	if (err)
 4690		return err;
 4691
 4692	nw64(TXC_DMA_MAX(channel), rp->max_burst);
 4693	nw64(TX_ENT_MSK(channel), 0);
 4694
 4695	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
 4696			      TX_RNG_CFIG_STADDR)) {
 4697		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
 4698			   channel, (unsigned long long)rp->descr_dma);
 4699		return -EINVAL;
 4700	}
 4701
  4702	/* The length field in TX_RNG_CFIG is measured in 64-byte
  4703	 * blocks.  rp->pending counts 8-byte TX descriptors, so
  4704	 * dividing the descriptor count by 8 yields the block count
  4705	 * the chip expects: a 256-entry ring is 2048 bytes = 32 blocks.
  4706	 */
 4707	ring_len = (rp->pending / 8);
 4708
 4709	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
 4710	       rp->descr_dma);
 4711	nw64(TX_RNG_CFIG(channel), val);
 4712
 4713	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
 4714	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
 4715		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
 4716			    channel, (unsigned long long)rp->mbox_dma);
 4717		return -EINVAL;
 4718	}
 4719	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
 4720	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
 4721
 4722	nw64(TX_CS(channel), 0);
 4723
 4724	rp->last_pkt_cnt = 0;
 4725
 4726	return 0;
 4727}
 4728
 4729static void niu_init_rdc_groups(struct niu *np)
 4730{
 4731	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
 4732	int i, first_table_num = tp->first_table_num;
 4733
 4734	for (i = 0; i < tp->num_tables; i++) {
 4735		struct rdc_table *tbl = &tp->tables[i];
 4736		int this_table = first_table_num + i;
 4737		int slot;
 4738
 4739		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
 4740			nw64(RDC_TBL(this_table, slot),
 4741			     tbl->rxdma_channel[slot]);
 4742	}
 4743
 4744	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
 4745}
 4746
 4747static void niu_init_drr_weight(struct niu *np)
 4748{
 4749	int type = phy_decode(np->parent->port_phy, np->port);
 4750	u64 val;
 4751
 4752	switch (type) {
 4753	case PORT_TYPE_10G:
 4754		val = PT_DRR_WEIGHT_DEFAULT_10G;
 4755		break;
 4756
 4757	case PORT_TYPE_1G:
 4758	default:
 4759		val = PT_DRR_WEIGHT_DEFAULT_1G;
 4760		break;
 4761	}
 4762	nw64(PT_DRR_WT(np->port), val);
 4763}
 4764
 4765static int niu_init_hostinfo(struct niu *np)
 4766{
 4767	struct niu_parent *parent = np->parent;
 4768	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 4769	int i, err, num_alt = niu_num_alt_addr(np);
 4770	int first_rdc_table = tp->first_table_num;
 4771
 4772	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 4773	if (err)
 4774		return err;
 4775
 4776	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 4777	if (err)
 4778		return err;
 4779
 4780	for (i = 0; i < num_alt; i++) {
 4781		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
 4782		if (err)
 4783			return err;
 4784	}
 4785
 4786	return 0;
 4787}
 4788
 4789static int niu_rx_channel_reset(struct niu *np, int channel)
 4790{
 4791	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
 4792				      RXDMA_CFIG1_RST, 1000, 10,
 4793				      "RXDMA_CFIG1");
 4794}
 4795
 4796static int niu_rx_channel_lpage_init(struct niu *np, int channel)
 4797{
 4798	u64 val;
 4799
 4800	nw64(RX_LOG_MASK1(channel), 0);
 4801	nw64(RX_LOG_VAL1(channel), 0);
 4802	nw64(RX_LOG_MASK2(channel), 0);
 4803	nw64(RX_LOG_VAL2(channel), 0);
 4804	nw64(RX_LOG_PAGE_RELO1(channel), 0);
 4805	nw64(RX_LOG_PAGE_RELO2(channel), 0);
 4806	nw64(RX_LOG_PAGE_HDL(channel), 0);
 4807
 4808	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
 4809	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
 4810	nw64(RX_LOG_PAGE_VLD(channel), val);
 4811
 4812	return 0;
 4813}
 4814
 4815static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
 4816{
 4817	u64 val;
 4818
 4819	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
 4820	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
 4821	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
 4822	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
 4823	nw64(RDC_RED_PARA(rp->rx_channel), val);
 4824}
 4825
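/* Encode the RBR block size and the three programmable buffer sizes
 * into an RBR_CFIG_B value, rejecting any geometry the hardware
 * cannot represent.
 */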
 4826static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
 4827{
 4828	u64 val = 0;
 4829
 4830	*ret = 0;
 4831	switch (rp->rbr_block_size) {
 4832	case 4 * 1024:
 4833		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4834		break;
 4835	case 8 * 1024:
 4836		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4837		break;
 4838	case 16 * 1024:
 4839		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4840		break;
 4841	case 32 * 1024:
 4842		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4843		break;
 4844	default:
 4845		return -EINVAL;
 4846	}
 4847	val |= RBR_CFIG_B_VLD2;
 4848	switch (rp->rbr_sizes[2]) {
 4849	case 2 * 1024:
 4850		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4851		break;
 4852	case 4 * 1024:
 4853		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4854		break;
 4855	case 8 * 1024:
 4856		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4857		break;
 4858	case 16 * 1024:
 4859		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4860		break;
 4861
 4862	default:
 4863		return -EINVAL;
 4864	}
 4865	val |= RBR_CFIG_B_VLD1;
 4866	switch (rp->rbr_sizes[1]) {
 4867	case 1 * 1024:
 4868		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4869		break;
 4870	case 2 * 1024:
 4871		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4872		break;
 4873	case 4 * 1024:
 4874		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4875		break;
 4876	case 8 * 1024:
 4877		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4878		break;
 4879
 4880	default:
 4881		return -EINVAL;
 4882	}
 4883	val |= RBR_CFIG_B_VLD0;
 4884	switch (rp->rbr_sizes[0]) {
 4885	case 256:
 4886		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
 4887		break;
 4888	case 512:
 4889		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
 4890		break;
 4891	case 1 * 1024:
 4892		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
 4893		break;
 4894	case 2 * 1024:
 4895		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
 4896		break;
 4897
 4898	default:
 4899		return -EINVAL;
 4900	}
 4901
 4902	*ret = val;
 4903	return 0;
 4904}
 4905
 4906static int niu_enable_rx_channel(struct niu *np, int channel, int on)
 4907{
 4908	u64 val = nr64(RXDMA_CFIG1(channel));
 4909	int limit;
 4910
 4911	if (on)
 4912		val |= RXDMA_CFIG1_EN;
 4913	else
 4914		val &= ~RXDMA_CFIG1_EN;
 4915	nw64(RXDMA_CFIG1(channel), val);
 4916
 4917	limit = 1000;
 4918	while (--limit > 0) {
 4919		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
 4920			break;
 4921		udelay(10);
 4922	}
 4923	if (limit <= 0)
 4924		return -ENODEV;
 4925	return 0;
 4926}
 4927
 4928static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 4929{
 4930	int err, channel = rp->rx_channel;
 4931	u64 val;
 4932
 4933	err = niu_rx_channel_reset(np, channel);
 4934	if (err)
 4935		return err;
 4936
 4937	err = niu_rx_channel_lpage_init(np, channel);
 4938	if (err)
 4939		return err;
 4940
 4941	niu_rx_channel_wred_init(np, rp);
 4942
 4943	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
 4944	nw64(RX_DMA_CTL_STAT(channel),
 4945	     (RX_DMA_CTL_STAT_MEX |
 4946	      RX_DMA_CTL_STAT_RCRTHRES |
 4947	      RX_DMA_CTL_STAT_RCRTO |
 4948	      RX_DMA_CTL_STAT_RBR_EMPTY));
 4949	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
 4950	nw64(RXDMA_CFIG2(channel),
 4951	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
 4952	      RXDMA_CFIG2_FULL_HDR));
 4953	nw64(RBR_CFIG_A(channel),
 4954	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
 4955	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
 4956	err = niu_compute_rbr_cfig_b(rp, &val);
 4957	if (err)
 4958		return err;
 4959	nw64(RBR_CFIG_B(channel), val);
 4960	nw64(RCRCFIG_A(channel),
 4961	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
 4962	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
 4963	nw64(RCRCFIG_B(channel),
 4964	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
 4965	     RCRCFIG_B_ENTOUT |
 4966	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
 4967
 4968	err = niu_enable_rx_channel(np, channel, 1);
 4969	if (err)
 4970		return err;
 4971
 4972	nw64(RBR_KICK(channel), rp->rbr_index);
 4973
 4974	val = nr64(RX_DMA_CTL_STAT(channel));
 4975	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
 4976	nw64(RX_DMA_CTL_STAT(channel), val);
 4977
 4978	return 0;
 4979}
 4980
 4981static int niu_init_rx_channels(struct niu *np)
 4982{
 4983	unsigned long flags;
 4984	u64 seed = jiffies_64;
 4985	int err, i;
 4986
 4987	niu_lock_parent(np, flags);
 4988	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
 4989	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
 4990	niu_unlock_parent(np, flags);
 4991
 4992	/* XXX RXDMA 32bit mode? XXX */
 4993
 4994	niu_init_rdc_groups(np);
 4995	niu_init_drr_weight(np);
 4996
 4997	err = niu_init_hostinfo(np);
 4998	if (err)
 4999		return err;
 5000
 5001	for (i = 0; i < np->num_rx_rings; i++) {
 5002		struct rx_ring_info *rp = &np->rx_rings[i];
 5003
 5004		err = niu_init_one_rx_channel(np, rp);
 5005		if (err)
 5006			return err;
 5007	}
 5008
 5009	return 0;
 5010}
 5011
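/* Install a catch-all TCAM rule for IP fragments: the NOPORT key bit
 * matches packets that carry no port numbers, and the association
 * data steers them using translation-result offset zero.
 */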
 5012static int niu_set_ip_frag_rule(struct niu *np)
 5013{
 5014	struct niu_parent *parent = np->parent;
 5015	struct niu_classifier *cp = &np->clas;
 5016	struct niu_tcam_entry *tp;
 5017	int index, err;
 5018
 5019	index = cp->tcam_top;
 5020	tp = &parent->tcam[index];
 5021
 5022	/* Note that the noport bit is the same in both ipv4 and
 5023	 * ipv6 format TCAM entries.
 5024	 */
 5025	memset(tp, 0, sizeof(*tp));
 5026	tp->key[1] = TCAM_V4KEY1_NOPORT;
 5027	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
 5028	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
 5029			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
 5030	err = tcam_write(np, index, tp->key, tp->key_mask);
 5031	if (err)
 5032		return err;
 5033	err = tcam_assoc_write(np, index, tp->assoc_data);
 5034	if (err)
 5035		return err;
 5036	tp->valid = 1;
 5037	cp->tcam_valid_entries++;
 5038
 5039	return 0;
 5040}
 5041
 5042static int niu_init_classifier_hw(struct niu *np)
 5043{
 5044	struct niu_parent *parent = np->parent;
 5045	struct niu_classifier *cp = &np->clas;
 5046	int i, err;
 5047
 5048	nw64(H1POLY, cp->h1_init);
 5049	nw64(H2POLY, cp->h2_init);
 5050
 5051	err = niu_init_hostinfo(np);
 5052	if (err)
 5053		return err;
 5054
 5055	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
 5056		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
 5057
 5058		vlan_tbl_write(np, i, np->port,
 5059			       vp->vlan_pref, vp->rdc_num);
 5060	}
 5061
 5062	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
 5063		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
 5064
 5065		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
 5066						ap->rdc_num, ap->mac_pref);
 5067		if (err)
 5068			return err;
 5069	}
 5070
 5071	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
 5072		int index = i - CLASS_CODE_USER_PROG1;
 5073
 5074		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
 5075		if (err)
 5076			return err;
 5077		err = niu_set_flow_key(np, i, parent->flow_key[index]);
 5078		if (err)
 5079			return err;
 5080	}
 5081
 5082	err = niu_set_ip_frag_rule(np);
 5083	if (err)
 5084		return err;
 5085
 5086	tcam_enable(np, 1);
 5087
 5088	return 0;
 5089}
 5090
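/* PIO access to the ZCP CFIFO RAM: load the five data words, program
 * the byte enables, kick off the access, then poll the busy bit.
 */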
 5091static int niu_zcp_write(struct niu *np, int index, u64 *data)
 5092{
 5093	nw64(ZCP_RAM_DATA0, data[0]);
 5094	nw64(ZCP_RAM_DATA1, data[1]);
 5095	nw64(ZCP_RAM_DATA2, data[2]);
 5096	nw64(ZCP_RAM_DATA3, data[3]);
 5097	nw64(ZCP_RAM_DATA4, data[4]);
 5098	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
 5099	nw64(ZCP_RAM_ACC,
 5100	     (ZCP_RAM_ACC_WRITE |
 5101	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
 5102	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
 5103
 5104	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5105				   1000, 100);
 5106}
 5107
 5108static int niu_zcp_read(struct niu *np, int index, u64 *data)
 5109{
 5110	int err;
 5111
 5112	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5113				  1000, 100);
 5114	if (err) {
 5115		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
 5116			   (unsigned long long)nr64(ZCP_RAM_ACC));
 5117		return err;
 5118	}
 5119
 5120	nw64(ZCP_RAM_ACC,
 5121	     (ZCP_RAM_ACC_READ |
 5122	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
 5123	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
 5124
 5125	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5126				  1000, 100);
 5127	if (err) {
 5128		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
 5129			   (unsigned long long)nr64(ZCP_RAM_ACC));
 5130		return err;
 5131	}
 5132
 5133	data[0] = nr64(ZCP_RAM_DATA0);
 5134	data[1] = nr64(ZCP_RAM_DATA1);
 5135	data[2] = nr64(ZCP_RAM_DATA2);
 5136	data[3] = nr64(ZCP_RAM_DATA3);
 5137	data[4] = nr64(ZCP_RAM_DATA4);
 5138
 5139	return 0;
 5140}
 5141
 5142static void niu_zcp_cfifo_reset(struct niu *np)
 5143{
 5144	u64 val = nr64(RESET_CFIFO);
 5145
 5146	val |= RESET_CFIFO_RST(np->port);
 5147	nw64(RESET_CFIFO, val);
 5148	udelay(10);
 5149
 5150	val &= ~RESET_CFIFO_RST(np->port);
 5151	nw64(RESET_CFIFO, val);
 5152}
 5153
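/* Zero out the entire CFIFO (its size depends on platform type and
 * port pair), then reset it and clear the ECC and interrupt state.
 * Note that ZCP_INT_MASK_ALL leaves all ZCP interrupts masked.
 */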
 5154static int niu_init_zcp(struct niu *np)
 5155{
 5156	u64 data[5], rbuf[5];
 5157	int i, max, err;
 5158
 5159	if (np->parent->plat_type != PLAT_TYPE_NIU) {
 5160		if (np->port == 0 || np->port == 1)
 5161			max = ATLAS_P0_P1_CFIFO_ENTRIES;
 5162		else
 5163			max = ATLAS_P2_P3_CFIFO_ENTRIES;
 5164	} else
 5165		max = NIU_CFIFO_ENTRIES;
 5166
 5167	data[0] = 0;
 5168	data[1] = 0;
 5169	data[2] = 0;
 5170	data[3] = 0;
 5171	data[4] = 0;
 5172
 5173	for (i = 0; i < max; i++) {
 5174		err = niu_zcp_write(np, i, data);
 5175		if (err)
 5176			return err;
 5177		err = niu_zcp_read(np, i, rbuf);
 5178		if (err)
 5179			return err;
 5180	}
 5181
 5182	niu_zcp_cfifo_reset(np);
 5183	nw64(CFIFO_ECC(np->port), 0);
 5184	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
 5185	(void) nr64(ZCP_INT_STAT);
 5186	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
 5187
 5188	return 0;
 5189}
 5190
 5191static void niu_ipp_write(struct niu *np, int index, u64 *data)
 5192{
 5193	u64 val = nr64_ipp(IPP_CFIG);
 5194
 5195	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
 5196	nw64_ipp(IPP_DFIFO_WR_PTR, index);
 5197	nw64_ipp(IPP_DFIFO_WR0, data[0]);
 5198	nw64_ipp(IPP_DFIFO_WR1, data[1]);
 5199	nw64_ipp(IPP_DFIFO_WR2, data[2]);
 5200	nw64_ipp(IPP_DFIFO_WR3, data[3]);
 5201	nw64_ipp(IPP_DFIFO_WR4, data[4]);
 5202	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
 5203}
 5204
 5205static void niu_ipp_read(struct niu *np, int index, u64 *data)
 5206{
 5207	nw64_ipp(IPP_DFIFO_RD_PTR, index);
 5208	data[0] = nr64_ipp(IPP_DFIFO_RD0);
 5209	data[1] = nr64_ipp(IPP_DFIFO_RD1);
 5210	data[2] = nr64_ipp(IPP_DFIFO_RD2);
 5211	data[3] = nr64_ipp(IPP_DFIFO_RD3);
 5212	data[4] = nr64_ipp(IPP_DFIFO_RD4);
 5213}
 5214
 5215static int niu_ipp_reset(struct niu *np)
 5216{
 5217	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
 5218					  1000, 100, "IPP_CFIG");
 5219}
 5220
 5221static int niu_init_ipp(struct niu *np)
 5222{
 5223	u64 data[5], rbuf[5], val;
 5224	int i, max, err;
 5225
 5226	if (np->parent->plat_type != PLAT_TYPE_NIU) {
 5227		if (np->port == 0 || np->port == 1)
 5228			max = ATLAS_P0_P1_DFIFO_ENTRIES;
 5229		else
 5230			max = ATLAS_P2_P3_DFIFO_ENTRIES;
 5231	} else
 5232		max = NIU_DFIFO_ENTRIES;
 5233
 5234	data[0] = 0;
 5235	data[1] = 0;
 5236	data[2] = 0;
 5237	data[3] = 0;
 5238	data[4] = 0;
 5239
 5240	for (i = 0; i < max; i++) {
 5241		niu_ipp_write(np, i, data);
 5242		niu_ipp_read(np, i, rbuf);
 5243	}
 5244
 5245	(void) nr64_ipp(IPP_INT_STAT);
 5246	(void) nr64_ipp(IPP_INT_STAT);
 5247
 5248	err = niu_ipp_reset(np);
 5249	if (err)
 5250		return err;
 5251
 5252	(void) nr64_ipp(IPP_PKT_DIS);
 5253	(void) nr64_ipp(IPP_BAD_CS_CNT);
 5254	(void) nr64_ipp(IPP_ECC);
 5255
 5256	(void) nr64_ipp(IPP_INT_STAT);
 5257
 5258	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
 5259
 5260	val = nr64_ipp(IPP_CFIG);
 5261	val &= ~IPP_CFIG_IP_MAX_PKT;
 5262	val |= (IPP_CFIG_IPP_ENABLE |
 5263		IPP_CFIG_DFIFO_ECC_EN |
 5264		IPP_CFIG_DROP_BAD_CRC |
 5265		IPP_CFIG_CKSUM_EN |
 5266		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
 5267	nw64_ipp(IPP_CFIG, val);
 5268
 5269	return 0;
 5270}
 5271
 5272static void niu_handle_led(struct niu *np, int status)
 5273{
 5274	u64 val;
 5275	val = nr64_mac(XMAC_CONFIG);
 5276
 5277	if ((np->flags & NIU_FLAGS_10G) != 0 &&
 5278	    (np->flags & NIU_FLAGS_FIBER) != 0) {
 5279		if (status) {
 5280			val |= XMAC_CONFIG_LED_POLARITY;
 5281			val &= ~XMAC_CONFIG_FORCE_LED_ON;
 5282		} else {
 5283			val |= XMAC_CONFIG_FORCE_LED_ON;
 5284			val &= ~XMAC_CONFIG_LED_POLARITY;
 5285		}
 5286	}
 5287
 5288	nw64_mac(XMAC_CONFIG, val);
 5289}
 5290
 5291static void niu_init_xif_xmac(struct niu *np)
 5292{
 5293	struct niu_link_config *lp = &np->link_config;
 5294	u64 val;
 5295
 5296	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
 5297		val = nr64(MIF_CONFIG);
 5298		val |= MIF_CONFIG_ATCA_GE;
 5299		nw64(MIF_CONFIG, val);
 5300	}
 5301
 5302	val = nr64_mac(XMAC_CONFIG);
 5303	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
 5304
 5305	val |= XMAC_CONFIG_TX_OUTPUT_EN;
 5306
 5307	if (lp->loopback_mode == LOOPBACK_MAC) {
 5308		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
 5309		val |= XMAC_CONFIG_LOOPBACK;
 5310	} else {
 5311		val &= ~XMAC_CONFIG_LOOPBACK;
 5312	}
 5313
 5314	if (np->flags & NIU_FLAGS_10G) {
 5315		val &= ~XMAC_CONFIG_LFS_DISABLE;
 5316	} else {
 5317		val |= XMAC_CONFIG_LFS_DISABLE;
 5318		if (!(np->flags & NIU_FLAGS_FIBER) &&
 5319		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
 5320			val |= XMAC_CONFIG_1G_PCS_BYPASS;
 5321		else
 5322			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
 5323	}
 5324
 5325	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
 5326
 5327	if (lp->active_speed == SPEED_100)
 5328		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
 5329	else
 5330		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
 5331
 5332	nw64_mac(XMAC_CONFIG, val);
 5333
 5334	val = nr64_mac(XMAC_CONFIG);
 5335	val &= ~XMAC_CONFIG_MODE_MASK;
 5336	if (np->flags & NIU_FLAGS_10G) {
 5337		val |= XMAC_CONFIG_MODE_XGMII;
 5338	} else {
 5339		if (lp->active_speed == SPEED_1000)
 5340			val |= XMAC_CONFIG_MODE_GMII;
 5341		else
 5342			val |= XMAC_CONFIG_MODE_MII;
 5343	}
 5344
 5345	nw64_mac(XMAC_CONFIG, val);
 5346}
 5347
 5348static void niu_init_xif_bmac(struct niu *np)
 5349{
 5350	struct niu_link_config *lp = &np->link_config;
 5351	u64 val;
 5352
 5353	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
 5354
 5355	if (lp->loopback_mode == LOOPBACK_MAC)
 5356		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
 5357	else
 5358		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
 5359
 5360	if (lp->active_speed == SPEED_1000)
 5361		val |= BMAC_XIF_CONFIG_GMII_MODE;
 5362	else
 5363		val &= ~BMAC_XIF_CONFIG_GMII_MODE;
 5364
 5365	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
 5366		 BMAC_XIF_CONFIG_LED_POLARITY);
 5367
 5368	if (!(np->flags & NIU_FLAGS_10G) &&
 5369	    !(np->flags & NIU_FLAGS_FIBER) &&
 5370	    lp->active_speed == SPEED_100)
 5371		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
 5372	else
 5373		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
 5374
 5375	nw64_mac(BMAC_XIF_CONFIG, val);
 5376}
 5377
 5378static void niu_init_xif(struct niu *np)
 5379{
 5380	if (np->flags & NIU_FLAGS_XMAC)
 5381		niu_init_xif_xmac(np);
 5382	else
 5383		niu_init_xif_bmac(np);
 5384}
 5385
 5386static void niu_pcs_mii_reset(struct niu *np)
 5387{
 5388	int limit = 1000;
 5389	u64 val = nr64_pcs(PCS_MII_CTL);
 5390	val |= PCS_MII_CTL_RST;
 5391	nw64_pcs(PCS_MII_CTL, val);
 5392	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
 5393		udelay(100);
 5394		val = nr64_pcs(PCS_MII_CTL);
 5395	}
 5396}
 5397
 5398static void niu_xpcs_reset(struct niu *np)
 5399{
 5400	int limit = 1000;
 5401	u64 val = nr64_xpcs(XPCS_CONTROL1);
 5402	val |= XPCS_CONTROL1_RESET;
 5403	nw64_xpcs(XPCS_CONTROL1, val);
 5404	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
 5405		udelay(100);
 5406		val = nr64_xpcs(XPCS_CONTROL1);
 5407	}
 5408}
 5409
 5410static int niu_init_pcs(struct niu *np)
 5411{
 5412	struct niu_link_config *lp = &np->link_config;
 5413	u64 val;
 5414
 5415	switch (np->flags & (NIU_FLAGS_10G |
 5416			     NIU_FLAGS_FIBER |
 5417			     NIU_FLAGS_XCVR_SERDES)) {
 5418	case NIU_FLAGS_FIBER:
 5419		/* 1G fiber */
 5420		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
 5421		nw64_pcs(PCS_DPATH_MODE, 0);
 5422		niu_pcs_mii_reset(np);
 5423		break;
 5424
 5425	case NIU_FLAGS_10G:
 5426	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 5427	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 5428		/* 10G SERDES */
 5429		if (!(np->flags & NIU_FLAGS_XMAC))
 5430			return -EINVAL;
 5431
 5432		/* 10G copper or fiber */
 5433		val = nr64_mac(XMAC_CONFIG);
 5434		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
 5435		nw64_mac(XMAC_CONFIG, val);
 5436
 5437		niu_xpcs_reset(np);
 5438
 5439		val = nr64_xpcs(XPCS_CONTROL1);
 5440		if (lp->loopback_mode == LOOPBACK_PHY)
 5441			val |= XPCS_CONTROL1_LOOPBACK;
 5442		else
 5443			val &= ~XPCS_CONTROL1_LOOPBACK;
 5444		nw64_xpcs(XPCS_CONTROL1, val);
 5445
 5446		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
 5447		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
 5448		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
 5449		break;
  5450
 5452	case NIU_FLAGS_XCVR_SERDES:
 5453		/* 1G SERDES */
 5454		niu_pcs_mii_reset(np);
 5455		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
 5456		nw64_pcs(PCS_DPATH_MODE, 0);
 5457		break;
 5458
 5459	case 0:
 5460		/* 1G copper */
 5461	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
 5462		/* 1G RGMII FIBER */
 5463		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
 5464		niu_pcs_mii_reset(np);
 5465		break;
 5466
 5467	default:
 5468		return -EINVAL;
 5469	}
 5470
 5471	return 0;
 5472}
 5473
 5474static int niu_reset_tx_xmac(struct niu *np)
 5475{
 5476	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
 5477					  (XTXMAC_SW_RST_REG_RS |
 5478					   XTXMAC_SW_RST_SOFT_RST),
 5479					  1000, 100, "XTXMAC_SW_RST");
 5480}
 5481
 5482static int niu_reset_tx_bmac(struct niu *np)
 5483{
 5484	int limit;
 5485
 5486	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
 5487	limit = 1000;
 5488	while (--limit >= 0) {
 5489		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
 5490			break;
 5491		udelay(100);
 5492	}
 5493	if (limit < 0) {
 5494		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
 5495			np->port,
 5496			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
 5497		return -ENODEV;
 5498	}
 5499
 5500	return 0;
 5501}
 5502
 5503static int niu_reset_tx_mac(struct niu *np)
 5504{
 5505	if (np->flags & NIU_FLAGS_XMAC)
 5506		return niu_reset_tx_xmac(np);
 5507	else
 5508		return niu_reset_tx_bmac(np);
 5509}
 5510
 5511static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
 5512{
 5513	u64 val;
 5514
 5515	val = nr64_mac(XMAC_MIN);
 5516	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
 5517		 XMAC_MIN_RX_MIN_PKT_SIZE);
 5518	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
 5519	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
 5520	nw64_mac(XMAC_MIN, val);
 5521
 5522	nw64_mac(XMAC_MAX, max);
 5523
 5524	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
 5525
 5526	val = nr64_mac(XMAC_IPG);
 5527	if (np->flags & NIU_FLAGS_10G) {
 5528		val &= ~XMAC_IPG_IPG_XGMII;
 5529		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
 5530	} else {
 5531		val &= ~XMAC_IPG_IPG_MII_GMII;
 5532		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
 5533	}
 5534	nw64_mac(XMAC_IPG, val);
 5535
 5536	val = nr64_mac(XMAC_CONFIG);
 5537	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
 5538		 XMAC_CONFIG_STRETCH_MODE |
 5539		 XMAC_CONFIG_VAR_MIN_IPG_EN |
 5540		 XMAC_CONFIG_TX_ENABLE);
 5541	nw64_mac(XMAC_CONFIG, val);
 5542
 5543	nw64_mac(TXMAC_FRM_CNT, 0);
 5544	nw64_mac(TXMAC_BYTE_CNT, 0);
 5545}
 5546
 5547static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
 5548{
 5549	u64 val;
 5550
 5551	nw64_mac(BMAC_MIN_FRAME, min);
 5552	nw64_mac(BMAC_MAX_FRAME, max);
 5553
 5554	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
 5555	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
 5556	nw64_mac(BMAC_PREAMBLE_SIZE, 7);
 5557
 5558	val = nr64_mac(BTXMAC_CONFIG);
 5559	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
 5560		 BTXMAC_CONFIG_ENABLE);
 5561	nw64_mac(BTXMAC_CONFIG, val);
 5562}
 5563
 5564static void niu_init_tx_mac(struct niu *np)
 5565{
 5566	u64 min, max;
 5567
 5568	min = 64;
 5569	if (np->dev->mtu > ETH_DATA_LEN)
 5570		max = 9216;
 5571	else
 5572		max = 1522;
 5573
 5574	/* The XMAC_MIN register only accepts values for TX min which
 5575	 * have the low 3 bits cleared.
 5576	 */
 5577	BUG_ON(min & 0x7);
 5578
 5579	if (np->flags & NIU_FLAGS_XMAC)
 5580		niu_init_tx_xmac(np, min, max);
 5581	else
 5582		niu_init_tx_bmac(np, min, max);
 5583}
 5584
 5585static int niu_reset_rx_xmac(struct niu *np)
 5586{
 5587	int limit;
 5588
 5589	nw64_mac(XRXMAC_SW_RST,
 5590		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
 5591	limit = 1000;
 5592	while (--limit >= 0) {
 5593		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
 5594						 XRXMAC_SW_RST_SOFT_RST)))
 5595			break;
 5596		udelay(100);
 5597	}
 5598	if (limit < 0) {
 5599		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
 5600			np->port,
 5601			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
 5602		return -ENODEV;
 5603	}
 5604
 5605	return 0;
 5606}
 5607
 5608static int niu_reset_rx_bmac(struct niu *np)
 5609{
 5610	int limit;
 5611
 5612	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
 5613	limit = 1000;
 5614	while (--limit >= 0) {
 5615		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
 5616			break;
 5617		udelay(100);
 5618	}
 5619	if (limit < 0) {
 5620		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
 5621			np->port,
 5622			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
 5623		return -ENODEV;
 5624	}
 5625
 5626	return 0;
 5627}
 5628
 5629static int niu_reset_rx_mac(struct niu *np)
 5630{
 5631	if (np->flags & NIU_FLAGS_XMAC)
 5632		return niu_reset_rx_xmac(np);
 5633	else
 5634		return niu_reset_rx_bmac(np);
 5635}
 5636
 5637static void niu_init_rx_xmac(struct niu *np)
 5638{
 5639	struct niu_parent *parent = np->parent;
 5640	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 5641	int first_rdc_table = tp->first_table_num;
 5642	unsigned long i;
 5643	u64 val;
 5644
 5645	nw64_mac(XMAC_ADD_FILT0, 0);
 5646	nw64_mac(XMAC_ADD_FILT1, 0);
 5647	nw64_mac(XMAC_ADD_FILT2, 0);
 5648	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
 5649	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
 5650	for (i = 0; i < MAC_NUM_HASH; i++)
 5651		nw64_mac(XMAC_HASH_TBL(i), 0);
 5652	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
 5653	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 5654	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 5655
 5656	val = nr64_mac(XMAC_CONFIG);
 5657	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
 5658		 XMAC_CONFIG_PROMISCUOUS |
 5659		 XMAC_CONFIG_PROMISC_GROUP |
 5660		 XMAC_CONFIG_ERR_CHK_DIS |
 5661		 XMAC_CONFIG_RX_CRC_CHK_DIS |
 5662		 XMAC_CONFIG_RESERVED_MULTICAST |
 5663		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
 5664		 XMAC_CONFIG_ADDR_FILTER_EN |
 5665		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
 5666		 XMAC_CONFIG_STRIP_CRC |
 5667		 XMAC_CONFIG_PASS_FLOW_CTRL |
 5668		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
 5669	val |= (XMAC_CONFIG_HASH_FILTER_EN);
 5670	nw64_mac(XMAC_CONFIG, val);
 5671
 5672	nw64_mac(RXMAC_BT_CNT, 0);
 5673	nw64_mac(RXMAC_BC_FRM_CNT, 0);
 5674	nw64_mac(RXMAC_MC_FRM_CNT, 0);
 5675	nw64_mac(RXMAC_FRAG_CNT, 0);
 5676	nw64_mac(RXMAC_HIST_CNT1, 0);
 5677	nw64_mac(RXMAC_HIST_CNT2, 0);
 5678	nw64_mac(RXMAC_HIST_CNT3, 0);
 5679	nw64_mac(RXMAC_HIST_CNT4, 0);
 5680	nw64_mac(RXMAC_HIST_CNT5, 0);
 5681	nw64_mac(RXMAC_HIST_CNT6, 0);
 5682	nw64_mac(RXMAC_HIST_CNT7, 0);
 5683	nw64_mac(RXMAC_MPSZER_CNT, 0);
 5684	nw64_mac(RXMAC_CRC_ER_CNT, 0);
 5685	nw64_mac(RXMAC_CD_VIO_CNT, 0);
 5686	nw64_mac(LINK_FAULT_CNT, 0);
 5687}
 5688
 5689static void niu_init_rx_bmac(struct niu *np)
 5690{
 5691	struct niu_parent *parent = np->parent;
 5692	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 5693	int first_rdc_table = tp->first_table_num;
 5694	unsigned long i;
 5695	u64 val;
 5696
 5697	nw64_mac(BMAC_ADD_FILT0, 0);
 5698	nw64_mac(BMAC_ADD_FILT1, 0);
 5699	nw64_mac(BMAC_ADD_FILT2, 0);
 5700	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
 5701	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
 5702	for (i = 0; i < MAC_NUM_HASH; i++)
 5703		nw64_mac(BMAC_HASH_TBL(i), 0);
 5704	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 5705	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 5706	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
 5707
 5708	val = nr64_mac(BRXMAC_CONFIG);
 5709	val &= ~(BRXMAC_CONFIG_ENABLE |
 5710		 BRXMAC_CONFIG_STRIP_PAD |
 5711		 BRXMAC_CONFIG_STRIP_FCS |
 5712		 BRXMAC_CONFIG_PROMISC |
 5713		 BRXMAC_CONFIG_PROMISC_GRP |
 5714		 BRXMAC_CONFIG_ADDR_FILT_EN |
 5715		 BRXMAC_CONFIG_DISCARD_DIS);
 5716	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
 5717	nw64_mac(BRXMAC_CONFIG, val);
 5718
 5719	val = nr64_mac(BMAC_ADDR_CMPEN);
 5720	val |= BMAC_ADDR_CMPEN_EN0;
 5721	nw64_mac(BMAC_ADDR_CMPEN, val);
 5722}
 5723
 5724static void niu_init_rx_mac(struct niu *np)
 5725{
 5726	niu_set_primary_mac(np, np->dev->dev_addr);
 5727
 5728	if (np->flags & NIU_FLAGS_XMAC)
 5729		niu_init_rx_xmac(np);
 5730	else
 5731		niu_init_rx_bmac(np);
 5732}
 5733
 5734static void niu_enable_tx_xmac(struct niu *np, int on)
 5735{
 5736	u64 val = nr64_mac(XMAC_CONFIG);
 5737
 5738	if (on)
 5739		val |= XMAC_CONFIG_TX_ENABLE;
 5740	else
 5741		val &= ~XMAC_CONFIG_TX_ENABLE;
 5742	nw64_mac(XMAC_CONFIG, val);
 5743}
 5744
 5745static void niu_enable_tx_bmac(struct niu *np, int on)
 5746{
 5747	u64 val = nr64_mac(BTXMAC_CONFIG);
 5748
 5749	if (on)
 5750		val |= BTXMAC_CONFIG_ENABLE;
 5751	else
 5752		val &= ~BTXMAC_CONFIG_ENABLE;
 5753	nw64_mac(BTXMAC_CONFIG, val);
 5754}
 5755
 5756static void niu_enable_tx_mac(struct niu *np, int on)
 5757{
 5758	if (np->flags & NIU_FLAGS_XMAC)
 5759		niu_enable_tx_xmac(np, on);
 5760	else
 5761		niu_enable_tx_bmac(np, on);
 5762}
 5763
 5764static void niu_enable_rx_xmac(struct niu *np, int on)
 5765{
 5766	u64 val = nr64_mac(XMAC_CONFIG);
 5767
 5768	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
 5769		 XMAC_CONFIG_PROMISCUOUS);
 5770
 5771	if (np->flags & NIU_FLAGS_MCAST)
 5772		val |= XMAC_CONFIG_HASH_FILTER_EN;
 5773	if (np->flags & NIU_FLAGS_PROMISC)
 5774		val |= XMAC_CONFIG_PROMISCUOUS;
 5775
 5776	if (on)
 5777		val |= XMAC_CONFIG_RX_MAC_ENABLE;
 5778	else
 5779		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
 5780	nw64_mac(XMAC_CONFIG, val);
 5781}
 5782
 5783static void niu_enable_rx_bmac(struct niu *np, int on)
 5784{
 5785	u64 val = nr64_mac(BRXMAC_CONFIG);
 5786
 5787	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
 5788		 BRXMAC_CONFIG_PROMISC);
 5789
 5790	if (np->flags & NIU_FLAGS_MCAST)
 5791		val |= BRXMAC_CONFIG_HASH_FILT_EN;
 5792	if (np->flags & NIU_FLAGS_PROMISC)
 5793		val |= BRXMAC_CONFIG_PROMISC;
 5794
 5795	if (on)
 5796		val |= BRXMAC_CONFIG_ENABLE;
 5797	else
 5798		val &= ~BRXMAC_CONFIG_ENABLE;
 5799	nw64_mac(BRXMAC_CONFIG, val);
 5800}
 5801
 5802static void niu_enable_rx_mac(struct niu *np, int on)
 5803{
 5804	if (np->flags & NIU_FLAGS_XMAC)
 5805		niu_enable_rx_xmac(np, on);
 5806	else
 5807		niu_enable_rx_bmac(np, on);
 5808}
 5809
 5810static int niu_init_mac(struct niu *np)
 5811{
 5812	int err;
 5813
 5814	niu_init_xif(np);
 5815	err = niu_init_pcs(np);
 5816	if (err)
 5817		return err;
 5818
 5819	err = niu_reset_tx_mac(np);
 5820	if (err)
 5821		return err;
 5822	niu_init_tx_mac(np);
 5823	err = niu_reset_rx_mac(np);
 5824	if (err)
 5825		return err;
 5826	niu_init_rx_mac(np);
 5827
  5828	/* This looks hokey, but the RX MAC reset we just did will
  5829	 * undo some of the state we set up in niu_init_tx_mac(), so we
  5830	 * have to call it again.  In particular, the RX MAC reset will
  5831	 * set the XMAC_MAX register back to its default value.
  5832	 */
 5833	niu_init_tx_mac(np);
 5834	niu_enable_tx_mac(np, 1);
 5835
 5836	niu_enable_rx_mac(np, 1);
 5837
 5838	return 0;
 5839}
 5840
 5841static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 5842{
 5843	(void) niu_tx_channel_stop(np, rp->tx_channel);
 5844}
 5845
 5846static void niu_stop_tx_channels(struct niu *np)
 5847{
 5848	int i;
 5849
 5850	for (i = 0; i < np->num_tx_rings; i++) {
 5851		struct tx_ring_info *rp = &np->tx_rings[i];
 5852
 5853		niu_stop_one_tx_channel(np, rp);
 5854	}
 5855}
 5856
 5857static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 5858{
 5859	(void) niu_tx_channel_reset(np, rp->tx_channel);
 5860}
 5861
 5862static void niu_reset_tx_channels(struct niu *np)
 5863{
 5864	int i;
 5865
 5866	for (i = 0; i < np->num_tx_rings; i++) {
 5867		struct tx_ring_info *rp = &np->tx_rings[i];
 5868
 5869		niu_reset_one_tx_channel(np, rp);
 5870	}
 5871}
 5872
 5873static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 5874{
 5875	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
 5876}
 5877
 5878static void niu_stop_rx_channels(struct niu *np)
 5879{
 5880	int i;
 5881
 5882	for (i = 0; i < np->num_rx_rings; i++) {
 5883		struct rx_ring_info *rp = &np->rx_rings[i];
 5884
 5885		niu_stop_one_rx_channel(np, rp);
 5886	}
 5887}
 5888
 5889static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 5890{
 5891	int channel = rp->rx_channel;
 5892
 5893	(void) niu_rx_channel_reset(np, channel);
 5894	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
 5895	nw64(RX_DMA_CTL_STAT(channel), 0);
 5896	(void) niu_enable_rx_channel(np, channel, 0);
 5897}
 5898
 5899static void niu_reset_rx_channels(struct niu *np)
 5900{
 5901	int i;
 5902
 5903	for (i = 0; i < np->num_rx_rings; i++) {
 5904		struct rx_ring_info *rp = &np->rx_rings[i];
 5905
 5906		niu_reset_one_rx_channel(np, rp);
 5907	}
 5908}
 5909
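/* Wait for the IPP DFIFO read and write pointers to converge,
 * i.e. for the FIFO to drain, then clear the enable bits and
 * soft-reset the block.
 */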
 5910static void niu_disable_ipp(struct niu *np)
 5911{
 5912	u64 rd, wr, val;
 5913	int limit;
 5914
 5915	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
 5916	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
 5917	limit = 100;
 5918	while (--limit >= 0 && (rd != wr)) {
 5919		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
 5920		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
 5921	}
 5922	if (limit < 0 &&
 5923	    (rd != 0 && wr != 1)) {
 5924		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
 5925			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
 5926			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
 5927	}
 5928
 5929	val = nr64_ipp(IPP_CFIG);
 5930	val &= ~(IPP_CFIG_IPP_ENABLE |
 5931		 IPP_CFIG_DFIFO_ECC_EN |
 5932		 IPP_CFIG_DROP_BAD_CRC |
 5933		 IPP_CFIG_CKSUM_EN);
 5934	nw64_ipp(IPP_CFIG, val);
 5935
 5936	(void) niu_ipp_reset(np);
 5937}
 5938
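/* Bring the datapath up in dependency order: TXC, TX channels, RX
 * channels, classifier, ZCP, IPP and finally the MAC.  Failures
 * past the TX channel stage unwind through the out_* labels below.
 */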
 5939static int niu_init_hw(struct niu *np)
 5940{
 5941	int i, err;
 5942
 5943	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
 5944	niu_txc_enable_port(np, 1);
 5945	niu_txc_port_dma_enable(np, 1);
 5946	niu_txc_set_imask(np, 0);
 5947
 5948	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
 5949	for (i = 0; i < np->num_tx_rings; i++) {
 5950		struct tx_ring_info *rp = &np->tx_rings[i];
 5951
 5952		err = niu_init_one_tx_channel(np, rp);
 5953		if (err)
 5954			return err;
 5955	}
 5956
 5957	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
 5958	err = niu_init_rx_channels(np);
 5959	if (err)
 5960		goto out_uninit_tx_channels;
 5961
 5962	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
 5963	err = niu_init_classifier_hw(np);
 5964	if (err)
 5965		goto out_uninit_rx_channels;
 5966
 5967	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
 5968	err = niu_init_zcp(np);
 5969	if (err)
 5970		goto out_uninit_rx_channels;
 5971
 5972	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
 5973	err = niu_init_ipp(np);
 5974	if (err)
 5975		goto out_uninit_rx_channels;
 5976
 5977	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
 5978	err = niu_init_mac(np);
 5979	if (err)
 5980		goto out_uninit_ipp;
 5981
 5982	return 0;
 5983
 5984out_uninit_ipp:
 5985	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
 5986	niu_disable_ipp(np);
 5987
 5988out_uninit_rx_channels:
 5989	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
 5990	niu_stop_rx_channels(np);
 5991	niu_reset_rx_channels(np);
 5992
 5993out_uninit_tx_channels:
 5994	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
 5995	niu_stop_tx_channels(np);
 5996	niu_reset_tx_channels(np);
 5997
 5998	return err;
 5999}
 6000
 6001static void niu_stop_hw(struct niu *np)
 6002{
 6003	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
 6004	niu_enable_interrupts(np, 0);
 6005
 6006	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
 6007	niu_enable_rx_mac(np, 0);
 6008
 6009	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
 6010	niu_disable_ipp(np);
 6011
 6012	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
 6013	niu_stop_tx_channels(np);
 6014
 6015	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
 6016	niu_stop_rx_channels(np);
 6017
 6018	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
 6019	niu_reset_tx_channels(np);
 6020
 6021	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
 6022	niu_reset_rx_channels(np);
 6023}
 6024
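/* Build the per-vector IRQ names: vector 0 is always the MAC, port 0
 * additionally owns the MIF and SYSERR vectors, and the remaining
 * vectors map onto the RX rings and then the TX rings in order.
 */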
 6025static void niu_set_irq_name(struct niu *np)
 6026{
 6027	int port = np->port;
 6028	int i, j = 1;
 6029
 6030	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
 6031
 6032	if (port == 0) {
 6033		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
 6034		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
 6035		j = 3;
 6036	}
 6037
 6038	for (i = 0; i < np->num_ldg - j; i++) {
 6039		if (i < np->num_rx_rings)
 6040			sprintf(np->irq_name[i+j], "%s-rx-%d",
 6041				np->dev->name, i);
 6042		else if (i < np->num_tx_rings + np->num_rx_rings)
 6043			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
 6044				i - np->num_rx_rings);
 6045	}
 6046}
 6047
 6048static int niu_request_irq(struct niu *np)
 6049{
 6050	int i, j, err;
 6051
 6052	niu_set_irq_name(np);
 6053
 6054	err = 0;
 6055	for (i = 0; i < np->num_ldg; i++) {
 6056		struct niu_ldg *lp = &np->ldg[i];
 6057
 6058		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
 6059				  np->irq_name[i], lp);
 6060		if (err)
 6061			goto out_free_irqs;
 6062
 6063	}
 6064
 6065	return 0;
 6066
 6067out_free_irqs:
 6068	for (j = 0; j < i; j++) {
 6069		struct niu_ldg *lp = &np->ldg[j];
 6070
 6071		free_irq(lp->irq, lp);
 6072	}
 6073	return err;
 6074}
 6075
 6076static void niu_free_irq(struct niu *np)
 6077{
 6078	int i;
 6079
 6080	for (i = 0; i < np->num_ldg; i++) {
 6081		struct niu_ldg *lp = &np->ldg[i];
 6082
 6083		free_irq(lp->irq, lp);
 6084	}
 6085}
 6086
 6087static void niu_enable_napi(struct niu *np)
 6088{
 6089	int i;
 6090
 6091	for (i = 0; i < np->num_ldg; i++)
 6092		napi_enable(&np->ldg[i].napi);
 6093}
 6094
 6095static void niu_disable_napi(struct niu *np)
 6096{
 6097	int i;
 6098
 6099	for (i = 0; i < np->num_ldg; i++)
 6100		napi_disable(&np->ldg[i].napi);
 6101}
 6102
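/* ndo_open: allocate the channels, quiesce interrupts and hook up
 * the IRQs, initialize the hardware under the device lock, then
 * start the TX queues and the one-second service timer.
 */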
 6103static int niu_open(struct net_device *dev)
 6104{
 6105	struct niu *np = netdev_priv(dev);
 6106	int err;
 6107
 6108	netif_carrier_off(dev);
 6109
 6110	err = niu_alloc_channels(np);
 6111	if (err)
 6112		goto out_err;
 6113
 6114	err = niu_enable_interrupts(np, 0);
 6115	if (err)
 6116		goto out_free_channels;
 6117
 6118	err = niu_request_irq(np);
 6119	if (err)
 6120		goto out_free_channels;
 6121
 6122	niu_enable_napi(np);
 6123
 6124	spin_lock_irq(&np->lock);
 6125
 6126	err = niu_init_hw(np);
 6127	if (!err) {
 6128		init_timer(&np->timer);
 6129		np->timer.expires = jiffies + HZ;
 6130		np->timer.data = (unsigned long) np;
 6131		np->timer.function = niu_timer;
 6132
 6133		err = niu_enable_interrupts(np, 1);
 6134		if (err)
 6135			niu_stop_hw(np);
 6136	}
 6137
 6138	spin_unlock_irq(&np->lock);
 6139
 6140	if (err) {
 6141		niu_disable_napi(np);
 6142		goto out_free_irq;
 6143	}
 6144
 6145	netif_tx_start_all_queues(dev);
 6146
 6147	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 6148		netif_carrier_on(dev);
 6149
 6150	add_timer(&np->timer);
 6151
 6152	return 0;
 6153
 6154out_free_irq:
 6155	niu_free_irq(np);
 6156
 6157out_free_channels:
 6158	niu_free_channels(np);
 6159
 6160out_err:
 6161	return err;
 6162}
 6163
 6164static void niu_full_shutdown(struct niu *np, struct net_device *dev)
 6165{
 6166	cancel_work_sync(&np->reset_task);
 6167
 6168	niu_disable_napi(np);
 6169	netif_tx_stop_all_queues(dev);
 6170
 6171	del_timer_sync(&np->timer);
 6172
 6173	spin_lock_irq(&np->lock);
 6174
 6175	niu_stop_hw(np);
 6176
 6177	spin_unlock_irq(&np->lock);
 6178}
 6179
 6180static int niu_close(struct net_device *dev)
 6181{
 6182	struct niu *np = netdev_priv(dev);
 6183
 6184	niu_full_shutdown(np, dev);
 6185
 6186	niu_free_irq(np);
 6187
 6188	niu_free_channels(np);
 6189
 6190	niu_handle_led(np, 0);
 6191
 6192	return 0;
 6193}
 6194
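/* Fold the hardware XMAC counters into the driver's soft copies.
 * The counter registers are assumed to clear on read, hence the
 * accumulation with "+=" rather than plain assignment.
 */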
 6195static void niu_sync_xmac_stats(struct niu *np)
 6196{
 6197	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
 6198
 6199	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
 6200	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
 6201
 6202	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
 6203	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
 6204	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
 6205	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
 6206	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
 6207	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
 6208	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
 6209	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
 6210	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
 6211	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
 6212	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
 6213	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
 6214	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
 6215	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
 6216	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
 6217	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
 6218}
 6219
 6220static void niu_sync_bmac_stats(struct niu *np)
 6221{
 6222	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
 6223
 6224	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
 6225	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
 6226
 6227	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
 6228	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	/* Read the CRC error counter here, not BRXMAC_ALIGN_ERR_CNT a
	 * second time (a copy-paste slip).
	 */
	mp->rx_crc_errors += nr64_mac(BRXMAC_CRC_ERR_CNT);
 6230	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
 6231}
 6232
 6233static void niu_sync_mac_stats(struct niu *np)
 6234{
 6235	if (np->flags & NIU_FLAGS_XMAC)
 6236		niu_sync_xmac_stats(np);
 6237	else
 6238		niu_sync_bmac_stats(np);
 6239}
 6240
 6241static void niu_get_rx_stats(struct niu *np,
 6242			     struct rtnl_link_stats64 *stats)
 6243{
 6244	u64 pkts, dropped, errors, bytes;
 6245	struct rx_ring_info *rx_rings;
 6246	int i;
 6247
 6248	pkts = dropped = errors = bytes = 0;
 6249
 6250	rx_rings = ACCESS_ONCE(np->rx_rings);
 6251	if (!rx_rings)
 6252		goto no_rings;
 6253
 6254	for (i = 0; i < np->num_rx_rings; i++) {
 6255		struct rx_ring_info *rp = &rx_rings[i];
 6256
 6257		niu_sync_rx_discard_stats(np, rp, 0);
 6258
 6259		pkts += rp->rx_packets;
 6260		bytes += rp->rx_bytes;
 6261		dropped += rp->rx_dropped;
 6262		errors += rp->rx_errors;
 6263	}
 6264
 6265no_rings:
 6266	stats->rx_packets = pkts;
 6267	stats->rx_bytes = bytes;
 6268	stats->rx_dropped = dropped;
 6269	stats->rx_errors = errors;
 6270}
 6271
 6272static void niu_get_tx_stats(struct niu *np,
 6273			     struct rtnl_link_stats64 *stats)
 6274{
 6275	u64 pkts, errors, bytes;
 6276	struct tx_ring_info *tx_rings;
 6277	int i;
 6278
 6279	pkts = errors = bytes = 0;
 6280
 6281	tx_rings = ACCESS_ONCE(np->tx_rings);
 6282	if (!tx_rings)
 6283		goto no_rings;
 6284
 6285	for (i = 0; i < np->num_tx_rings; i++) {
 6286		struct tx_ring_info *rp = &tx_rings[i];
 6287
 6288		pkts += rp->tx_packets;
 6289		bytes += rp->tx_bytes;
 6290		errors += rp->tx_errors;
 6291	}
 6292
 6293no_rings:
 6294	stats->tx_packets = pkts;
 6295	stats->tx_bytes = bytes;
 6296	stats->tx_errors = errors;
 6297}
 6298
 6299static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
 6300					       struct rtnl_link_stats64 *stats)
 6301{
 6302	struct niu *np = netdev_priv(dev);
 6303
 6304	if (netif_running(dev)) {
 6305		niu_get_rx_stats(np, stats);
 6306		niu_get_tx_stats(np, stats);
 6307	}
 6308
 6309	return stats;
 6310}
 6311
 6312static void niu_load_hash_xmac(struct niu *np, u16 *hash)
 6313{
 6314	int i;
 6315
 6316	for (i = 0; i < 16; i++)
 6317		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
 6318}
 6319
 6320static void niu_load_hash_bmac(struct niu *np, u16 *hash)
 6321{
 6322	int i;
 6323
 6324	for (i = 0; i < 16; i++)
 6325		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
 6326}
 6327
 6328static void niu_load_hash(struct niu *np, u16 *hash)
 6329{
 6330	if (np->flags & NIU_FLAGS_XMAC)
 6331		niu_load_hash_xmac(np, hash);
 6332	else
 6333		niu_load_hash_bmac(np, hash);
 6334}
 6335
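/* Program the RX filters under np->lock: the promiscuous flag, the
 * alternate unicast MACs (falling back to promiscuous mode when there
 * are more addresses than hardware slots), and the 16 x 16-bit
 * multicast hash table.
 */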
 6336static void niu_set_rx_mode(struct net_device *dev)
 6337{
 6338	struct niu *np = netdev_priv(dev);
 6339	int i, alt_cnt, err;
 6340	struct netdev_hw_addr *ha;
 6341	unsigned long flags;
 6342	u16 hash[16] = { 0, };
 6343
 6344	spin_lock_irqsave(&np->lock, flags);
 6345	niu_enable_rx_mac(np, 0);
 6346
 6347	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
 6348	if (dev->flags & IFF_PROMISC)
 6349		np->flags |= NIU_FLAGS_PROMISC;
 6350	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
 6351		np->flags |= NIU_FLAGS_MCAST;
 6352
 6353	alt_cnt = netdev_uc_count(dev);
 6354	if (alt_cnt > niu_num_alt_addr(np)) {
 6355		alt_cnt = 0;
 6356		np->flags |= NIU_FLAGS_PROMISC;
 6357	}
 6358
 6359	if (alt_cnt) {
 6360		int index = 0;
 6361
 6362		netdev_for_each_uc_addr(ha, dev) {
 6363			err = niu_set_alt_mac(np, index, ha->addr);
 6364			if (err)
 6365				netdev_warn(dev, "Error %d adding alt mac %d\n",
 6366					    err, index);
 6367			err = niu_enable_alt_mac(np, index, 1);
 6368			if (err)
 6369				netdev_warn(dev, "Error %d enabling alt mac %d\n",
 6370					    err, index);
 6371
 6372			index++;
 6373		}
 6374	} else {
 6375		int alt_start;
 6376		if (np->flags & NIU_FLAGS_XMAC)
 6377			alt_start = 0;
 6378		else
 6379			alt_start = 1;
 6380		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
 6381			err = niu_enable_alt_mac(np, i, 0);
 6382			if (err)
 6383				netdev_warn(dev, "Error %d disabling alt mac %d\n",
 6384					    err, i);
 6385		}
 6386	}
 6387	if (dev->flags & IFF_ALLMULTI) {
 6388		for (i = 0; i < 16; i++)
 6389			hash[i] = 0xffff;
 6390	} else if (!netdev_mc_empty(dev)) {
 6391		netdev_for_each_mc_addr(ha, dev) {
 6392			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
 6393
 6394			crc >>= 24;
 6395			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
 6396		}
 6397	}
 6398
 6399	if (np->flags & NIU_FLAGS_MCAST)
 6400		niu_load_hash(np, hash);
 6401
 6402	niu_enable_rx_mac(np, 1);
 6403	spin_unlock_irqrestore(&np->lock, flags);
 6404}
 6405
 6406static int niu_set_mac_addr(struct net_device *dev, void *p)
 6407{
 6408	struct niu *np = netdev_priv(dev);
 6409	struct sockaddr *addr = p;
 6410	unsigned long flags;
 6411
 6412	if (!is_valid_ether_addr(addr->sa_data))
 6413		return -EADDRNOTAVAIL;
 6414
 6415	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 6416
 6417	if (!netif_running(dev))
 6418		return 0;
 6419
 6420	spin_lock_irqsave(&np->lock, flags);
 6421	niu_enable_rx_mac(np, 0);
 6422	niu_set_primary_mac(np, dev->dev_addr);
 6423	niu_enable_rx_mac(np, 1);
 6424	spin_unlock_irqrestore(&np->lock, flags);
 6425
 6426	return 0;
 6427}
 6428
 6429static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 6430{
 6431	return -EOPNOTSUPP;
 6432}
 6433
 6434static void niu_netif_stop(struct niu *np)
 6435{
 6436	np->dev->trans_start = jiffies;	/* prevent tx timeout */
 6437
 6438	niu_disable_napi(np);
 6439
 6440	netif_tx_disable(np->dev);
 6441}
 6442
 6443static void niu_netif_start(struct niu *np)
 6444{
 6445	/* NOTE: unconditional netif_wake_queue is only appropriate
 6446	 * so long as all callers are assured to have free tx slots
 6447	 * (such as after niu_init_hw).
 6448	 */
 6449	netif_tx_wake_all_queues(np->dev);
 6450
 6451	niu_enable_napi(np);
 6452
 6453	niu_enable_interrupts(np, 1);
 6454}
 6455
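/* Rebuild the rings after a reset: re-link the pages still held on
 * each RX ring's rxhash back into the RBR, top the RBR up with fresh
 * pages, and release any TX packets that were still pending.
 */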
 6456static void niu_reset_buffers(struct niu *np)
 6457{
 6458	int i, j, k, err;
 6459
 6460	if (np->rx_rings) {
 6461		for (i = 0; i < np->num_rx_rings; i++) {
 6462			struct rx_ring_info *rp = &np->rx_rings[i];
 6463
 6464			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
 6465				struct page *page;
 6466
 6467				page = rp->rxhash[j];
 6468				while (page) {
 6469					struct page *next =
 6470						(struct page *) page->mapping;
 6471					u64 base = page->index;
 6472					base = base >> RBR_DESCR_ADDR_SHIFT;
 6473					rp->rbr[k++] = cpu_to_le32(base);
 6474					page = next;
 6475				}
 6476			}
 6477			for (; k < MAX_RBR_RING_SIZE; k++) {
 6478				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
 6479				if (unlikely(err))
 6480					break;
 6481			}
 6482
 6483			rp->rbr_index = rp->rbr_table_size - 1;
 6484			rp->rcr_index = 0;
 6485			rp->rbr_pending = 0;
 6486			rp->rbr_refill_pending = 0;
 6487		}
 6488	}
 6489	if (np->tx_rings) {
 6490		for (i = 0; i < np->num_tx_rings; i++) {
 6491			struct tx_ring_info *rp = &np->tx_rings[i];
 6492
 6493			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
 6494				if (rp->tx_buffs[j].skb)
 6495					(void) release_tx_packet(np, rp, j);
 6496			}
 6497
 6498			rp->pending = MAX_TX_RING_SIZE;
 6499			rp->prod = 0;
 6500			rp->cons = 0;
 6501			rp->wrap_bit = 0;
 6502		}
 6503	}
 6504}
 6505
 6506static void niu_reset_task(struct work_struct *work)
 6507{
 6508	struct niu *np = container_of(work, struct niu, reset_task);
 6509	unsigned long flags;
 6510	int err;
 6511
 6512	spin_lock_irqsave(&np->lock, flags);
 6513	if (!netif_running(np->dev)) {
 6514		spin_unlock_irqrestore(&np->lock, flags);
 6515		return;
 6516	}
 6517
 6518	spin_unlock_irqrestore(&np->lock, flags);
 6519
 6520	del_timer_sync(&np->timer);
 6521
 6522	niu_netif_stop(np);
 6523
 6524	spin_lock_irqsave(&np->lock, flags);
 6525
 6526	niu_stop_hw(np);
 6527
 6528	spin_unlock_irqrestore(&np->lock, flags);
 6529
 6530	niu_reset_buffers(np);
 6531
 6532	spin_lock_irqsave(&np->lock, flags);
 6533
 6534	err = niu_init_hw(np);
 6535	if (!err) {
 6536		np->timer.expires = jiffies + HZ;
 6537		add_timer(&np->timer);
 6538		niu_netif_start(np);
 6539	}
 6540
 6541	spin_unlock_irqrestore(&np->lock, flags);
 6542}
 6543
 6544static void niu_tx_timeout(struct net_device *dev)
 6545{
 6546	struct niu *np = netdev_priv(dev);
 6547
 6548	dev_err(np->device, "%s: Transmit timed out, resetting\n",
 6549		dev->name);
 6550
 6551	schedule_work(&np->reset_task);
 6552}
 6553
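/* Pack one TX descriptor: the SOP/mark bits, the number of
 * descriptors making up the frame, the transfer length, and the
 * DMA address.
 */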
 6554static void niu_set_txd(struct tx_ring_info *rp, int index,
 6555			u64 mapping, u64 len, u64 mark,
 6556			u64 n_frags)
 6557{
 6558	__le64 *desc = &rp->descr[index];
 6559
 6560	*desc = cpu_to_le64(mark |
 6561			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
 6562			    (len << TX_DESC_TR_LEN_SHIFT) |
 6563			    (mapping & TX_DESC_SAD));
 6564}
 6565
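/* Build the 64-bit flags word of the tx_pkt_hdr prepended to every
 * frame: pad and L3/L4 offsets are expressed in 16-bit words, plus
 * the IHL, LLC/VLAN/IP-version markers and the checksum-offload bits.
 */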
 6566static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
 6567				u64 pad_bytes, u64 len)
 6568{
 6569	u16 eth_proto, eth_proto_inner;
 6570	u64 csum_bits, l3off, ihl, ret;
 6571	u8 ip_proto;
 6572	int ipv6;
 6573
 6574	eth_proto = be16_to_cpu(ehdr->h_proto);
 6575	eth_proto_inner = eth_proto;
 6576	if (eth_proto == ETH_P_8021Q) {
 6577		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
 6578		__be16 val = vp->h_vlan_encapsulated_proto;
 6579
 6580		eth_proto_inner = be16_to_cpu(val);
 6581	}
 6582
 6583	ipv6 = ihl = 0;
 6584	switch (skb->protocol) {
 6585	case cpu_to_be16(ETH_P_IP):
 6586		ip_proto = ip_hdr(skb)->protocol;
 6587		ihl = ip_hdr(skb)->ihl;
 6588		break;
 6589	case cpu_to_be16(ETH_P_IPV6):
 6590		ip_proto = ipv6_hdr(skb)->nexthdr;
 6591		ihl = (40 >> 2);
 6592		ipv6 = 1;
 6593		break;
 6594	default:
 6595		ip_proto = ihl = 0;
 6596		break;
 6597	}
 6598
 6599	csum_bits = TXHDR_CSUM_NONE;
 6600	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 6601		u64 start, stuff;
 6602
 6603		csum_bits = (ip_proto == IPPROTO_TCP ?
 6604			     TXHDR_CSUM_TCP :
 6605			     (ip_proto == IPPROTO_UDP ?
 6606			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
 6607
 6608		start = skb_checksum_start_offset(skb) -
 6609			(pad_bytes + sizeof(struct tx_pkt_hdr));
 6610		stuff = start + skb->csum_offset;
 6611
 6612		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
 6613		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
 6614	}
 6615
 6616	l3off = skb_network_offset(skb) -
 6617		(pad_bytes + sizeof(struct tx_pkt_hdr));
 6618
 6619	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
 6620	       (len << TXHDR_LEN_SHIFT) |
 6621	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
 6622	       (ihl << TXHDR_IHL_SHIFT) |
 6623	       ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
 6624	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
 6625	       (ipv6 ? TXHDR_IP_VER : 0) |
 6626	       csum_bits);
 6627
 6628	return ret;
 6629}
 6630
 6631static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 6632				  struct net_device *dev)
 6633{
 6634	struct niu *np = netdev_priv(dev);
 6635	unsigned long align, headroom;
 6636	struct netdev_queue *txq;
 6637	struct tx_ring_info *rp;
 6638	struct tx_pkt_hdr *tp;
 6639	unsigned int len, nfg;
 6640	struct ethhdr *ehdr;
 6641	int prod, i, tlen;
 6642	u64 mapping, mrk;
 6643
 6644	i = skb_get_queue_mapping(skb);
 6645	rp = &np->tx_rings[i];
 6646	txq = netdev_get_tx_queue(dev, i);
 6647
 6648	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 6649		netif_tx_stop_queue(txq);
 6650		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
 6651		rp->tx_errors++;
 6652		return NETDEV_TX_BUSY;
 6653	}
 6654
 6655	if (skb->len < ETH_ZLEN) {
 6656		unsigned int pad_bytes = ETH_ZLEN - skb->len;
 6657
 6658		if (skb_pad(skb, pad_bytes))
 6659			goto out;
 6660		skb_put(skb, pad_bytes);
 6661	}
 6662
 6663	len = sizeof(struct tx_pkt_hdr) + 15;
 6664	if (skb_headroom(skb) < len) {
 6665		struct sk_buff *skb_new;
 6666
 6667		skb_new = skb_realloc_headroom(skb, len);
 6668		if (!skb_new) {
 6669			rp->tx_errors++;
 6670			goto out_drop;
 6671		}
 6672		kfree_skb(skb);
 6673		skb = skb_new;
 6674	} else
 6675		skb_orphan(skb);
 6676
 6677	align = ((unsigned long) skb->data & (16 - 1));
 6678	headroom = align + sizeof(struct tx_pkt_hdr);
 6679
 6680	ehdr = (struct ethhdr *) skb->data;
 6681	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
 6682
 6683	len = skb->len - sizeof(struct tx_pkt_hdr);
 6684	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
 6685	tp->resv = 0;
 6686
 6687	len = skb_headlen(skb);
 6688	mapping = np->ops->map_single(np->device, skb->data,
 6689				      len, DMA_TO_DEVICE);
 6690
 6691	prod = rp->prod;
 6692
 6693	rp->tx_buffs[prod].skb = skb;
 6694	rp->tx_buffs[prod].mapping = mapping;
 6695
 6696	mrk = TX_DESC_SOP;
 6697	if (++rp->mark_counter == rp->mark_freq) {
 6698		rp->mark_counter = 0;
 6699		mrk |= TX_DESC_MARK;
 6700		rp->mark_pending++;
 6701	}
 6702
 6703	tlen = len;
 6704	nfg = skb_shinfo(skb)->nr_frags;
 6705	while (tlen > 0) {
 6706		tlen -= MAX_TX_DESC_LEN;
 6707		nfg++;
 6708	}
 6709
 6710	while (len > 0) {
 6711		unsigned int this_len = len;
 6712
 6713		if (this_len > MAX_TX_DESC_LEN)
 6714			this_len = MAX_TX_DESC_LEN;
 6715
 6716		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
 6717		mrk = nfg = 0;
 6718
 6719		prod = NEXT_TX(rp, prod);
 6720		mapping += this_len;
 6721		len -= this_len;
 6722	}
 6723
 6724	for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
 6725		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 6726
 6727		len = skb_frag_size(frag);
 6728		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
 6729					    frag->page_offset, len,
 6730					    DMA_TO_DEVICE);
 6731
 6732		rp->tx_buffs[prod].skb = NULL;
 6733		rp->tx_buffs[prod].mapping = mapping;
 6734
 6735		niu_set_txd(rp, prod, mapping, len, 0, 0);
 6736
 6737		prod = NEXT_TX(rp, prod);
 6738	}
 6739
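	/* The kick register's wrap bit must be toggled each time the
	 * producer index wraps past the end of the ring.
	 */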
 6740	if (prod < rp->prod)
 6741		rp->wrap_bit ^= TX_RING_KICK_WRAP;
 6742	rp->prod = prod;
 6743
 6744	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
 6745
 6746	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
 6747		netif_tx_stop_queue(txq);
 6748		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
 6749			netif_tx_wake_queue(txq);
 6750	}
 6751
 6752out:
 6753	return NETDEV_TX_OK;
 6754
 6755out_drop:
 6756	rp->tx_errors++;
 6757	kfree_skb(skb);
 6758	goto out;
 6759}
 6760
 6761static int niu_change_mtu(struct net_device *dev, int new_mtu)
 6762{
 6763	struct niu *np = netdev_priv(dev);
 6764	int err, orig_jumbo, new_jumbo;
 6765
 6766	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
 6767		return -EINVAL;
 6768
 6769	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
 6770	new_jumbo = (new_mtu > ETH_DATA_LEN);
 6771
 6772	dev->mtu = new_mtu;
 6773
 6774	if (!netif_running(dev) ||
 6775	    (orig_jumbo == new_jumbo))
 6776		return 0;
 6777
 6778	niu_full_shutdown(np, dev);
 6779
 6780	niu_free_channels(np);
 6781
 6782	niu_enable_napi(np);
 6783
 6784	err = niu_alloc_channels(np);
 6785	if (err)
 6786		return err;
 6787
 6788	spin_lock_irq(&np->lock);
 6789
 6790	err = niu_init_hw(np);
 6791	if (!err) {
 6792		init_timer(&np->timer);
 6793		np->timer.expires = jiffies + HZ;
 6794		np->timer.data = (unsigned long) np;
 6795		np->timer.function = niu_timer;
 6796
 6797		err = niu_enable_interrupts(np, 1);
 6798		if (err)
 6799			niu_stop_hw(np);
 6800	}
 6801
 6802	spin_unlock_irq(&np->lock);
 6803
 6804	if (!err) {
 6805		netif_tx_start_all_queues(dev);
 6806		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 6807			netif_carrier_on(dev);
 6808
 6809		add_timer(&np->timer);
 6810	}
 6811
 6812	return err;
 6813}
 6814
 6815static void niu_get_drvinfo(struct net_device *dev,
 6816			    struct ethtool_drvinfo *info)
 6817{
 6818	struct niu *np = netdev_priv(dev);
 6819	struct niu_vpd *vpd = &np->vpd;
 6820
 6821	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
 6822	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 6823	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
 6824		vpd->fcode_major, vpd->fcode_minor);
 6825	if (np->parent->plat_type != PLAT_TYPE_NIU)
 6826		strlcpy(info->bus_info, pci_name(np->pdev),
 6827			sizeof(info->bus_info));
 6828}
 6829
 6830static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 6831{
 6832	struct niu *np = netdev_priv(dev);
 6833	struct niu_link_config *lp;
 6834
 6835	lp = &np->link_config;
 6836
 6837	memset(cmd, 0, sizeof(*cmd));
 6838	cmd->phy_address = np->phy_addr;
 6839	cmd->supported = lp->supported;
 6840	cmd->advertising = lp->active_advertising;
 6841	cmd->autoneg = lp->active_autoneg;
 6842	ethtool_cmd_speed_set(cmd, lp->active_speed);
 6843	cmd->duplex = lp->active_duplex;
 6844	cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
 6845	cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
 6846		XCVR_EXTERNAL : XCVR_INTERNAL;
 6847
 6848	return 0;
 6849}
 6850
 6851static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 6852{
 6853	struct niu *np = netdev_priv(dev);
 6854	struct niu_link_config *lp = &np->link_config;
 6855
 6856	lp->advertising = cmd->advertising;
 6857	lp->speed = ethtool_cmd_speed(cmd);
 6858	lp->duplex = cmd->duplex;
 6859	lp->autoneg = cmd->autoneg;
 6860	return niu_init_link(np);
 6861}
 6862
 6863static u32 niu_get_msglevel(struct net_device *dev)
 6864{
 6865	struct niu *np = netdev_priv(dev);
 6866	return np->msg_enable;
 6867}
 6868
 6869static void niu_set_msglevel(struct net_device *dev, u32 value)
 6870{
 6871	struct niu *np = netdev_priv(dev);
 6872	np->msg_enable = value;
 6873}
 6874
 6875static int niu_nway_reset(struct net_device *dev)
 6876{
 6877	struct niu *np = netdev_priv(dev);
 6878
 6879	if (np->link_config.autoneg)
 6880		return niu_init_link(np);
 6881
 6882	return 0;
 6883}
 6884
 6885static int niu_get_eeprom_len(struct net_device *dev)
 6886{
 6887	struct niu *np = netdev_priv(dev);
 6888
 6889	return np->eeprom_len;
 6890}
 6891
 6892static int niu_get_eeprom(struct net_device *dev,
 6893			  struct ethtool_eeprom *eeprom, u8 *data)
 6894{
 6895	struct niu *np = netdev_priv(dev);
 6896	u32 offset, len, val;
 6897
 6898	offset = eeprom->offset;
 6899	len = eeprom->len;
 6900
 6901	if (offset + len < offset)
 6902		return -EINVAL;
 6903	if (offset >= np->eeprom_len)
 6904		return -EINVAL;
 6905	if (offset + len > np->eeprom_len)
 6906		len = eeprom->len = np->eeprom_len - offset;
 6907
 6908	if (offset & 3) {
 6909		u32 b_offset, b_count;
 6910
 6911		b_offset = offset & 3;
 6912		b_count = 4 - b_offset;
 6913		if (b_count > len)
 6914			b_count = len;
 6915
 6916		val = nr64(ESPC_NCR((offset - b_offset) / 4));
 6917		memcpy(data, ((char *)&val) + b_offset, b_count);
 6918		data += b_count;
 6919		len -= b_count;
 6920		offset += b_count;
 6921	}
 6922	while (len >= 4) {
 6923		val = nr64(ESPC_NCR(offset / 4));
 6924		memcpy(data, &val, 4);
 6925		data += 4;
 6926		len -= 4;
 6927		offset += 4;
 6928	}
 6929	if (len) {
 6930		val = nr64(ESPC_NCR(offset / 4));
 6931		memcpy(data, &val, len);
 6932	}
 6933	return 0;
 6934}
 6935
 6936static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
 6937{
 6938	switch (flow_type) {
 6939	case TCP_V4_FLOW:
 6940	case TCP_V6_FLOW:
 6941		*pid = IPPROTO_TCP;
 6942		break;
 6943	case UDP_V4_FLOW:
 6944	case UDP_V6_FLOW:
 6945		*pid = IPPROTO_UDP;
 6946		break;
 6947	case SCTP_V4_FLOW:
 6948	case SCTP_V6_FLOW:
 6949		*pid = IPPROTO_SCTP;
 6950		break;
 6951	case AH_V4_FLOW:
 6952	case AH_V6_FLOW:
 6953		*pid = IPPROTO_AH;
 6954		break;
 6955	case ESP_V4_FLOW:
 6956	case ESP_V6_FLOW:
 6957		*pid = IPPROTO_ESP;
 6958		break;
 6959	default:
 6960		*pid = 0;
 6961		break;
 6962	}
 6963}
 6964
 6965static int niu_class_to_ethflow(u64 class, int *flow_type)
 6966{
 6967	switch (class) {
 6968	case CLASS_CODE_TCP_IPV4:
 6969		*flow_type = TCP_V4_FLOW;
 6970		break;
 6971	case CLASS_CODE_UDP_IPV4:
 6972		*flow_type = UDP_V4_FLOW;
 6973		break;
 6974	case CLASS_CODE_AH_ESP_IPV4:
 6975		*flow_type = AH_V4_FLOW;
 6976		break;
 6977	case CLASS_CODE_SCTP_IPV4:
 6978		*flow_type = SCTP_V4_FLOW;
 6979		break;
 6980	case CLASS_CODE_TCP_IPV6:
 6981		*flow_type = TCP_V6_FLOW;
 6982		break;
 6983	case CLASS_CODE_UDP_IPV6:
 6984		*flow_type = UDP_V6_FLOW;
 6985		break;
 6986	case CLASS_CODE_AH_ESP_IPV6:
 6987		*flow_type = AH_V6_FLOW;
 6988		break;
 6989	case CLASS_CODE_SCTP_IPV6:
 6990		*flow_type = SCTP_V6_FLOW;
 6991		break;
 6992	case CLASS_CODE_USER_PROG1:
 6993	case CLASS_CODE_USER_PROG2:
 6994	case CLASS_CODE_USER_PROG3:
 6995	case CLASS_CODE_USER_PROG4:
 6996		*flow_type = IP_USER_FLOW;
 6997		break;
 6998	default:
 6999		return 0;
 7000	}
 7001
 7002	return 1;
 7003}
 7004
 7005static int niu_ethflow_to_class(int flow_type, u64 *class)
 7006{
 7007	switch (flow_type) {
 7008	case TCP_V4_FLOW:
 7009		*class = CLASS_CODE_TCP_IPV4;
 7010		break;
 7011	case UDP_V4_FLOW:
 7012		*class = CLASS_CODE_UDP_IPV4;
 7013		break;
 7014	case AH_ESP_V4_FLOW:
 7015	case AH_V4_FLOW:
 7016	case ESP_V4_FLOW:
 7017		*class = CLASS_CODE_AH_ESP_IPV4;
 7018		break;
 7019	case SCTP_V4_FLOW:
 7020		*class = CLASS_CODE_SCTP_IPV4;
 7021		break;
 7022	case TCP_V6_FLOW:
 7023		*class = CLASS_CODE_TCP_IPV6;
 7024		break;
 7025	case UDP_V6_FLOW:
 7026		*class = CLASS_CODE_UDP_IPV6;
 7027		break;
 7028	case AH_ESP_V6_FLOW:
 7029	case AH_V6_FLOW:
 7030	case ESP_V6_FLOW:
 7031		*class = CLASS_CODE_AH_ESP_IPV6;
 7032		break;
 7033	case SCTP_V6_FLOW:
 7034		*class = CLASS_CODE_SCTP_IPV6;
 7035		break;
 7036	default:
 7037		return 0;
 7038	}
 7039
 7040	return 1;
 7041}
 7042
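/* Translate between the hardware FLOW_KEY bits and the ethtool RXH_*
 * hash-field flags (and the reverse, below).
 */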
 7043static u64 niu_flowkey_to_ethflow(u64 flow_key)
 7044{
 7045	u64 ethflow = 0;
 7046
 7047	if (flow_key & FLOW_KEY_L2DA)
 7048		ethflow |= RXH_L2DA;
 7049	if (flow_key & FLOW_KEY_VLAN)
 7050		ethflow |= RXH_VLAN;
 7051	if (flow_key & FLOW_KEY_IPSA)
 7052		ethflow |= RXH_IP_SRC;
 7053	if (flow_key & FLOW_KEY_IPDA)
 7054		ethflow |= RXH_IP_DST;
 7055	if (flow_key & FLOW_KEY_PROTO)
 7056		ethflow |= RXH_L3_PROTO;
 7057	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
 7058		ethflow |= RXH_L4_B_0_1;
 7059	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
 7060		ethflow |= RXH_L4_B_2_3;
 7061
 7062	return ethflow;
}
 7065
 7066static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
 7067{
 7068	u64 key = 0;
 7069
 7070	if (ethflow & RXH_L2DA)
 7071		key |= FLOW_KEY_L2DA;
 7072	if (ethflow & RXH_VLAN)
 7073		key |= FLOW_KEY_VLAN;
 7074	if (ethflow & RXH_IP_SRC)
 7075		key |= FLOW_KEY_IPSA;
 7076	if (ethflow & RXH_IP_DST)
 7077		key |= FLOW_KEY_IPDA;
 7078	if (ethflow & RXH_L3_PROTO)
 7079		key |= FLOW_KEY_PROTO;
 7080	if (ethflow & RXH_L4_B_0_1)
 7081		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
 7082	if (ethflow & RXH_L4_B_2_3)
 7083		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
 7084
 7085	*flow_key = key;
 7086
 7087	return 1;
}
 7090
 7091static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 7092{
 7093	u64 class;
 7094
 7095	nfc->data = 0;
 7096
 7097	if (!niu_ethflow_to_class(nfc->flow_type, &class))
 7098		return -EINVAL;
 7099
 7100	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
 7101	    TCAM_KEY_DISC)
 7102		nfc->data = RXH_DISCARD;
 7103	else
 7104		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
 7105						      CLASS_CODE_USER_PROG1]);
 7106	return 0;
 7107}
 7108
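/* Unpack an IPv4 TCAM key/mask pair into the ethtool flow-spec
 * layout: addresses and TOS always, then ports, SPI or raw L4 bytes
 * depending on the flow type.
 */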
 7109static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
 7110					struct ethtool_rx_flow_spec *fsp)
 7111{
 7112	u32 tmp;
 7113	u16 prt;
 7114
 7115	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
 7116	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
 7117
 7118	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
 7119	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 7120
 7121	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
 7122	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
 7123
 7124	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
 7125	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 7126
 7127	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
 7128		TCAM_V4KEY2_TOS_SHIFT;
 7129	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
 7130		TCAM_V4KEY2_TOS_SHIFT;
 7131
 7132	switch (fsp->flow_type) {
 7133	case TCP_V4_FLOW:
 7134	case UDP_V4_FLOW:
 7135	case SCTP_V4_FLOW:
 7136		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7137			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
 7138		fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
 7139
 7140		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7141			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
 7142		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 7143
 7144		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7145			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
 7146		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
 7147
 7148		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7149			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
 7150		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 7151		break;
 7152	case AH_V4_FLOW:
 7153	case ESP_V4_FLOW:
 7154		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7155			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7156		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 7157
 7158		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7159			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7160		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 7161		break;
 7162	case IP_USER_FLOW:
 7163		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7164			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7165		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 7166
 7167		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7168			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7169		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 7170
 7171		fsp->h_u.usr_ip4_spec.proto =
 7172			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
 7173			TCAM_V4KEY2_PROTO_SHIFT;
 7174		fsp->m_u.usr_ip4_spec.proto =
 7175			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
 7176			TCAM_V4KEY2_PROTO_SHIFT;
 7177
 7178		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
 7179		break;
 7180	default:
 7181		break;
 7182	}
 7183}
 7184
 7185static int niu_get_ethtool_tcam_entry(struct niu *np,
 7186				      struct ethtool_rxnfc *nfc)
 7187{
 7188	struct niu_parent *parent = np->parent;
 7189	struct niu_tcam_entry *tp;
 7190	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
 7191	u16 idx;
 7192	u64 class;
 7193	int ret = 0;
 7194
 7195	idx = tcam_get_index(np, (u16)nfc->fs.location);
 7196
 7197	tp = &parent->tcam[idx];
 7198	if (!tp->valid) {
 7199		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
 7200			    parent->index, (u16)nfc->fs.location, idx);
 7201		return -EINVAL;
 7202	}
 7203
 7204	/* fill the flow spec entry */
 7205	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 7206		TCAM_V4KEY0_CLASS_CODE_SHIFT;
	/* niu_class_to_ethflow() returns 1 on success and 0 on failure,
	 * never a negative value, so failure must be tested as zero here.
	 * Normalize ret to 0 on the success path so a stray 1 is not
	 * returned to the caller.
	 */
	if (niu_class_to_ethflow(class, &fsp->flow_type) == 0) {
		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
			    parent->index);
		ret = -EINVAL;
		goto out;
	}
	ret = 0;
 7215
 7216	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
 7217		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
 7218			TCAM_V4KEY2_PROTO_SHIFT;
 7219		if (proto == IPPROTO_ESP) {
 7220			if (fsp->flow_type == AH_V4_FLOW)
 7221				fsp->flow_type = ESP_V4_FLOW;
 7222			else
 7223				fsp->flow_type = ESP_V6_FLOW;
 7224		}
 7225	}
 7226
 7227	switch (fsp->flow_type) {
 7228	case TCP_V4_FLOW:
 7229	case UDP_V4_FLOW:
 7230	case SCTP_V4_FLOW:
 7231	case AH_V4_FLOW:
 7232	case ESP_V4_FLOW:
 7233		niu_get_ip4fs_from_tcam_key(tp, fsp);
 7234		break;
 7235	case TCP_V6_FLOW:
 7236	case UDP_V6_FLOW:
 7237	case SCTP_V6_FLOW:
 7238	case AH_V6_FLOW:
 7239	case ESP_V6_FLOW:
 7240		/* Not yet implemented */
 7241		ret = -EINVAL;
 7242		break;
 7243	case IP_USER_FLOW:
 7244		niu_get_ip4fs_from_tcam_key(tp, fsp);
 7245		break;
 7246	default:
 7247		ret = -EINVAL;
 7248		break;
 7249	}
 7250
 7251	if (ret < 0)
 7252		goto out;
 7253
 7254	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
 7255		fsp->ring_cookie = RX_CLS_FLOW_DISC;
 7256	else
 7257		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
 7258			TCAM_ASSOCDATA_OFFSET_SHIFT;
 7259
 7260	/* put the tcam size here */
 7261	nfc->data = tcam_get_size(np);
 7262out:
 7263	return ret;
 7264}
 7265
 7266static int niu_get_ethtool_tcam_all(struct niu *np,
 7267				    struct ethtool_rxnfc *nfc,
 7268				    u32 *rule_locs)
 7269{
 7270	struct niu_parent *parent = np->parent;
 7271	struct niu_tcam_entry *tp;
 7272	int i, idx, cnt;
 7273	unsigned long flags;
 7274	int ret = 0;
 7275
 7276	/* put the tcam size here */
 7277	nfc->data = tcam_get_size(np);
 7278
 7279	niu_lock_parent(np, flags);
 7280	for (cnt = 0, i = 0; i < nfc->data; i++) {
 7281		idx = tcam_get_index(np, i);
 7282		tp = &parent->tcam[idx];
 7283		if (!tp->valid)
 7284			continue;
 7285		if (cnt == nfc->rule_cnt) {
 7286			ret = -EMSGSIZE;
 7287			break;
 7288		}
 7289		rule_locs[cnt] = i;
 7290		cnt++;
 7291	}
 7292	niu_unlock_parent(np, flags);
 7293
 7294	nfc->rule_cnt = cnt;
 7295
 7296	return ret;
 7297}
 7298
 7299static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 7300		       u32 *rule_locs)
 7301{
 7302	struct niu *np = netdev_priv(dev);
 7303	int ret = 0;
 7304
 7305	switch (cmd->cmd) {
 7306	case ETHTOOL_GRXFH:
 7307		ret = niu_get_hash_opts(np, cmd);
 7308		break;
 7309	case ETHTOOL_GRXRINGS:
 7310		cmd->data = np->num_rx_rings;
 7311		break;
 7312	case ETHTOOL_GRXCLSRLCNT:
 7313		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
 7314		break;
 7315	case ETHTOOL_GRXCLSRULE:
 7316		ret = niu_get_ethtool_tcam_entry(np, cmd);
 7317		break;
 7318	case ETHTOOL_GRXCLSRLALL:
 7319		ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
 7320		break;
 7321	default:
 7322		ret = -EINVAL;
 7323		break;
 7324	}
 7325
 7326	return ret;
 7327}
 7328
 7329static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 7330{
 7331	u64 class;
 7332	u64 flow_key = 0;
 7333	unsigned long flags;
 7334
 7335	if (!niu_ethflow_to_class(nfc->flow_type, &class))
 7336		return -EINVAL;
 7337
 7338	if (class < CLASS_CODE_USER_PROG1 ||
 7339	    class > CLASS_CODE_SCTP_IPV6)
 7340		return -EINVAL;
 7341
 7342	if (nfc->data & RXH_DISCARD) {
 7343		niu_lock_parent(np, flags);
 7344		flow_key = np->parent->tcam_key[class -
 7345					       CLASS_CODE_USER_PROG1];
 7346		flow_key |= TCAM_KEY_DISC;
 7347		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
 7348		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
 7349		niu_unlock_parent(np, flags);
 7350		return 0;
 7351	} else {
 7352		/* Discard was set before, but is not set now */
 7353		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
 7354		    TCAM_KEY_DISC) {
 7355			niu_lock_parent(np, flags);
 7356			flow_key = np->parent->tcam_key[class -
 7357					       CLASS_CODE_USER_PROG1];
 7358			flow_key &= ~TCAM_KEY_DISC;
 7359			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
 7360			     flow_key);
 7361			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
 7362				flow_key;
 7363			niu_unlock_parent(np, flags);
 7364		}
 7365	}
 7366
 7367	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
 7368		return -EINVAL;
 7369
 7370	niu_lock_parent(np, flags);
 7371	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
 7372	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
 7373	niu_unlock_parent(np, flags);
 7374
 7375	return 0;
 7376}
 7377
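/* The inverse of niu_get_ip4fs_from_tcam_key(): build a TCAM key and
 * mask from an ethtool IPv4 flow spec, including the L2 RDC table
 * number and the class code.
 */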
 7378static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
 7379				       struct niu_tcam_entry *tp,
 7380				       int l2_rdc_tab, u64 class)
 7381{
 7382	u8 pid = 0;
 7383	u32 sip, dip, sipm, dipm, spi, spim;
 7384	u16 sport, dport, spm, dpm;
 7385
 7386	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
 7387	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
 7388	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
 7389	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
 7390
 7391	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7392	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
 7393	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
 7394	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
 7395
 7396	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
 7397	tp->key[3] |= dip;
 7398
 7399	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
 7400	tp->key_mask[3] |= dipm;
 7401
 7402	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
 7403		       TCAM_V4KEY2_TOS_SHIFT);
 7404	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
 7405			    TCAM_V4KEY2_TOS_SHIFT);
 7406	switch (fsp->flow_type) {
 7407	case TCP_V4_FLOW:
 7408	case UDP_V4_FLOW:
 7409	case SCTP_V4_FLOW:
 7410		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
 7411		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
 7412		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
 7413		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
 7414
 7415		tp->key[2] |= (((u64)sport << 16) | dport);
 7416		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
 7417		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
 7418		break;
 7419	case AH_V4_FLOW:
 7420	case ESP_V4_FLOW:
 7421		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
 7422		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
 7423
 7424		tp->key[2] |= spi;
 7425		tp->key_mask[2] |= spim;
 7426		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
 7427		break;
 7428	case IP_USER_FLOW:
 7429		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
 7430		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
 7431
 7432		tp->key[2] |= spi;
 7433		tp->key_mask[2] |= spim;
 7434		pid = fsp->h_u.usr_ip4_spec.proto;
 7435		break;
 7436	default:
 7437		break;
 7438	}
 7439
 7440	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
	if (pid)
		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
 7444}
 7445
 7446static int niu_add_ethtool_tcam_entry(struct niu *np,
 7447				      struct ethtool_rxnfc *nfc)
 7448{
 7449	struct niu_parent *parent = np->parent;
 7450	struct niu_tcam_entry *tp;
 7451	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
 7452	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
 7453	int l2_rdc_table = rdc_table->first_table_num;
 7454	u16 idx;
 7455	u64 class;
 7456	unsigned long flags;
 7457	int err, ret;
 7458
 7459	ret = 0;
 7460
 7461	idx = nfc->fs.location;
 7462	if (idx >= tcam_get_size(np))
 7463		return -EINVAL;
 7464
 7465	if (fsp->flow_type == IP_USER_FLOW) {
 7466		int i;
 7467		int add_usr_cls = 0;
 7468		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
 7469		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
 7470
 7471		if (uspec->ip_ver != ETH_RX_NFC_IP4)
 7472			return -EINVAL;
 7473
 7474		niu_lock_parent(np, flags);
 7475
 7476		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
 7477			if (parent->l3_cls[i]) {
 7478				if (uspec->proto == parent->l3_cls_pid[i]) {
 7479					class = parent->l3_cls[i];
 7480					parent->l3_cls_refcnt[i]++;
 7481					add_usr_cls = 1;
 7482					break;
 7483				}
 7484			} else {
 7485				/* Program new user IP class */
 7486				switch (i) {
 7487				case 0:
 7488					class = CLASS_CODE_USER_PROG1;
 7489					break;
 7490				case 1:
 7491					class = CLASS_CODE_USER_PROG2;
 7492					break;
 7493				case 2:
 7494					class = CLASS_CODE_USER_PROG3;
 7495					break;
 7496				case 3:
 7497					class = CLASS_CODE_USER_PROG4;
 7498					break;
 7499				default:
 7500					break;
 7501				}
 7502				ret = tcam_user_ip_class_set(np, class, 0,
 7503							     uspec->proto,
 7504							     uspec->tos,
 7505							     umask->tos);
 7506				if (ret)
 7507					goto out;
 7508
 7509				ret = tcam_user_ip_class_enable(np, class, 1);
 7510				if (ret)
 7511					goto out;
 7512				parent->l3_cls[i] = class;
 7513				parent->l3_cls_pid[i] = uspec->proto;
 7514				parent->l3_cls_refcnt[i]++;
 7515				add_usr_cls = 1;
 7516				break;
 7517			}
 7518		}
 7519		if (!add_usr_cls) {
 7520			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
 7521				    parent->index, __func__, uspec->proto);
 7522			ret = -EINVAL;
 7523			goto out;
 7524		}
 7525		niu_unlock_parent(np, flags);
 7526	} else {
		if (!niu_ethflow_to_class(fsp->flow_type, &class))
			return -EINVAL;
 7530	}
 7531
 7532	niu_lock_parent(np, flags);
 7533
 7534	idx = tcam_get_index(np, idx);
 7535	tp = &parent->tcam[idx];
 7536
 7537	memset(tp, 0, sizeof(*tp));
 7538
 7539	/* fill in the tcam key and mask */
 7540	switch (fsp->flow_type) {
 7541	case TCP_V4_FLOW:
 7542	case UDP_V4_FLOW:
 7543	case SCTP_V4_FLOW:
 7544	case AH_V4_FLOW:
 7545	case ESP_V4_FLOW:
 7546		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
 7547		break;
 7548	case TCP_V6_FLOW:
 7549	case UDP_V6_FLOW:
 7550	case SCTP_V6_FLOW:
 7551	case AH_V6_FLOW:
 7552	case ESP_V6_FLOW:
 7553		/* Not yet implemented */
 7554		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
 7555			    parent->index, __func__, fsp->flow_type);
 7556		ret = -EINVAL;
 7557		goto out;
 7558	case IP_USER_FLOW:
 7559		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
 7560		break;
 7561	default:
 7562		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
 7563			    parent->index, __func__, fsp->flow_type);
 7564		ret = -EINVAL;
 7565		goto out;
 7566	}
 7567
 7568	/* fill in the assoc data */
 7569	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
 7570		tp->assoc_data = TCAM_ASSOCDATA_DISC;
 7571	} else {
 7572		if (fsp->ring_cookie >= np->num_rx_rings) {
 7573			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
 7574				    parent->index, __func__,
 7575				    (long long)fsp->ring_cookie);
 7576			ret = -EINVAL;
 7577			goto out;
 7578		}
 7579		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
 7580				  (fsp->ring_cookie <<
 7581				   TCAM_ASSOCDATA_OFFSET_SHIFT));
 7582	}
 7583
 7584	err = tcam_write(np, idx, tp->key, tp->key_mask);
 7585	if (err) {
 7586		ret = -EINVAL;
 7587		goto out;
 7588	}
 7589	err = tcam_assoc_write(np, idx, tp->assoc_data);
 7590	if (err) {
 7591		ret = -EINVAL;
 7592		goto out;
 7593	}
 7594
 7595	/* validate the entry */
 7596	tp->valid = 1;
 7597	np->clas.tcam_valid_entries++;
 7598out:
 7599	niu_unlock_parent(np, flags);
 7600
 7601	return ret;
 7602}
 7603
 7604static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
 7605{
 7606	struct niu_parent *parent = np->parent;
 7607	struct niu_tcam_entry *tp;
 7608	u16 idx;
 7609	unsigned long flags;
 7610	u64 class;
 7611	int ret = 0;
 7612
 7613	if (loc >= tcam_get_size(np))
 7614		return -EINVAL;
 7615
 7616	niu_lock_parent(np, flags);
 7617
 7618	idx = tcam_get_index(np, loc);
 7619	tp = &parent->tcam[idx];
 7620
 7621	/* if the entry is of a user defined class, then update*/
 7622	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 7623		TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7624
 7625	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
 7626		int i;
 7627		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
 7628			if (parent->l3_cls[i] == class) {
 7629				parent->l3_cls_refcnt[i]--;
 7630				if (!parent->l3_cls_refcnt[i]) {
 7631					/* disable class */
 7632					ret = tcam_user_ip_class_enable(np,
 7633									class,
 7634									0);
 7635					if (ret)
 7636						goto out;
 7637					parent->l3_cls[i] = 0;
 7638					parent->l3_cls_pid[i] = 0;
 7639				}
 7640				break;
 7641			}
 7642		}
 7643		if (i == NIU_L3_PROG_CLS) {
 7644			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
 7645				    parent->index, __func__,
 7646				    (unsigned long long)class);
 7647			ret = -EINVAL;
 7648			goto out;
 7649		}
 7650	}
 7651
 7652	ret = tcam_flush(np, idx);
 7653	if (ret)
 7654		goto out;
 7655
 7656	/* invalidate the entry */
 7657	tp->valid = 0;
 7658	np->clas.tcam_valid_entries--;
 7659out:
 7660	niu_unlock_parent(np, flags);
 7661
 7662	return ret;
 7663}
 7664
 7665static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 7666{
 7667	struct niu *np = netdev_priv(dev);
 7668	int ret = 0;
 7669
 7670	switch (cmd->cmd) {
 7671	case ETHTOOL_SRXFH:
 7672		ret = niu_set_hash_opts(np, cmd);
 7673		break;
 7674	case ETHTOOL_SRXCLSRLINS:
 7675		ret = niu_add_ethtool_tcam_entry(np, cmd);
 7676		break;
 7677	case ETHTOOL_SRXCLSRLDEL:
 7678		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
 7679		break;
 7680	default:
 7681		ret = -EINVAL;
 7682		break;
 7683	}
 7684
 7685	return ret;
 7686}
 7687
 7688static const struct {
 7689	const char string[ETH_GSTRING_LEN];
 7690} niu_xmac_stat_keys[] = {
 7691	{ "tx_frames" },
 7692	{ "tx_bytes" },
 7693	{ "tx_fifo_errors" },
 7694	{ "tx_overflow_errors" },
 7695	{ "tx_max_pkt_size_errors" },
 7696	{ "tx_underflow_errors" },
 7697	{ "rx_local_faults" },
 7698	{ "rx_remote_faults" },
 7699	{ "rx_link_faults" },
 7700	{ "rx_align_errors" },
 7701	{ "rx_frags" },
 7702	{ "rx_mcasts" },
 7703	{ "rx_bcasts" },
 7704	{ "rx_hist_cnt1" },
 7705	{ "rx_hist_cnt2" },
 7706	{ "rx_hist_cnt3" },
 7707	{ "rx_hist_cnt4" },
 7708	{ "rx_hist_cnt5" },
 7709	{ "rx_hist_cnt6" },
 7710	{ "rx_hist_cnt7" },
 7711	{ "rx_octets" },
 7712	{ "rx_code_violations" },
 7713	{ "rx_len_errors" },
 7714	{ "rx_crc_errors" },
 7715	{ "rx_underflows" },
 7716	{ "rx_overflows" },
 7717	{ "pause_off_state" },
 7718	{ "pause_on_state" },
 7719	{ "pause_received" },
 7720};
 7721
 7722#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
 7723
 7724static const struct {
 7725	const char string[ETH_GSTRING_LEN];
 7726} niu_bmac_stat_keys[] = {
 7727	{ "tx_underflow_errors" },
 7728	{ "tx_max_pkt_size_errors" },
 7729	{ "tx_bytes" },
 7730	{ "tx_frames" },
 7731	{ "rx_overflows" },
 7732	{ "rx_frames" },
 7733	{ "rx_align_errors" },
 7734	{ "rx_crc_errors" },
 7735	{ "rx_len_errors" },
 7736	{ "pause_off_state" },
 7737	{ "pause_on_state" },
 7738	{ "pause_received" },
 7739};
 7740
 7741#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
 7742
 7743static const struct {
 7744	const char string[ETH_GSTRING_LEN];
 7745} niu_rxchan_stat_keys[] = {
 7746	{ "rx_channel" },
 7747	{ "rx_packets" },
 7748	{ "rx_bytes" },
 7749	{ "rx_dropped" },
 7750	{ "rx_errors" },
 7751};
 7752
 7753#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
 7754
 7755static const struct {
 7756	const char string[ETH_GSTRING_LEN];
 7757} niu_txchan_stat_keys[] = {
 7758	{ "tx_channel" },
 7759	{ "tx_packets" },
 7760	{ "tx_bytes" },
 7761	{ "tx_errors" },
 7762};
 7763
 7764#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
 7765
 7766static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 7767{
 7768	struct niu *np = netdev_priv(dev);
 7769	int i;
 7770
 7771	if (stringset != ETH_SS_STATS)
 7772		return;
 7773
 7774	if (np->flags & NIU_FLAGS_XMAC) {
 7775		memcpy(data, niu_xmac_stat_keys,
 7776		       sizeof(niu_xmac_stat_keys));
 7777		data += sizeof(niu_xmac_stat_keys);
 7778	} else {
 7779		memcpy(data, niu_bmac_stat_keys,
 7780		       sizeof(niu_bmac_stat_keys));
 7781		data += sizeof(niu_bmac_stat_keys);
 7782	}
 7783	for (i = 0; i < np->num_rx_rings; i++) {
 7784		memcpy(data, niu_rxchan_stat_keys,
 7785		       sizeof(niu_rxchan_stat_keys));
 7786		data += sizeof(niu_rxchan_stat_keys);
 7787	}
 7788	for (i = 0; i < np->num_tx_rings; i++) {
 7789		memcpy(data, niu_txchan_stat_keys,
 7790		       sizeof(niu_txchan_stat_keys));
 7791		data += sizeof(niu_txchan_stat_keys);
 7792	}
 7793}
 7794
 7795static int niu_get_sset_count(struct net_device *dev, int stringset)
 7796{
 7797	struct niu *np = netdev_priv(dev);
 7798
 7799	if (stringset != ETH_SS_STATS)
 7800		return -EINVAL;
 7801
 7802	return (np->flags & NIU_FLAGS_XMAC ?
 7803		 NUM_XMAC_STAT_KEYS :
 7804		 NUM_BMAC_STAT_KEYS) +
 7805		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
 7806		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
 7807}
 7808
 7809static void niu_get_ethtool_stats(struct net_device *dev,
 7810				  struct ethtool_stats *stats, u64 *data)
 7811{
 7812	struct niu *np = netdev_priv(dev);
 7813	int i;
 7814
 7815	niu_sync_mac_stats(np);
 7816	if (np->flags & NIU_FLAGS_XMAC) {
 7817		memcpy(data, &np->mac_stats.xmac,
 7818		       sizeof(struct niu_xmac_stats));
 7819		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
 7820	} else {
 7821		memcpy(data, &np->mac_stats.bmac,
 7822		       sizeof(struct niu_bmac_stats));
 7823		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
 7824	}
 7825	for (i = 0; i < np->num_rx_rings; i++) {
 7826		struct rx_ring_info *rp = &np->rx_rings[i];
 7827
 7828		niu_sync_rx_discard_stats(np, rp, 0);
 7829
 7830		data[0] = rp->rx_channel;
 7831		data[1] = rp->rx_packets;
 7832		data[2] = rp->rx_bytes;
 7833		data[3] = rp->rx_dropped;
 7834		data[4] = rp->rx_errors;
 7835		data += 5;
 7836	}
 7837	for (i = 0; i < np->num_tx_rings; i++) {
 7838		struct tx_ring_info *rp = &np->tx_rings[i];
 7839
 7840		data[0] = rp->tx_channel;
 7841		data[1] = rp->tx_packets;
 7842		data[2] = rp->tx_bytes;
 7843		data[3] = rp->tx_errors;
 7844		data += 4;
 7845	}
 7846}
 7847
 7848static u64 niu_led_state_save(struct niu *np)
 7849{
 7850	if (np->flags & NIU_FLAGS_XMAC)
 7851		return nr64_mac(XMAC_CONFIG);
 7852	else
 7853		return nr64_mac(BMAC_XIF_CONFIG);
 7854}
 7855
 7856static void niu_led_state_restore(struct niu *np, u64 val)
 7857{
 7858	if (np->flags & NIU_FLAGS_XMAC)
 7859		nw64_mac(XMAC_CONFIG, val);
 7860	else
 7861		nw64_mac(BMAC_XIF_CONFIG, val);
 7862}
 7863
 7864static void niu_force_led(struct niu *np, int on)
 7865{
 7866	u64 val, reg, bit;
 7867
 7868	if (np->flags & NIU_FLAGS_XMAC) {
 7869		reg = XMAC_CONFIG;
 7870		bit = XMAC_CONFIG_FORCE_LED_ON;
 7871	} else {
 7872		reg = BMAC_XIF_CONFIG;
 7873		bit = BMAC_XIF_CONFIG_LINK_LED;
 7874	}
 7875
 7876	val = nr64_mac(reg);
 7877	if (on)
 7878		val |= bit;
 7879	else
 7880		val &= ~bit;
 7881	nw64_mac(reg, val);
 7882}
 7883
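/* ethtool LED identify support: save the LED state on ACTIVE and
 * restore it on INACTIVE.  Returning 1 asks the core to alternate
 * between ID_ON and ID_OFF once per second.
 */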
 7884static int niu_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
 7888	struct niu *np = netdev_priv(dev);
 7889
 7890	if (!netif_running(dev))
 7891		return -EAGAIN;
 7892
 7893	switch (state) {
 7894	case ETHTOOL_ID_ACTIVE:
 7895		np->orig_led_state = niu_led_state_save(np);
 7896		return 1;	/* cycle on/off once per second */
 7897
 7898	case ETHTOOL_ID_ON:
 7899		niu_force_led(np, 1);
 7900		break;
 7901
 7902	case ETHTOOL_ID_OFF:
 7903		niu_force_led(np, 0);
 7904		break;
 7905
 7906	case ETHTOOL_ID_INACTIVE:
 7907		niu_led_state_restore(np, np->orig_led_state);
 7908	}
 7909
 7910	return 0;
 7911}
 7912
 7913static const struct ethtool_ops niu_ethtool_ops = {
 7914	.get_drvinfo		= niu_get_drvinfo,
 7915	.get_link		= ethtool_op_get_link,
 7916	.get_msglevel		= niu_get_msglevel,
 7917	.set_msglevel		= niu_set_msglevel,
 7918	.nway_reset		= niu_nway_reset,
 7919	.get_eeprom_len		= niu_get_eeprom_len,
 7920	.get_eeprom		= niu_get_eeprom,
 7921	.get_settings		= niu_get_settings,
 7922	.set_settings		= niu_set_settings,
 7923	.get_strings		= niu_get_strings,
 7924	.get_sset_count		= niu_get_sset_count,
 7925	.get_ethtool_stats	= niu_get_ethtool_stats,
 7926	.set_phys_id		= niu_set_phys_id,
 7927	.get_rxnfc		= niu_get_nfc,
 7928	.set_rxnfc		= niu_set_nfc,
 7929};
 7930
 7931static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
 7932			      int ldg, int ldn)
 7933{
 7934	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
 7935		return -EINVAL;
 7936	if (ldn < 0 || ldn > LDN_MAX)
 7937		return -EINVAL;
 7938
 7939	parent->ldg_map[ldn] = ldg;
 7940
 7941	if (np->parent->plat_type == PLAT_TYPE_NIU) {
 7942		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
 7943		 * the firmware, and we're not supposed to change them.
 7944		 * Validate the mapping, because if it's wrong we probably
 7945		 * won't get any interrupts and that's painful to debug.
 7946		 */
 7947		if (nr64(LDG_NUM(ldn)) != ldg) {
 7948			dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
 7949				np->port, ldn, ldg,
 7950				(unsigned long long) nr64(LDG_NUM(ldn)));
 7951			return -EINVAL;
 7952		}
	} else {
		nw64(LDG_NUM(ldn), ldg);
	}
 7955
 7956	return 0;
 7957}
 7958
 7959static int niu_set_ldg_timer_res(struct niu *np, int res)
 7960{
 7961	if (res < 0 || res > LDG_TIMER_RES_VAL)
		return -EINVAL;

 7965	nw64(LDG_TIMER_RES, res);
 7966
 7967	return 0;
 7968}
 7969
 7970static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
 7971{
 7972	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
 7973	    (func < 0 || func > 3) ||
 7974	    (vector < 0 || vector > 0x1f))
 7975		return -EINVAL;
 7976
 7977	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
 7978
 7979	return 0;
 7980}
 7981
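/* Read one byte from the on-board EEPROM through the ESPC PIO
 * interface.  ESPC_PIO_EN_ENABLE must be set.
 */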
 7982static int niu_pci_eeprom_read(struct niu *np, u32 addr)
 7983{
 7984	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
 7985				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
 7986	int limit;
 7987
 7988	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
 7989		return -EINVAL;
 7990
 7991	frame = frame_base;
 7992	nw64(ESPC_PIO_STAT, frame);
 7993	limit = 64;
 7994	do {
 7995		udelay(5);
 7996		frame = nr64(ESPC_PIO_STAT);
 7997		if (frame & ESPC_PIO_STAT_READ_END)
 7998			break;
 7999	} while (limit--);
 8000	if (!(frame & ESPC_PIO_STAT_READ_END)) {
 8001		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
 8002			(unsigned long long) frame);
 8003		return -ENODEV;
 8004	}
 8005
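	/* Issue the same read sequence a second time; the data is
	 * taken from the second pass.
	 */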
 8006	frame = frame_base;
 8007	nw64(ESPC_PIO_STAT, frame);
 8008	limit = 64;
 8009	do {
 8010		udelay(5);
 8011		frame = nr64(ESPC_PIO_STAT);
 8012		if (frame & ESPC_PIO_STAT_READ_END)
 8013			break;
 8014	} while (limit--);
 8015	if (!(frame & ESPC_PIO_STAT_READ_END)) {
 8016		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
 8017			(unsigned long long) frame);
 8018		return -ENODEV;
 8019	}
 8020
 8021	frame = nr64(ESPC_PIO_STAT);
 8022	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
 8023}
 8024
 8025static int niu_pci_eeprom_read16(struct niu *np, u32 off)
 8026{
 8027	int err = niu_pci_eeprom_read(np, off);
 8028	u16 val;
 8029
 8030	if (err < 0)
 8031		return err;
 8032	val = (err << 8);
 8033	err = niu_pci_eeprom_read(np, off + 1);
 8034	if (err < 0)
 8035		return err;
 8036	val |= (err & 0xff);
 8037
 8038	return val;
 8039}
 8040
 8041static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
 8042{
 8043	int err = niu_pci_eeprom_read(np, off);
 8044	u16 val;
 8045
 8046	if (err < 0)
 8047		return err;
 8048
 8049	val = (err & 0xff);
 8050	err = niu_pci_eeprom_read(np, off + 1);
 8051	if (err < 0)
 8052		return err;
 8053
 8054	val |= (err & 0xff) << 8;
 8055
 8056	return val;
 8057}
 8058
 8059static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
 8060				    int namebuf_len)
 8061{
 8062	int i;
 8063
 8064	for (i = 0; i < namebuf_len; i++) {
 8065		int err = niu_pci_eeprom_read(np, off + i);
 8066		if (err < 0)
 8067			return err;
 8068		*namebuf++ = err;
 8069		if (!err)
 8070			break;
 8071	}
 8072	if (i >= namebuf_len)
 8073		return -EINVAL;
 8074
 8075	return i + 1;
 8076}
 8077
 8078static void niu_vpd_parse_version(struct niu *np)
 8079{
 8080	struct niu_vpd *vpd = &np->vpd;
 8081	int len = strlen(vpd->version) + 1;
 8082	const char *s = vpd->version;
 8083	int i;
 8084
 8085	for (i = 0; i < len - 5; i++) {
 8086		if (!strncmp(s + i, "FCode ", 6))
 8087			break;
 8088	}
 8089	if (i >= len - 5)
 8090		return;
 8091
 8092	s += i + 5;
 8093	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
 8094
 8095	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8096		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
 8097		     vpd->fcode_major, vpd->fcode_minor);
 8098	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
 8099	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
 8100	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
 8101		np->flags |= NIU_FLAGS_VPD_VALID;
 8102}
 8103
 8104/* ESPC_PIO_EN_ENABLE must be set */
 8105static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
 8106{
 8107	unsigned int found_mask = 0;
 8108#define FOUND_MASK_MODEL	0x00000001
 8109#define FOUND_MASK_BMODEL	0x00000002
 8110#define FOUND_MASK_VERS		0x00000004
 8111#define FOUND_MASK_MAC		0x00000008
 8112#define FOUND_MASK_NMAC		0x00000010
 8113#define FOUND_MASK_PHY		0x00000020
 8114#define FOUND_MASK_ALL		0x0000003f
 8115
 8116	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8117		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
 8118	while (start < end) {
 8119		int len, err, prop_len;
 8120		char namebuf[64];
 8121		u8 *prop_buf;
 8122		int max_len;
 8123
 8124		if (found_mask == FOUND_MASK_ALL) {
 8125			niu_vpd_parse_version(np);
 8126			return 1;
 8127		}
 8128
 8129		err = niu_pci_eeprom_read(np, start + 2);
 8130		if (err < 0)
 8131			return err;
 8132		len = err;
 8133		start += 3;
 8134
		prop_len = niu_pci_eeprom_read(np, start + 4);
		if (prop_len < 0)
			return prop_len;
 8136		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
 8137		if (err < 0)
 8138			return err;
 8139
 8140		prop_buf = NULL;
 8141		max_len = 0;
 8142		if (!strcmp(namebuf, "model")) {
 8143			prop_buf = np->vpd.model;
 8144			max_len = NIU_VPD_MODEL_MAX;
 8145			found_mask |= FOUND_MASK_MODEL;
 8146		} else if (!strcmp(namebuf, "board-model")) {
 8147			prop_buf = np->vpd.board_model;
 8148			max_len = NIU_VPD_BD_MODEL_MAX;
 8149			found_mask |= FOUND_MASK_BMODEL;
 8150		} else if (!strcmp(namebuf, "version")) {
 8151			prop_buf = np->vpd.version;
 8152			max_len = NIU_VPD_VERSION_MAX;
 8153			found_mask |= FOUND_MASK_VERS;
 8154		} else if (!strcmp(namebuf, "local-mac-address")) {
 8155			prop_buf = np->vpd.local_mac;
 8156			max_len = ETH_ALEN;
 8157			found_mask |= FOUND_MASK_MAC;
 8158		} else if (!strcmp(namebuf, "num-mac-addresses")) {
 8159			prop_buf = &np->vpd.mac_num;
 8160			max_len = 1;
 8161			found_mask |= FOUND_MASK_NMAC;
 8162		} else if (!strcmp(namebuf, "phy-type")) {
 8163			prop_buf = np->vpd.phy_type;
 8164			max_len = NIU_VPD_PHY_TYPE_MAX;
 8165			found_mask |= FOUND_MASK_PHY;
 8166		}
 8167
 8168		if (max_len && prop_len > max_len) {
 8169			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
 8170			return -EINVAL;
 8171		}
 8172
 8173		if (prop_buf) {
 8174			u32 off = start + 5 + err;
 8175			int i;
 8176
 8177			netif_printk(np, probe, KERN_DEBUG, np->dev,
 8178				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
 8179				     namebuf, prop_len);
			for (i = 0; i < prop_len; i++) {
				err = niu_pci_eeprom_read(np, off + i);
				if (err < 0)
					return err;
				*prop_buf++ = err;
			}
 8182		}
 8183
 8184		start += len;
 8185	}
 8186
 8187	return 0;
 8188}
 8189
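/* Walk the resource list that begins at @start and hand each VPD-R data
 * resource (tag 0x90) to niu_pci_vpd_scan_props().  The tag byte and
 * the little-endian 16-bit length that follows it use the standard PCI
 * VPD large-resource encoding.
 */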
 8190/* ESPC_PIO_EN_ENABLE must be set */
 8191static void niu_pci_vpd_fetch(struct niu *np, u32 start)
 8192{
 8193	u32 offset;
 8194	int err;
 8195
 8196	err = niu_pci_eeprom_read16_swp(np, start + 1);
 8197	if (err < 0)
 8198		return;
 8199
 8200	offset = err + 3;
 8201
 8202	while (start + offset < ESPC_EEPROM_SIZE) {
 8203		u32 here = start + offset;
 8204		u32 end;
 8205
 8206		err = niu_pci_eeprom_read(np, here);
 8207		if (err != 0x90)
 8208			return;
 8209
 8210		err = niu_pci_eeprom_read16_swp(np, here + 1);
 8211		if (err < 0)
 8212			return;
 8213
 8214		here = start + offset + 3;
 8215		end = start + offset + err;
 8216
 8217		offset += err;
 8218
 8219		err = niu_pci_vpd_scan_props(np, here, end);
 8220		if (err < 0 || err == 1)
 8221			return;
 8222	}
 8223}
 8224
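/* Locate the VPD data inside the expansion ROM.  Follow the 0x55aa ROM
 * headers to the image whose PCI data structure marks it as OBP code
 * (type 0x01) and return the offset of its identifier-string resource
 * (tag 0x82), or 0 if no usable image is found.
 */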
 8225/* ESPC_PIO_EN_ENABLE must be set */
 8226static u32 niu_pci_vpd_offset(struct niu *np)
 8227{
 8228	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
 8229	int err;
 8230
 8231	while (start < end) {
 8232		ret = start;
 8233
 8234		/* ROM header signature?  */
 8235		err = niu_pci_eeprom_read16(np, start +  0);
 8236		if (err != 0x55aa)
 8237			return 0;
 8238
 8239		/* Apply offset to PCI data structure.  */
 8240		err = niu_pci_eeprom_read16(np, start + 23);
 8241		if (err < 0)
 8242			return 0;
 8243		start += err;
 8244
 8245		/* Check for "PCIR" signature.  */
 8246		err = niu_pci_eeprom_read16(np, start +  0);
 8247		if (err != 0x5043)
 8248			return 0;
 8249		err = niu_pci_eeprom_read16(np, start +  2);
 8250		if (err != 0x4952)
 8251			return 0;
 8252
 8253		/* Check for OBP image type.  */
 8254		err = niu_pci_eeprom_read(np, start + 20);
 8255		if (err < 0)
 8256			return 0;
 8257		if (err != 0x01) {
 8258			err = niu_pci_eeprom_read(np, ret + 2);
 8259			if (err < 0)
 8260				return 0;
 8261
 8262			start = ret + (err * 512);
 8263			continue;
 8264		}
 8265
		err = niu_pci_eeprom_read16_swp(np, start + 8);
		if (err < 0)
			return 0;	/* not a -errno: callers use the u32 result as an offset */
 8269		ret += err;
 8270
 8271		err = niu_pci_eeprom_read(np, ret + 0);
 8272		if (err != 0x82)
 8273			return 0;
 8274
 8275		return ret;
 8276	}
 8277
 8278	return 0;
 8279}
 8280
 8281static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
 8282{
 8283	if (!strcmp(phy_prop, "mif")) {
 8284		/* 1G copper, MII */
 8285		np->flags &= ~(NIU_FLAGS_FIBER |
 8286			       NIU_FLAGS_10G);
 8287		np->mac_xcvr = MAC_XCVR_MII;
 8288	} else if (!strcmp(phy_prop, "xgf")) {
 8289		/* 10G fiber, XPCS */
 8290		np->flags |= (NIU_FLAGS_10G |
 8291			      NIU_FLAGS_FIBER);
 8292		np->mac_xcvr = MAC_XCVR_XPCS;
 8293	} else if (!strcmp(phy_prop, "pcs")) {
 8294		/* 1G fiber, PCS */
 8295		np->flags &= ~NIU_FLAGS_10G;
 8296		np->flags |= NIU_FLAGS_FIBER;
 8297		np->mac_xcvr = MAC_XCVR_PCS;
 8298	} else if (!strcmp(phy_prop, "xgc")) {
 8299		/* 10G copper, XPCS */
 8300		np->flags |= NIU_FLAGS_10G;
 8301		np->flags &= ~NIU_FLAGS_FIBER;
 8302		np->mac_xcvr = MAC_XCVR_XPCS;
 8303	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
 8304		/* 10G Serdes or 1G Serdes, default to 10G */
 8305		np->flags |= NIU_FLAGS_10G;
 8306		np->flags &= ~NIU_FLAGS_FIBER;
 8307		np->flags |= NIU_FLAGS_XCVR_SERDES;
 8308		np->mac_xcvr = MAC_XCVR_XPCS;
 8309	} else {
 8310		return -EINVAL;
 8311	}
 8312	return 0;
 8313}
 8314
 8315static int niu_pci_vpd_get_nports(struct niu *np)
 8316{
 8317	int ports = 0;
 8318
 8319	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
 8320	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
 8321	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
 8322	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
 8323	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
 8324		ports = 4;
 8325	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
 8326		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
 8327		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
 8328		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
 8329		ports = 2;
 8330	}
 8331
 8332	return ports;
 8333}
 8334
 8335static void niu_pci_vpd_validate(struct niu *np)
 8336{
 8337	struct net_device *dev = np->dev;
 8338	struct niu_vpd *vpd = &np->vpd;
 8339	u8 val8;
 8340
 8341	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
 8342		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
 8343
 8344		np->flags &= ~NIU_FLAGS_VPD_VALID;
 8345		return;
 8346	}
 8347
 8348	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
 8349	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 8350		np->flags |= NIU_FLAGS_10G;
 8351		np->flags &= ~NIU_FLAGS_FIBER;
 8352		np->flags |= NIU_FLAGS_XCVR_SERDES;
 8353		np->mac_xcvr = MAC_XCVR_PCS;
 8354		if (np->port > 1) {
 8355			np->flags |= NIU_FLAGS_FIBER;
 8356			np->flags &= ~NIU_FLAGS_10G;
 8357		}
 8358		if (np->flags & NIU_FLAGS_10G)
 8359			np->mac_xcvr = MAC_XCVR_XPCS;
 8360	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 8361		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 8362			      NIU_FLAGS_HOTPLUG_PHY);
 8363	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
 8364		dev_err(np->device, "Illegal phy string [%s]\n",
 8365			np->vpd.phy_type);
 8366		dev_err(np->device, "Falling back to SPROM\n");
 8367		np->flags &= ~NIU_FLAGS_VPD_VALID;
 8368		return;
 8369	}
 8370
 8371	memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
 8372
 8373	val8 = dev->dev_addr[5];
 8374	dev->dev_addr[5] += np->port;
 8375	if (dev->dev_addr[5] < val8)
 8376		dev->dev_addr[4]++;
 8377}
 8378
 8379static int niu_pci_probe_sprom(struct niu *np)
 8380{
 8381	struct net_device *dev = np->dev;
 8382	int len, i;
 8383	u64 val, sum;
 8384	u8 val8;
 8385
 8386	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
 8387	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
 8388	len = val / 4;
 8389
 8390	np->eeprom_len = len;
 8391
 8392	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8393		     "SPROM: Image size %llu\n", (unsigned long long)val);
 8394
 8395	sum = 0;
 8396	for (i = 0; i < len; i++) {
 8397		val = nr64(ESPC_NCR(i));
 8398		sum += (val >>  0) & 0xff;
 8399		sum += (val >>  8) & 0xff;
 8400		sum += (val >> 16) & 0xff;
 8401		sum += (val >> 24) & 0xff;
 8402	}
 8403	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8404		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
 8405	if ((sum & 0xff) != 0xab) {
 8406		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
 8407		return -EINVAL;
 8408	}
 8409
 8410	val = nr64(ESPC_PHY_TYPE);
 8411	switch (np->port) {
 8412	case 0:
 8413		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
 8414			ESPC_PHY_TYPE_PORT0_SHIFT;
 8415		break;
 8416	case 1:
 8417		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
 8418			ESPC_PHY_TYPE_PORT1_SHIFT;
 8419		break;
 8420	case 2:
 8421		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
 8422			ESPC_PHY_TYPE_PORT2_SHIFT;
 8423		break;
 8424	case 3:
 8425		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
 8426			ESPC_PHY_TYPE_PORT3_SHIFT;
 8427		break;
 8428	default:
 8429		dev_err(np->device, "Bogus port number %u\n",
 8430			np->port);
 8431		return -EINVAL;
 8432	}
 8433	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8434		     "SPROM: PHY type %x\n", val8);
 8435
 8436	switch (val8) {
 8437	case ESPC_PHY_TYPE_1G_COPPER:
 8438		/* 1G copper, MII */
 8439		np->flags &= ~(NIU_FLAGS_FIBER |
 8440			       NIU_FLAGS_10G);
 8441		np->mac_xcvr = MAC_XCVR_MII;
 8442		break;
 8443
 8444	case ESPC_PHY_TYPE_1G_FIBER:
 8445		/* 1G fiber, PCS */
 8446		np->flags &= ~NIU_FLAGS_10G;
 8447		np->flags |= NIU_FLAGS_FIBER;
 8448		np->mac_xcvr = MAC_XCVR_PCS;
 8449		break;
 8450
 8451	case ESPC_PHY_TYPE_10G_COPPER:
 8452		/* 10G copper, XPCS */
 8453		np->flags |= NIU_FLAGS_10G;
 8454		np->flags &= ~NIU_FLAGS_FIBER;
 8455		np->mac_xcvr = MAC_XCVR_XPCS;
 8456		break;
 8457
 8458	case ESPC_PHY_TYPE_10G_FIBER:
 8459		/* 10G fiber, XPCS */
 8460		np->flags |= (NIU_FLAGS_10G |
 8461			      NIU_FLAGS_FIBER);
 8462		np->mac_xcvr = MAC_XCVR_XPCS;
 8463		break;
 8464
 8465	default:
 8466		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
 8467		return -EINVAL;
 8468	}
 8469
 8470	val = nr64(ESPC_MAC_ADDR0);
 8471	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8472		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
 8473	dev->dev_addr[0] = (val >>  0) & 0xff;
 8474	dev->dev_addr[1] = (val >>  8) & 0xff;
 8475	dev->dev_addr[2] = (val >> 16) & 0xff;
 8476	dev->dev_addr[3] = (val >> 24) & 0xff;
 8477
 8478	val = nr64(ESPC_MAC_ADDR1);
 8479	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8480		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
 8481	dev->dev_addr[4] = (val >>  0) & 0xff;
 8482	dev->dev_addr[5] = (val >>  8) & 0xff;
 8483
 8484	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
 8485		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
 8486			dev->dev_addr);
 8487		return -EINVAL;
 8488	}
 8489
 8490	val8 = dev->dev_addr[5];
 8491	dev->dev_addr[5] += np->port;
 8492	if (dev->dev_addr[5] < val8)
 8493		dev->dev_addr[4]++;
 8494
 8495	val = nr64(ESPC_MOD_STR_LEN);
 8496	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8497		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
 8498	if (val >= 8 * 4)
 8499		return -EINVAL;
 8500
 8501	for (i = 0; i < val; i += 4) {
 8502		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
 8503
 8504		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
 8505		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
 8506		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
 8507		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
 8508	}
 8509	np->vpd.model[val] = '\0';
 8510
 8511	val = nr64(ESPC_BD_MOD_STR_LEN);
 8512	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8513		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
 8514	if (val >= 4 * 4)
 8515		return -EINVAL;
 8516
 8517	for (i = 0; i < val; i += 4) {
 8518		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
 8519
 8520		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
 8521		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
 8522		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
 8523		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
 8524	}
 8525	np->vpd.board_model[val] = '\0';
 8526
 8527	np->vpd.mac_num =
 8528		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
 8529	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8530		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
 8531
 8532	return 0;
 8533}
 8534
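/* Ports 0 and 1 are driven by the 10G-capable XMACs, ports 2 and 3 by
 * the 1G BMACs.  Also establish the parent's port count here if this is
 * the first port to be probed.
 */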
 8535static int niu_get_and_validate_port(struct niu *np)
 8536{
 8537	struct niu_parent *parent = np->parent;
 8538
 8539	if (np->port <= 1)
 8540		np->flags |= NIU_FLAGS_XMAC;
 8541
 8542	if (!parent->num_ports) {
 8543		if (parent->plat_type == PLAT_TYPE_NIU) {
 8544			parent->num_ports = 2;
 8545		} else {
 8546			parent->num_ports = niu_pci_vpd_get_nports(np);
 8547			if (!parent->num_ports) {
 8548				/* Fall back to SPROM as last resort.
 8549				 * This will fail on most cards.
 8550				 */
 8551				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
 8552					ESPC_NUM_PORTS_MACS_VAL;
 8553
 8554				/* All of the current probing methods fail on
 8555				 * Maramba on-board parts.
 8556				 */
 8557				if (!parent->num_ports)
 8558					parent->num_ports = 4;
 8559			}
 8560		}
 8561	}
 8562
 8563	if (np->port >= parent->num_ports)
 8564		return -ENODEV;
 8565
 8566	return 0;
 8567}
 8568
 8569static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
 8570		      int dev_id_1, int dev_id_2, u8 phy_port, int type)
 8571{
 8572	u32 id = (dev_id_1 << 16) | dev_id_2;
 8573	u8 idx;
 8574
 8575	if (dev_id_1 < 0 || dev_id_2 < 0)
 8576		return 0;
 8577	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
 8578		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
 8579		 * test covers the 8706 as well.
 8580		 */
 8581		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
 8582		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
 8583			return 0;
 8584	} else {
 8585		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
 8586			return 0;
 8587	}
 8588
 8589	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
 8590		parent->index, id,
 8591		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
 8592		type == PHY_TYPE_PCS ? "PCS" : "MII",
 8593		phy_port);
 8594
 8595	if (p->cur[type] >= NIU_MAX_PORTS) {
 8596		pr_err("Too many PHY ports\n");
 8597		return -EINVAL;
 8598	}
 8599	idx = p->cur[type];
 8600	p->phy_id[type][idx] = id;
 8601	p->phy_port[type][idx] = phy_port;
 8602	p->cur[type] = idx + 1;
 8603	return 0;
 8604}
 8605
 8606static int port_has_10g(struct phy_probe_info *p, int port)
 8607{
 8608	int i;
 8609
 8610	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
 8611		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
 8612			return 1;
 8613	}
 8614	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
 8615		if (p->phy_port[PHY_TYPE_PCS][i] == port)
 8616			return 1;
 8617	}
 8618
 8619	return 0;
 8620}
 8621
 8622static int count_10g_ports(struct phy_probe_info *p, int *lowest)
 8623{
 8624	int port, cnt;
 8625
 8626	cnt = 0;
 8627	*lowest = 32;
 8628	for (port = 8; port < 32; port++) {
 8629		if (port_has_10g(p, port)) {
 8630			if (!cnt)
 8631				*lowest = port;
 8632			cnt++;
 8633		}
 8634	}
 8635
 8636	return cnt;
 8637}
 8638
 8639static int count_1g_ports(struct phy_probe_info *p, int *lowest)
 8640{
 8641	*lowest = 32;
 8642	if (p->cur[PHY_TYPE_MII])
 8643		*lowest = p->phy_port[PHY_TYPE_MII][0];
 8644
 8645	return p->cur[PHY_TYPE_MII];
 8646}
 8647
 8648static void niu_n2_divide_channels(struct niu_parent *parent)
 8649{
 8650	int num_ports = parent->num_ports;
 8651	int i;
 8652
 8653	for (i = 0; i < num_ports; i++) {
 8654		parent->rxchan_per_port[i] = (16 / num_ports);
 8655		parent->txchan_per_port[i] = (16 / num_ports);
 8656
 8657		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
 8658			parent->index, i,
 8659			parent->rxchan_per_port[i],
 8660			parent->txchan_per_port[i]);
 8661	}
 8662}
 8663
 8664static void niu_divide_channels(struct niu_parent *parent,
 8665				int num_10g, int num_1g)
 8666{
 8667	int num_ports = parent->num_ports;
 8668	int rx_chans_per_10g, rx_chans_per_1g;
 8669	int tx_chans_per_10g, tx_chans_per_1g;
 8670	int i, tot_rx, tot_tx;
 8671
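	/* Worked example (assuming the Neptune channel counts of
	 * NIU_NUM_RXCHAN = 16 and NIU_NUM_TXCHAN = 24): with two 10G and
	 * two 1G ports, each 1G port gets 16/8 = 2 RX and 24/6 = 4 TX
	 * channels, and each 10G port gets (16 - 2*2)/2 = 6 RX and
	 * (24 - 2*4)/2 = 8 TX channels, so all 16 RX and 24 TX channels
	 * are consumed.
	 */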
 8672	if (!num_10g || !num_1g) {
 8673		rx_chans_per_10g = rx_chans_per_1g =
 8674			(NIU_NUM_RXCHAN / num_ports);
 8675		tx_chans_per_10g = tx_chans_per_1g =
 8676			(NIU_NUM_TXCHAN / num_ports);
 8677	} else {
 8678		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
 8679		rx_chans_per_10g = (NIU_NUM_RXCHAN -
 8680				    (rx_chans_per_1g * num_1g)) /
 8681			num_10g;
 8682
 8683		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
 8684		tx_chans_per_10g = (NIU_NUM_TXCHAN -
 8685				    (tx_chans_per_1g * num_1g)) /
 8686			num_10g;
 8687	}
 8688
 8689	tot_rx = tot_tx = 0;
 8690	for (i = 0; i < num_ports; i++) {
 8691		int type = phy_decode(parent->port_phy, i);
 8692
 8693		if (type == PORT_TYPE_10G) {
 8694			parent->rxchan_per_port[i] = rx_chans_per_10g;
 8695			parent->txchan_per_port[i] = tx_chans_per_10g;
 8696		} else {
 8697			parent->rxchan_per_port[i] = rx_chans_per_1g;
 8698			parent->txchan_per_port[i] = tx_chans_per_1g;
 8699		}
 8700		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
 8701			parent->index, i,
 8702			parent->rxchan_per_port[i],
 8703			parent->txchan_per_port[i]);
 8704		tot_rx += parent->rxchan_per_port[i];
 8705		tot_tx += parent->txchan_per_port[i];
 8706	}
 8707
 8708	if (tot_rx > NIU_NUM_RXCHAN) {
 8709		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
 8710		       parent->index, tot_rx);
 8711		for (i = 0; i < num_ports; i++)
 8712			parent->rxchan_per_port[i] = 1;
 8713	}
 8714	if (tot_tx > NIU_NUM_TXCHAN) {
 8715		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
 8716		       parent->index, tot_tx);
 8717		for (i = 0; i < num_ports; i++)
 8718			parent->txchan_per_port[i] = 1;
 8719	}
 8720	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
		pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
			parent->index, tot_rx, tot_tx);
 8723	}
 8724}
 8725
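/* Split the NIU_NUM_RDC_TABLES RDC groups evenly across the ports and
 * fill each table's NIU_RDC_TABLE_SLOTS slots round-robin with that
 * port's RX DMA channels.  A port's first channel becomes its default
 * RDC.
 */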
 8726static void niu_divide_rdc_groups(struct niu_parent *parent,
 8727				  int num_10g, int num_1g)
 8728{
 8729	int i, num_ports = parent->num_ports;
 8730	int rdc_group, rdc_groups_per_port;
 8731	int rdc_channel_base;
 8732
 8733	rdc_group = 0;
 8734	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
 8735
 8736	rdc_channel_base = 0;
 8737
 8738	for (i = 0; i < num_ports; i++) {
 8739		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
 8740		int grp, num_channels = parent->rxchan_per_port[i];
 8741		int this_channel_offset;
 8742
 8743		tp->first_table_num = rdc_group;
 8744		tp->num_tables = rdc_groups_per_port;
 8745		this_channel_offset = 0;
 8746		for (grp = 0; grp < tp->num_tables; grp++) {
 8747			struct rdc_table *rt = &tp->tables[grp];
 8748			int slot;
 8749
 8750			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
 8751				parent->index, i, tp->first_table_num + grp);
 8752			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
 8753				rt->rxdma_channel[slot] =
 8754					rdc_channel_base + this_channel_offset;
 8755
 8756				pr_cont("%d ", rt->rxdma_channel[slot]);
 8757
 8758				if (++this_channel_offset == num_channels)
 8759					this_channel_offset = 0;
 8760			}
 8761			pr_cont("]\n");
 8762		}
 8763
 8764		parent->rdc_default[i] = rdc_channel_base;
 8765
 8766		rdc_channel_base += num_channels;
 8767		rdc_group += rdc_groups_per_port;
 8768	}
 8769}
 8770
 8771static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
 8772			       struct phy_probe_info *info)
 8773{
 8774	unsigned long flags;
 8775	int port, err;
 8776
 8777	memset(info, 0, sizeof(*info));
 8778
	/* Ports 0 to 7 are reserved for the onboard SERDES; probe the rest.  */
 8780	niu_lock_parent(np, flags);
 8781	err = 0;
 8782	for (port = 8; port < 32; port++) {
 8783		int dev_id_1, dev_id_2;
 8784
 8785		dev_id_1 = mdio_read(np, port,
 8786				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
 8787		dev_id_2 = mdio_read(np, port,
 8788				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
 8789		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8790				 PHY_TYPE_PMA_PMD);
 8791		if (err)
 8792			break;
 8793		dev_id_1 = mdio_read(np, port,
 8794				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
 8795		dev_id_2 = mdio_read(np, port,
 8796				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
 8797		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8798				 PHY_TYPE_PCS);
 8799		if (err)
 8800			break;
 8801		dev_id_1 = mii_read(np, port, MII_PHYSID1);
 8802		dev_id_2 = mii_read(np, port, MII_PHYSID2);
 8803		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8804				 PHY_TYPE_MII);
 8805		if (err)
 8806			break;
 8807	}
 8808	niu_unlock_parent(np, flags);
 8809
 8810	return err;
 8811}
 8812
 8813static int walk_phys(struct niu *np, struct niu_parent *parent)
 8814{
 8815	struct phy_probe_info *info = &parent->phy_probe_info;
 8816	int lowest_10g, lowest_1g;
 8817	int num_10g, num_1g;
 8818	u32 val;
 8819	int err;
 8820
 8821	num_10g = num_1g = 0;
 8822
 8823	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
 8824	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 8825		num_10g = 0;
 8826		num_1g = 2;
 8827		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
 8828		parent->num_ports = 4;
 8829		val = (phy_encode(PORT_TYPE_1G, 0) |
 8830		       phy_encode(PORT_TYPE_1G, 1) |
 8831		       phy_encode(PORT_TYPE_1G, 2) |
 8832		       phy_encode(PORT_TYPE_1G, 3));
 8833	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 8834		num_10g = 2;
 8835		num_1g = 0;
 8836		parent->num_ports = 2;
 8837		val = (phy_encode(PORT_TYPE_10G, 0) |
 8838		       phy_encode(PORT_TYPE_10G, 1));
 8839	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
 8840		   (parent->plat_type == PLAT_TYPE_NIU)) {
 8841		/* this is the Monza case */
 8842		if (np->flags & NIU_FLAGS_10G) {
 8843			val = (phy_encode(PORT_TYPE_10G, 0) |
 8844			       phy_encode(PORT_TYPE_10G, 1));
 8845		} else {
 8846			val = (phy_encode(PORT_TYPE_1G, 0) |
 8847			       phy_encode(PORT_TYPE_1G, 1));
 8848		}
 8849	} else {
 8850		err = fill_phy_probe_info(np, parent, info);
 8851		if (err)
 8852			return err;
 8853
 8854		num_10g = count_10g_ports(info, &lowest_10g);
 8855		num_1g = count_1g_ports(info, &lowest_1g);
 8856
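		/* The cases below encode (num_10g << 4) | num_1g, e.g. 0x24
		 * means two 10G plus four 1G PHYs were found, 0x10 a single
		 * 10G PHY.
		 */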
 8857		switch ((num_10g << 4) | num_1g) {
 8858		case 0x24:
 8859			if (lowest_1g == 10)
 8860				parent->plat_type = PLAT_TYPE_VF_P0;
 8861			else if (lowest_1g == 26)
 8862				parent->plat_type = PLAT_TYPE_VF_P1;
 8863			else
 8864				goto unknown_vg_1g_port;
 8865
 8866			/* fallthru */
 8867		case 0x22:
 8868			val = (phy_encode(PORT_TYPE_10G, 0) |
 8869			       phy_encode(PORT_TYPE_10G, 1) |
 8870			       phy_encode(PORT_TYPE_1G, 2) |
 8871			       phy_encode(PORT_TYPE_1G, 3));
 8872			break;
 8873
 8874		case 0x20:
 8875			val = (phy_encode(PORT_TYPE_10G, 0) |
 8876			       phy_encode(PORT_TYPE_10G, 1));
 8877			break;
 8878
 8879		case 0x10:
 8880			val = phy_encode(PORT_TYPE_10G, np->port);
 8881			break;
 8882
 8883		case 0x14:
 8884			if (lowest_1g == 10)
 8885				parent->plat_type = PLAT_TYPE_VF_P0;
 8886			else if (lowest_1g == 26)
 8887				parent->plat_type = PLAT_TYPE_VF_P1;
 8888			else
 8889				goto unknown_vg_1g_port;
 8890
 8891			/* fallthru */
 8892		case 0x13:
 8893			if ((lowest_10g & 0x7) == 0)
 8894				val = (phy_encode(PORT_TYPE_10G, 0) |
 8895				       phy_encode(PORT_TYPE_1G, 1) |
 8896				       phy_encode(PORT_TYPE_1G, 2) |
 8897				       phy_encode(PORT_TYPE_1G, 3));
 8898			else
 8899				val = (phy_encode(PORT_TYPE_1G, 0) |
 8900				       phy_encode(PORT_TYPE_10G, 1) |
 8901				       phy_encode(PORT_TYPE_1G, 2) |
 8902				       phy_encode(PORT_TYPE_1G, 3));
 8903			break;
 8904
 8905		case 0x04:
 8906			if (lowest_1g == 10)
 8907				parent->plat_type = PLAT_TYPE_VF_P0;
 8908			else if (lowest_1g == 26)
 8909				parent->plat_type = PLAT_TYPE_VF_P1;
 8910			else
 8911				goto unknown_vg_1g_port;
 8912
 8913			val = (phy_encode(PORT_TYPE_1G, 0) |
 8914			       phy_encode(PORT_TYPE_1G, 1) |
 8915			       phy_encode(PORT_TYPE_1G, 2) |
 8916			       phy_encode(PORT_TYPE_1G, 3));
 8917			break;
 8918
 8919		default:
 8920			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
 8921			       num_10g, num_1g);
 8922			return -EINVAL;
 8923		}
 8924	}
 8925
 8926	parent->port_phy = val;
 8927
 8928	if (parent->plat_type == PLAT_TYPE_NIU)
 8929		niu_n2_divide_channels(parent);
 8930	else
 8931		niu_divide_channels(parent, num_10g, num_1g);
 8932
 8933	niu_divide_rdc_groups(parent, num_10g, num_1g);
 8934
 8935	return 0;
 8936
 8937unknown_vg_1g_port:
 8938	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
 8939	return -EINVAL;
 8940}
 8941
 8942static int niu_probe_ports(struct niu *np)
 8943{
 8944	struct niu_parent *parent = np->parent;
 8945	int err, i;
 8946
 8947	if (parent->port_phy == PORT_PHY_UNKNOWN) {
 8948		err = walk_phys(np, parent);
 8949		if (err)
 8950			return err;
 8951
 8952		niu_set_ldg_timer_res(np, 2);
 8953		for (i = 0; i <= LDN_MAX; i++)
 8954			niu_ldn_irq_enable(np, i, 0);
 8955	}
 8956
 8957	if (parent->port_phy == PORT_PHY_INVALID)
 8958		return -EINVAL;
 8959
 8960	return 0;
 8961}
 8962
 8963static int niu_classifier_swstate_init(struct niu *np)
 8964{
 8965	struct niu_classifier *cp = &np->clas;
 8966
 8967	cp->tcam_top = (u16) np->port;
 8968	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
 8969	cp->h1_init = 0xffffffff;
 8970	cp->h2_init = 0xffff;
 8971
 8972	return fflp_early_init(np);
 8973}
 8974
 8975static void niu_link_config_init(struct niu *np)
 8976{
 8977	struct niu_link_config *lp = &np->link_config;
 8978
 8979	lp->advertising = (ADVERTISED_10baseT_Half |
 8980			   ADVERTISED_10baseT_Full |
 8981			   ADVERTISED_100baseT_Half |
 8982			   ADVERTISED_100baseT_Full |
 8983			   ADVERTISED_1000baseT_Half |
 8984			   ADVERTISED_1000baseT_Full |
 8985			   ADVERTISED_10000baseT_Full |
 8986			   ADVERTISED_Autoneg);
 8987	lp->speed = lp->active_speed = SPEED_INVALID;
 8988	lp->duplex = DUPLEX_FULL;
 8989	lp->active_duplex = DUPLEX_INVALID;
 8990	lp->autoneg = 1;
 8991#if 0
 8992	lp->loopback_mode = LOOPBACK_MAC;
 8993	lp->active_speed = SPEED_10000;
 8994	lp->active_duplex = DUPLEX_FULL;
 8995#else
 8996	lp->loopback_mode = LOOPBACK_DISABLED;
 8997#endif
 8998}
 8999
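/* Per-port MAC/IPP/PCS register block offsets.  Ports 0 and 1 use the
 * XMACs, ports 2 and 3 the BMACs; the BMAC ports have no XPCS block, so
 * their offset is poisoned with ~0UL.
 */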
 9000static int niu_init_mac_ipp_pcs_base(struct niu *np)
 9001{
 9002	switch (np->port) {
 9003	case 0:
 9004		np->mac_regs = np->regs + XMAC_PORT0_OFF;
 9005		np->ipp_off  = 0x00000;
 9006		np->pcs_off  = 0x04000;
 9007		np->xpcs_off = 0x02000;
 9008		break;
 9009
 9010	case 1:
 9011		np->mac_regs = np->regs + XMAC_PORT1_OFF;
 9012		np->ipp_off  = 0x08000;
 9013		np->pcs_off  = 0x0a000;
 9014		np->xpcs_off = 0x08000;
 9015		break;
 9016
 9017	case 2:
 9018		np->mac_regs = np->regs + BMAC_PORT2_OFF;
 9019		np->ipp_off  = 0x04000;
 9020		np->pcs_off  = 0x0e000;
 9021		np->xpcs_off = ~0UL;
 9022		break;
 9023
 9024	case 3:
 9025		np->mac_regs = np->regs + BMAC_PORT3_OFF;
 9026		np->ipp_off  = 0x0c000;
 9027		np->pcs_off  = 0x12000;
 9028		np->xpcs_off = ~0UL;
 9029		break;
 9030
 9031	default:
 9032		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
 9033		return -EINVAL;
 9034	}
 9035
 9036	return 0;
 9037}
 9038
 9039static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
 9040{
 9041	struct msix_entry msi_vec[NIU_NUM_LDG];
 9042	struct niu_parent *parent = np->parent;
 9043	struct pci_dev *pdev = np->pdev;
 9044	int i, num_irqs;
 9045	u8 first_ldg;
 9046
 9047	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
 9048	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
 9049		ldg_num_map[i] = first_ldg + i;
 9050
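	/* One vector per RX and TX channel plus one for the MAC LDN; port 0
	 * also owns the MIF and SYSERR LDNs (see the LDG ordering comment
	 * in niu_ldg_init()).
	 */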
 9051	num_irqs = (parent->rxchan_per_port[np->port] +
 9052		    parent->txchan_per_port[np->port] +
 9053		    (np->port == 0 ? 3 : 1));
 9054	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
 9055
 9056	for (i = 0; i < num_irqs; i++) {
 9057		msi_vec[i].vector = 0;
 9058		msi_vec[i].entry = i;
 9059	}
 9060
 9061	num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
 9062	if (num_irqs < 0) {
 9063		np->flags &= ~NIU_FLAGS_MSIX;
 9064		return;
 9065	}
 9066
 9067	np->flags |= NIU_FLAGS_MSIX;
 9068	for (i = 0; i < num_irqs; i++)
 9069		np->ldg[i].irq = msi_vec[i].vector;
 9070	np->num_ldg = num_irqs;
 9071}
 9072
 9073static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
 9074{
 9075#ifdef CONFIG_SPARC64
 9076	struct platform_device *op = np->op;
 9077	const u32 *int_prop;
 9078	int i;
 9079
 9080	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
 9081	if (!int_prop)
 9082		return -ENODEV;
 9083
 9084	for (i = 0; i < op->archdata.num_irqs; i++) {
 9085		ldg_num_map[i] = int_prop[i];
 9086		np->ldg[i].irq = op->archdata.irqs[i];
 9087	}
 9088
 9089	np->num_ldg = op->archdata.num_irqs;
 9090
 9091	return 0;
 9092#else
 9093	return -EINVAL;
 9094#endif
 9095}
 9096
 9097static int niu_ldg_init(struct niu *np)
 9098{
 9099	struct niu_parent *parent = np->parent;
 9100	u8 ldg_num_map[NIU_NUM_LDG];
 9101	int first_chan, num_chan;
 9102	int i, err, ldg_rotor;
 9103	u8 port;
 9104
 9105	np->num_ldg = 1;
 9106	np->ldg[0].irq = np->dev->irq;
 9107	if (parent->plat_type == PLAT_TYPE_NIU) {
 9108		err = niu_n2_irq_init(np, ldg_num_map);
 9109		if (err)
 9110			return err;
 9111	} else
 9112		niu_try_msix(np, ldg_num_map);
 9113
 9114	port = np->port;
 9115	for (i = 0; i < np->num_ldg; i++) {
 9116		struct niu_ldg *lp = &np->ldg[i];
 9117
 9118		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
 9119
 9120		lp->np = np;
 9121		lp->ldg_num = ldg_num_map[i];
 9122		lp->timer = 2; /* XXX */
 9123
		/* On the N2 NIU the firmware has set up the SID mappings so
		 * that each LDG is routed to the proper interrupt in the NCU
		 * interrupt table.
		 */
 9128		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 9129			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
 9130			if (err)
 9131				return err;
 9132		}
 9133	}
 9134
 9135	/* We adopt the LDG assignment ordering used by the N2 NIU
 9136	 * 'interrupt' properties because that simplifies a lot of
 9137	 * things.  This ordering is:
 9138	 *
 9139	 *	MAC
 9140	 *	MIF	(if port zero)
 9141	 *	SYSERR	(if port zero)
 9142	 *	RX channels
 9143	 *	TX channels
 9144	 */
 9145
 9146	ldg_rotor = 0;
 9147
 9148	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
 9149				  LDN_MAC(port));
 9150	if (err)
 9151		return err;
 9152
 9153	ldg_rotor++;
 9154	if (ldg_rotor == np->num_ldg)
 9155		ldg_rotor = 0;
 9156
 9157	if (port == 0) {
 9158		err = niu_ldg_assign_ldn(np, parent,
 9159					 ldg_num_map[ldg_rotor],
 9160					 LDN_MIF);
 9161		if (err)
 9162			return err;
 9163
 9164		ldg_rotor++;
 9165		if (ldg_rotor == np->num_ldg)
 9166			ldg_rotor = 0;
 9167
 9168		err = niu_ldg_assign_ldn(np, parent,
 9169					 ldg_num_map[ldg_rotor],
 9170					 LDN_DEVICE_ERROR);
 9171		if (err)
 9172			return err;
 9173
 9174		ldg_rotor++;
 9175		if (ldg_rotor == np->num_ldg)
 9176			ldg_rotor = 0;
 9177
 9178	}
 9179
 9180	first_chan = 0;
 9181	for (i = 0; i < port; i++)
 9182		first_chan += parent->rxchan_per_port[i];
 9183	num_chan = parent->rxchan_per_port[port];
 9184
 9185	for (i = first_chan; i < (first_chan + num_chan); i++) {
 9186		err = niu_ldg_assign_ldn(np, parent,
 9187					 ldg_num_map[ldg_rotor],
 9188					 LDN_RXDMA(i));
 9189		if (err)
 9190			return err;
 9191		ldg_rotor++;
 9192		if (ldg_rotor == np->num_ldg)
 9193			ldg_rotor = 0;
 9194	}
 9195
 9196	first_chan = 0;
 9197	for (i = 0; i < port; i++)
 9198		first_chan += parent->txchan_per_port[i];
 9199	num_chan = parent->txchan_per_port[port];
 9200	for (i = first_chan; i < (first_chan + num_chan); i++) {
 9201		err = niu_ldg_assign_ldn(np, parent,
 9202					 ldg_num_map[ldg_rotor],
 9203					 LDN_TXDMA(i));
 9204		if (err)
 9205			return err;
 9206		ldg_rotor++;
 9207		if (ldg_rotor == np->num_ldg)
 9208			ldg_rotor = 0;
 9209	}
 9210
 9211	return 0;
 9212}
 9213
 9214static void niu_ldg_free(struct niu *np)
 9215{
 9216	if (np->flags & NIU_FLAGS_MSIX)
 9217		pci_disable_msix(np->pdev);
 9218}
 9219
 9220static int niu_get_of_props(struct niu *np)
 9221{
 9222#ifdef CONFIG_SPARC64
 9223	struct net_device *dev = np->dev;
 9224	struct device_node *dp;
 9225	const char *phy_type;
 9226	const u8 *mac_addr;
 9227	const char *model;
 9228	int prop_len;
 9229
 9230	if (np->parent->plat_type == PLAT_TYPE_NIU)
 9231		dp = np->op->dev.of_node;
 9232	else
 9233		dp = pci_device_to_OF_node(np->pdev);
 9234
 9235	phy_type = of_get_property(dp, "phy-type", &prop_len);
 9236	if (!phy_type) {
 9237		netdev_err(dev, "%s: OF node lacks phy-type property\n",
 9238			   dp->full_name);
 9239		return -EINVAL;
 9240	}
 9241
 9242	if (!strcmp(phy_type, "none"))
 9243		return -ENODEV;
 9244
 9245	strcpy(np->vpd.phy_type, phy_type);
 9246
 9247	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
 9248		netdev_err(dev, "%s: Illegal phy string [%s]\n",
 9249			   dp->full_name, np->vpd.phy_type);
 9250		return -EINVAL;
 9251	}
 9252
 9253	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
 9254	if (!mac_addr) {
 9255		netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
 9256			   dp->full_name);
 9257		return -EINVAL;
 9258	}
 9259	if (prop_len != dev->addr_len) {
 9260		netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
 9261			   dp->full_name, prop_len);
 9262	}
 9263	memcpy(dev->dev_addr, mac_addr, dev->addr_len);
 9264	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
 9265		netdev_err(dev, "%s: OF MAC address is invalid\n",
 9266			   dp->full_name);
 9267		netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr);
 9268		return -EINVAL;
 9269	}
 9270
 9271	model = of_get_property(dp, "model", &prop_len);
 9272
 9273	if (model)
 9274		strcpy(np->vpd.model, model);
 9275
 9276	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
 9277		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 9278			NIU_FLAGS_HOTPLUG_PHY);
 9279	}
 9280
 9281	return 0;
 9282#else
 9283	return -EINVAL;
 9284#endif
 9285}
 9286
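/* Establish the chip invariants: try OF properties first, then the VPD
 * in the onboard EEPROM, and finally fall back to probing the SPROM
 * directly, before dividing up ports, channels and interrupts.
 */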
 9287static int niu_get_invariants(struct niu *np)
 9288{
 9289	int err, have_props;
 9290	u32 offset;
 9291
 9292	err = niu_get_of_props(np);
 9293	if (err == -ENODEV)
 9294		return err;
 9295
 9296	have_props = !err;
 9297
 9298	err = niu_init_mac_ipp_pcs_base(np);
 9299	if (err)
 9300		return err;
 9301
 9302	if (have_props) {
 9303		err = niu_get_and_validate_port(np);
 9304		if (err)
 9305			return err;
 9306
 9307	} else  {
 9308		if (np->parent->plat_type == PLAT_TYPE_NIU)
 9309			return -EINVAL;
 9310
 9311		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
 9312		offset = niu_pci_vpd_offset(np);
 9313		netif_printk(np, probe, KERN_DEBUG, np->dev,
 9314			     "%s() VPD offset [%08x]\n", __func__, offset);
 9315		if (offset)
 9316			niu_pci_vpd_fetch(np, offset);
 9317		nw64(ESPC_PIO_EN, 0);
 9318
 9319		if (np->flags & NIU_FLAGS_VPD_VALID) {
 9320			niu_pci_vpd_validate(np);
 9321			err = niu_get_and_validate_port(np);
 9322			if (err)
 9323				return err;
 9324		}
 9325
 9326		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
 9327			err = niu_get_and_validate_port(np);
 9328			if (err)
 9329				return err;
 9330			err = niu_pci_probe_sprom(np);
 9331			if (err)
 9332				return err;
 9333		}
 9334	}
 9335
 9336	err = niu_probe_ports(np);
 9337	if (err)
 9338		return err;
 9339
 9340	niu_ldg_init(np);
 9341
 9342	niu_classifier_swstate_init(np);
 9343	niu_link_config_init(np);
 9344
 9345	err = niu_determine_phy_disposition(np);
 9346	if (!err)
 9347		err = niu_init_link(np);
 9348
 9349	return err;
 9350}
 9351
 9352static LIST_HEAD(niu_parent_list);
 9353static DEFINE_MUTEX(niu_parent_lock);
 9354static int niu_parent_index;
 9355
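/* sysfs attributes exported on the shared "niu-board" platform device.
 * Illustrative only (the exact path depends on the parent index), e.g.
 * a dual-port 10G board might show:
 *
 *	$ cat /sys/devices/platform/niu-board.0/port_phy
 *	10G 10G
 */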
 9356static ssize_t show_port_phy(struct device *dev,
 9357			     struct device_attribute *attr, char *buf)
 9358{
 9359	struct platform_device *plat_dev = to_platform_device(dev);
 9360	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9361	u32 port_phy = p->port_phy;
 9362	char *orig_buf = buf;
 9363	int i;
 9364
 9365	if (port_phy == PORT_PHY_UNKNOWN ||
 9366	    port_phy == PORT_PHY_INVALID)
 9367		return 0;
 9368
 9369	for (i = 0; i < p->num_ports; i++) {
 9370		const char *type_str;
 9371		int type;
 9372
 9373		type = phy_decode(port_phy, i);
 9374		if (type == PORT_TYPE_10G)
 9375			type_str = "10G";
 9376		else
 9377			type_str = "1G";
 9378		buf += sprintf(buf,
 9379			       (i == 0) ? "%s" : " %s",
 9380			       type_str);
 9381	}
 9382	buf += sprintf(buf, "\n");
 9383	return buf - orig_buf;
 9384}
 9385
 9386static ssize_t show_plat_type(struct device *dev,
 9387			      struct device_attribute *attr, char *buf)
 9388{
 9389	struct platform_device *plat_dev = to_platform_device(dev);
 9390	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9391	const char *type_str;
 9392
 9393	switch (p->plat_type) {
 9394	case PLAT_TYPE_ATLAS:
 9395		type_str = "atlas";
 9396		break;
 9397	case PLAT_TYPE_NIU:
 9398		type_str = "niu";
 9399		break;
 9400	case PLAT_TYPE_VF_P0:
 9401		type_str = "vf_p0";
 9402		break;
 9403	case PLAT_TYPE_VF_P1:
 9404		type_str = "vf_p1";
 9405		break;
 9406	default:
 9407		type_str = "unknown";
 9408		break;
 9409	}
 9410
 9411	return sprintf(buf, "%s\n", type_str);
 9412}
 9413
 9414static ssize_t __show_chan_per_port(struct device *dev,
 9415				    struct device_attribute *attr, char *buf,
 9416				    int rx)
 9417{
 9418	struct platform_device *plat_dev = to_platform_device(dev);
 9419	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9420	char *orig_buf = buf;
 9421	u8 *arr;
 9422	int i;
 9423
 9424	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
 9425
 9426	for (i = 0; i < p->num_ports; i++) {
 9427		buf += sprintf(buf,
 9428			       (i == 0) ? "%d" : " %d",
 9429			       arr[i]);
 9430	}
 9431	buf += sprintf(buf, "\n");
 9432
 9433	return buf - orig_buf;
 9434}
 9435
 9436static ssize_t show_rxchan_per_port(struct device *dev,
 9437				    struct device_attribute *attr, char *buf)
 9438{
 9439	return __show_chan_per_port(dev, attr, buf, 1);
 9440}
 9441
 9442static ssize_t show_txchan_per_port(struct device *dev,
 9443				    struct device_attribute *attr, char *buf)
 9444{
	return __show_chan_per_port(dev, attr, buf, 0);	/* 0 selects txchan_per_port */
 9446}
 9447
 9448static ssize_t show_num_ports(struct device *dev,
 9449			      struct device_attribute *attr, char *buf)
 9450{
 9451	struct platform_device *plat_dev = to_platform_device(dev);
 9452	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9453
 9454	return sprintf(buf, "%d\n", p->num_ports);
 9455}
 9456
 9457static struct device_attribute niu_parent_attributes[] = {
 9458	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
 9459	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
 9460	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
 9461	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
 9462	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
 9463	{}
 9464};
 9465
 9466static struct niu_parent *niu_new_parent(struct niu *np,
 9467					 union niu_parent_id *id, u8 ptype)
 9468{
 9469	struct platform_device *plat_dev;
 9470	struct niu_parent *p;
 9471	int i;
 9472
 9473	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
 9474						   NULL, 0);
 9475	if (IS_ERR(plat_dev))
 9476		return NULL;
 9477
 9478	for (i = 0; niu_parent_attributes[i].attr.name; i++) {
 9479		int err = device_create_file(&plat_dev->dev,
 9480					     &niu_parent_attributes[i]);
 9481		if (err)
 9482			goto fail_unregister;
 9483	}
 9484
 9485	p = kzalloc(sizeof(*p), GFP_KERNEL);
 9486	if (!p)
 9487		goto fail_unregister;
 9488
 9489	p->index = niu_parent_index++;
 9490
 9491	plat_dev->dev.platform_data = p;
 9492	p->plat_dev = plat_dev;
 9493
 9494	memcpy(&p->id, id, sizeof(*id));
 9495	p->plat_type = ptype;
 9496	INIT_LIST_HEAD(&p->list);
 9497	atomic_set(&p->refcnt, 0);
 9498	list_add(&p->list, &niu_parent_list);
 9499	spin_lock_init(&p->lock);
 9500
 9501	p->rxdma_clock_divider = 7500;
 9502
 9503	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
 9504	if (p->plat_type == PLAT_TYPE_NIU)
 9505		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
 9506
 9507	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
 9508		int index = i - CLASS_CODE_USER_PROG1;
 9509
 9510		p->tcam_key[index] = TCAM_KEY_TSEL;
 9511		p->flow_key[index] = (FLOW_KEY_IPSA |
 9512				      FLOW_KEY_IPDA |
 9513				      FLOW_KEY_PROTO |
 9514				      (FLOW_KEY_L4_BYTE12 <<
 9515				       FLOW_KEY_L4_0_SHIFT) |
 9516				      (FLOW_KEY_L4_BYTE12 <<
 9517				       FLOW_KEY_L4_1_SHIFT));
 9518	}
 9519
 9520	for (i = 0; i < LDN_MAX + 1; i++)
 9521		p->ldg_map[i] = LDG_INVALID;
 9522
 9523	return p;
 9524
 9525fail_unregister:
 9526	platform_device_unregister(plat_dev);
 9527	return NULL;
 9528}
 9529
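/* Find (or create) the refcounted niu_parent shared by all ports on the
 * same physical board, keyed by @id, and attach this port to it via a
 * "port%d" sysfs symlink.
 */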
 9530static struct niu_parent *niu_get_parent(struct niu *np,
 9531					 union niu_parent_id *id, u8 ptype)
 9532{
 9533	struct niu_parent *p, *tmp;
 9534	int port = np->port;
 9535
 9536	mutex_lock(&niu_parent_lock);
 9537	p = NULL;
 9538	list_for_each_entry(tmp, &niu_parent_list, list) {
 9539		if (!memcmp(id, &tmp->id, sizeof(*id))) {
 9540			p = tmp;
 9541			break;
 9542		}
 9543	}
 9544	if (!p)
 9545		p = niu_new_parent(np, id, ptype);
 9546
 9547	if (p) {
		char port_name[8];
 9549		int err;
 9550
 9551		sprintf(port_name, "port%d", port);
 9552		err = sysfs_create_link(&p->plat_dev->dev.kobj,
 9553					&np->device->kobj,
 9554					port_name);
 9555		if (!err) {
 9556			p->ports[port] = np;
 9557			atomic_inc(&p->refcnt);
 9558		}
 9559	}
 9560	mutex_unlock(&niu_parent_lock);
 9561
 9562	return p;
 9563}
 9564
 9565static void niu_put_parent(struct niu *np)
 9566{
 9567	struct niu_parent *p = np->parent;
 9568	u8 port = np->port;
	char port_name[8];
 9570
 9571	BUG_ON(!p || p->ports[port] != np);
 9572
 9573	netif_printk(np, probe, KERN_DEBUG, np->dev,
 9574		     "%s() port[%u]\n", __func__, port);
 9575
 9576	sprintf(port_name, "port%d", port);
 9577
 9578	mutex_lock(&niu_parent_lock);
 9579
 9580	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
 9581
 9582	p->ports[port] = NULL;
 9583	np->parent = NULL;
 9584
 9585	if (atomic_dec_and_test(&p->refcnt)) {
 9586		list_del(&p->list);
 9587		platform_device_unregister(p->plat_dev);
 9588	}
 9589
 9590	mutex_unlock(&niu_parent_lock);
 9591}
 9592
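/* Thin wrappers around the generic DMA API.  They are packaged as a
 * struct niu_ops so the same core code can drive both the PCI (Atlas)
 * variant and the physically-addressed N2 NIU variant below.
 */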
 9593static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
 9594				    u64 *handle, gfp_t flag)
 9595{
 9596	dma_addr_t dh;
 9597	void *ret;
 9598
 9599	ret = dma_alloc_coherent(dev, size, &dh, flag);
 9600	if (ret)
 9601		*handle = dh;
 9602	return ret;
 9603}
 9604
 9605static void niu_pci_free_coherent(struct device *dev, size_t size,
 9606				  void *cpu_addr, u64 handle)
 9607{
 9608	dma_free_coherent(dev, size, cpu_addr, handle);
 9609}
 9610
 9611static u64 niu_pci_map_page(struct device *dev, struct page *page,
 9612			    unsigned long offset, size_t size,
 9613			    enum dma_data_direction direction)
 9614{
 9615	return dma_map_page(dev, page, offset, size, direction);
 9616}
 9617
 9618static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
 9619			       size_t size, enum dma_data_direction direction)
 9620{
 9621	dma_unmap_page(dev, dma_address, size, direction);
 9622}
 9623
 9624static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
 9625			      size_t size,
 9626			      enum dma_data_direction direction)
 9627{
 9628	return dma_map_single(dev, cpu_addr, size, direction);
 9629}
 9630
 9631static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
 9632				 size_t size,
 9633				 enum dma_data_direction direction)
 9634{
 9635	dma_unmap_single(dev, dma_address, size, direction);
 9636}
 9637
 9638static const struct niu_ops niu_pci_ops = {
 9639	.alloc_coherent	= niu_pci_alloc_coherent,
 9640	.free_coherent	= niu_pci_free_coherent,
 9641	.map_page	= niu_pci_map_page,
 9642	.unmap_page	= niu_pci_unmap_page,
 9643	.map_single	= niu_pci_map_single,
 9644	.unmap_single	= niu_pci_unmap_single,
 9645};
 9646
 9647static void niu_driver_version(void)
 9648{
 9649	static int niu_version_printed;
 9650
 9651	if (niu_version_printed++ == 0)
 9652		pr_info("%s", version);
 9653}
 9654
 9655static struct net_device *niu_alloc_and_init(struct device *gen_dev,
 9656					     struct pci_dev *pdev,
 9657					     struct platform_device *op,
 9658					     const struct niu_ops *ops, u8 port)
 9659{
 9660	struct net_device *dev;
 9661	struct niu *np;
 9662
 9663	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
 9664	if (!dev)
 9665		return NULL;
 9666
 9667	SET_NETDEV_DEV(dev, gen_dev);
 9668
 9669	np = netdev_priv(dev);
 9670	np->dev = dev;
 9671	np->pdev = pdev;
 9672	np->op = op;
 9673	np->device = gen_dev;
 9674	np->ops = ops;
 9675
 9676	np->msg_enable = niu_debug;
 9677
 9678	spin_lock_init(&np->lock);
 9679	INIT_WORK(&np->reset_task, niu_reset_task);
 9680
 9681	np->port = port;
 9682
 9683	return dev;
 9684}
 9685
 9686static const struct net_device_ops niu_netdev_ops = {
 9687	.ndo_open		= niu_open,
 9688	.ndo_stop		= niu_close,
 9689	.ndo_start_xmit		= niu_start_xmit,
 9690	.ndo_get_stats64	= niu_get_stats,
 9691	.ndo_set_rx_mode	= niu_set_rx_mode,
 9692	.ndo_validate_addr	= eth_validate_addr,
 9693	.ndo_set_mac_address	= niu_set_mac_addr,
 9694	.ndo_do_ioctl		= niu_ioctl,
 9695	.ndo_tx_timeout		= niu_tx_timeout,
 9696	.ndo_change_mtu		= niu_change_mtu,
 9697};
 9698
 9699static void niu_assign_netdev_ops(struct net_device *dev)
 9700{
 9701	dev->netdev_ops = &niu_netdev_ops;
 9702	dev->ethtool_ops = &niu_ethtool_ops;
 9703	dev->watchdog_timeo = NIU_TX_TIMEOUT;
 9704}
 9705
 9706static void niu_device_announce(struct niu *np)
 9707{
 9708	struct net_device *dev = np->dev;
 9709
 9710	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
 9711
 9712	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
 9713		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
 9714				dev->name,
 9715				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
 9716				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
 9717				(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
 9718				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
 9719				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
 9720				np->vpd.phy_type);
 9721	} else {
 9722		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
 9723				dev->name,
 9724				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
 9725				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
 9726				(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
 9727				 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
 9728				  "COPPER")),
 9729				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
 9730				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
 9731				np->vpd.phy_type);
 9732	}
 9733}
 9734
 9735static void niu_set_basic_features(struct net_device *dev)
 9736{
 9737	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
 9738	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
 9739}
 9740
 9741static int niu_pci_init_one(struct pci_dev *pdev,
 9742			    const struct pci_device_id *ent)
 9743{
 9744	union niu_parent_id parent_id;
 9745	struct net_device *dev;
 9746	struct niu *np;
 9747	int err;
 9748	u64 dma_mask;
 9749
 9750	niu_driver_version();
 9751
 9752	err = pci_enable_device(pdev);
 9753	if (err) {
 9754		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 9755		return err;
 9756	}
 9757
 9758	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
 9759	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 9760		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
 9761		err = -ENODEV;
 9762		goto err_out_disable_pdev;
 9763	}
 9764
 9765	err = pci_request_regions(pdev, DRV_MODULE_NAME);
 9766	if (err) {
 9767		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
 9768		goto err_out_disable_pdev;
 9769	}
 9770
 9771	if (!pci_is_pcie(pdev)) {
 9772		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
 9773		err = -ENODEV;
 9774		goto err_out_free_res;
 9775	}
 9776
 9777	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
 9778				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
 9779	if (!dev) {
 9780		err = -ENOMEM;
 9781		goto err_out_free_res;
 9782	}
 9783	np = netdev_priv(dev);
 9784
 9785	memset(&parent_id, 0, sizeof(parent_id));
 9786	parent_id.pci.domain = pci_domain_nr(pdev->bus);
 9787	parent_id.pci.bus = pdev->bus->number;
 9788	parent_id.pci.device = PCI_SLOT(pdev->devfn);
 9789
 9790	np->parent = niu_get_parent(np, &parent_id,
 9791				    PLAT_TYPE_ATLAS);
 9792	if (!np->parent) {
 9793		err = -ENOMEM;
 9794		goto err_out_free_dev;
 9795	}
 9796
 9797	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
 9798		PCI_EXP_DEVCTL_NOSNOOP_EN,
 9799		PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
 9800		PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE |
 9801		PCI_EXP_DEVCTL_RELAX_EN);
 9802
 9803	dma_mask = DMA_BIT_MASK(44);
 9804	err = pci_set_dma_mask(pdev, dma_mask);
 9805	if (!err) {
 9806		dev->features |= NETIF_F_HIGHDMA;
 9807		err = pci_set_consistent_dma_mask(pdev, dma_mask);
 9808		if (err) {
 9809			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
 9810			goto err_out_release_parent;
 9811		}
 9812	}
 9813	if (err) {
 9814		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 9815		if (err) {
 9816			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
 9817			goto err_out_release_parent;
 9818		}
 9819	}
 9820
 9821	niu_set_basic_features(dev);
 9822
 9823	dev->priv_flags |= IFF_UNICAST_FLT;
 9824
 9825	np->regs = pci_ioremap_bar(pdev, 0);
 9826	if (!np->regs) {
 9827		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
 9828		err = -ENOMEM;
 9829		goto err_out_release_parent;
 9830	}
 9831
 9832	pci_set_master(pdev);
 9833	pci_save_state(pdev);
 9834
 9835	dev->irq = pdev->irq;
 9836
 9837	niu_assign_netdev_ops(dev);
 9838
 9839	err = niu_get_invariants(np);
 9840	if (err) {
 9841		if (err != -ENODEV)
 9842			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
 9843		goto err_out_iounmap;
 9844	}
 9845
 9846	err = register_netdev(dev);
 9847	if (err) {
 9848		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
 9849		goto err_out_iounmap;
 9850	}
 9851
 9852	pci_set_drvdata(pdev, dev);
 9853
 9854	niu_device_announce(np);
 9855
 9856	return 0;
 9857
 9858err_out_iounmap:
 9859	if (np->regs) {
 9860		iounmap(np->regs);
 9861		np->regs = NULL;
 9862	}
 9863
 9864err_out_release_parent:
 9865	niu_put_parent(np);
 9866
 9867err_out_free_dev:
 9868	free_netdev(dev);
 9869
 9870err_out_free_res:
 9871	pci_release_regions(pdev);
 9872
 9873err_out_disable_pdev:
 9874	pci_disable_device(pdev);
 9875
 9876	return err;
 9877}
 9878
 9879static void niu_pci_remove_one(struct pci_dev *pdev)
 9880{
 9881	struct net_device *dev = pci_get_drvdata(pdev);
 9882
 9883	if (dev) {
 9884		struct niu *np = netdev_priv(dev);
 9885
 9886		unregister_netdev(dev);
 9887		if (np->regs) {
 9888			iounmap(np->regs);
 9889			np->regs = NULL;
 9890		}
 9891
 9892		niu_ldg_free(np);
 9893
 9894		niu_put_parent(np);
 9895
 9896		free_netdev(dev);
 9897		pci_release_regions(pdev);
 9898		pci_disable_device(pdev);
 9899	}
 9900}
 9901
 9902static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
 9903{
 9904	struct net_device *dev = pci_get_drvdata(pdev);
 9905	struct niu *np = netdev_priv(dev);
 9906	unsigned long flags;
 9907
 9908	if (!netif_running(dev))
 9909		return 0;
 9910
 9911	flush_work(&np->reset_task);
 9912	niu_netif_stop(np);
 9913
 9914	del_timer_sync(&np->timer);
 9915
 9916	spin_lock_irqsave(&np->lock, flags);
 9917	niu_enable_interrupts(np, 0);
 9918	spin_unlock_irqrestore(&np->lock, flags);
 9919
 9920	netif_device_detach(dev);
 9921
 9922	spin_lock_irqsave(&np->lock, flags);
 9923	niu_stop_hw(np);
 9924	spin_unlock_irqrestore(&np->lock, flags);
 9925
 9926	pci_save_state(pdev);
 9927
 9928	return 0;
 9929}
 9930
 9931static int niu_resume(struct pci_dev *pdev)
 9932{
 9933	struct net_device *dev = pci_get_drvdata(pdev);
 9934	struct niu *np = netdev_priv(dev);
 9935	unsigned long flags;
 9936	int err;
 9937
 9938	if (!netif_running(dev))
 9939		return 0;
 9940
 9941	pci_restore_state(pdev);
 9942
 9943	netif_device_attach(dev);
 9944
 9945	spin_lock_irqsave(&np->lock, flags);
 9946
 9947	err = niu_init_hw(np);
 9948	if (!err) {
 9949		np->timer.expires = jiffies + HZ;
 9950		add_timer(&np->timer);
 9951		niu_netif_start(np);
 9952	}
 9953
 9954	spin_unlock_irqrestore(&np->lock, flags);
 9955
 9956	return err;
 9957}
 9958
 9959static struct pci_driver niu_pci_driver = {
 9960	.name		= DRV_MODULE_NAME,
 9961	.id_table	= niu_pci_tbl,
 9962	.probe		= niu_pci_init_one,
 9963	.remove		= niu_pci_remove_one,
 9964	.suspend	= niu_suspend,
 9965	.resume		= niu_resume,
 9966};
 9967
 9968#ifdef CONFIG_SPARC64
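/* On the N2 NIU the device works with physical addresses directly, so
 * these "DMA" ops just resolve CPU addresses via __pa()/page_to_phys()
 * and the unmap/free paths have no IOMMU state to tear down.
 */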
 9969static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
 9970				     u64 *dma_addr, gfp_t flag)
 9971{
 9972	unsigned long order = get_order(size);
 9973	unsigned long page = __get_free_pages(flag, order);
 9974
 9975	if (page == 0UL)
 9976		return NULL;
 9977	memset((char *)page, 0, PAGE_SIZE << order);
 9978	*dma_addr = __pa(page);
 9979
 9980	return (void *) page;
 9981}
 9982
 9983static void niu_phys_free_coherent(struct device *dev, size_t size,
 9984				   void *cpu_addr, u64 handle)
 9985{
 9986	unsigned long order = get_order(size);
 9987
 9988	free_pages((unsigned long) cpu_addr, order);
 9989}
 9990
 9991static u64 niu_phys_map_page(struct device *dev, struct page *page,
 9992			     unsigned long offset, size_t size,
 9993			     enum dma_data_direction direction)
 9994{
 9995	return page_to_phys(page) + offset;
 9996}
 9997
 9998static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
 9999				size_t size, enum dma_data_direction direction)
10000{
10001	/* Nothing to do.  */
10002}
10003
10004static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
10005			       size_t size,
10006			       enum dma_data_direction direction)
10007{
10008	return __pa(cpu_addr);
10009}
10010
10011static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
10012				  size_t size,
10013				  enum dma_data_direction direction)
10014{
10015	/* Nothing to do.  */
10016}
10017
10018static const struct niu_ops niu_phys_ops = {
10019	.alloc_coherent	= niu_phys_alloc_coherent,
10020	.free_coherent	= niu_phys_free_coherent,
10021	.map_page	= niu_phys_map_page,
10022	.unmap_page	= niu_phys_unmap_page,
10023	.map_single	= niu_phys_map_single,
10024	.unmap_single	= niu_phys_unmap_single,
10025};
10026
10027static int niu_of_probe(struct platform_device *op)
10028{
10029	union niu_parent_id parent_id;
10030	struct net_device *dev;
10031	struct niu *np;
10032	const u32 *reg;
10033	int err;
10034
10035	niu_driver_version();
10036
10037	reg = of_get_property(op->dev.of_node, "reg", NULL);
10038	if (!reg) {
10039		dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
10040			op->dev.of_node->full_name);
10041		return -ENODEV;
10042	}
10043
10044	dev = niu_alloc_and_init(&op->dev, NULL, op,
10045				 &niu_phys_ops, reg[0] & 0x1);
10046	if (!dev) {
10047		err = -ENOMEM;
10048		goto err_out;
10049	}
10050	np = netdev_priv(dev);
10051
10052	memset(&parent_id, 0, sizeof(parent_id));
10053	parent_id.of = of_get_parent(op->dev.of_node);
10054
10055	np->parent = niu_get_parent(np, &parent_id,
10056				    PLAT_TYPE_NIU);
10057	if (!np->parent) {
10058		err = -ENOMEM;
10059		goto err_out_free_dev;
10060	}
10061
10062	niu_set_basic_features(dev);
10063
10064	np->regs = of_ioremap(&op->resource[1], 0,
10065			      resource_size(&op->resource[1]),
10066			      "niu regs");
10067	if (!np->regs) {
10068		dev_err(&op->dev, "Cannot map device registers, aborting\n");
10069		err = -ENOMEM;
10070		goto err_out_release_parent;
10071	}
10072
10073	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
10074				    resource_size(&op->resource[2]),
10075				    "niu vregs-1");
10076	if (!np->vir_regs_1) {
10077		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
10078		err = -ENOMEM;
10079		goto err_out_iounmap;
10080	}
10081
10082	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
10083				    resource_size(&op->resource[3]),
10084				    "niu vregs-2");
10085	if (!np->vir_regs_2) {
10086		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
10087		err = -ENOMEM;
10088		goto err_out_iounmap;
10089	}
10090
10091	niu_assign_netdev_ops(dev);
10092
10093	err = niu_get_invariants(np);
10094	if (err) {
10095		if (err != -ENODEV)
10096			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
10097		goto err_out_iounmap;
10098	}
10099
10100	err = register_netdev(dev);
10101	if (err) {
10102		dev_err(&op->dev, "Cannot register net device, aborting\n");
10103		goto err_out_iounmap;
10104	}
10105
10106	platform_set_drvdata(op, dev);
10107
10108	niu_device_announce(np);
10109
10110	return 0;
10111
10112err_out_iounmap:
10113	if (np->vir_regs_1) {
10114		of_iounmap(&op->resource[2], np->vir_regs_1,
10115			   resource_size(&op->resource[2]));
10116		np->vir_regs_1 = NULL;
10117	}
10118
10119	if (np->vir_regs_2) {
10120		of_iounmap(&op->resource[3], np->vir_regs_2,
10121			   resource_size(&op->resource[3]));
10122		np->vir_regs_2 = NULL;
10123	}
10124
10125	if (np->regs) {
10126		of_iounmap(&op->resource[1], np->regs,
10127			   resource_size(&op->resource[1]));
10128		np->regs = NULL;
10129	}
10130
10131err_out_release_parent:
10132	niu_put_parent(np);
10133
10134err_out_free_dev:
10135	free_netdev(dev);
10136
10137err_out:
10138	return err;
10139}
10140
10141static int niu_of_remove(struct platform_device *op)
10142{
10143	struct net_device *dev = platform_get_drvdata(op);
10144
10145	if (dev) {
10146		struct niu *np = netdev_priv(dev);
10147
10148		unregister_netdev(dev);
10149
10150		if (np->vir_regs_1) {
10151			of_iounmap(&op->resource[2], np->vir_regs_1,
10152				   resource_size(&op->resource[2]));
10153			np->vir_regs_1 = NULL;
10154		}
10155
10156		if (np->vir_regs_2) {
10157			of_iounmap(&op->resource[3], np->vir_regs_2,
10158				   resource_size(&op->resource[3]));
10159			np->vir_regs_2 = NULL;
10160		}
10161
10162		if (np->regs) {
10163			of_iounmap(&op->resource[1], np->regs,
10164				   resource_size(&op->resource[1]));
10165			np->regs = NULL;
10166		}
10167
10168		niu_ldg_free(np);
10169
10170		niu_put_parent(np);
10171
10172		free_netdev(dev);
10173	}
10174	return 0;
10175}
10176
10177static const struct of_device_id niu_match[] = {
10178	{
10179		.name = "network",
10180		.compatible = "SUNW,niusl",
10181	},
10182	{},
10183};
10184MODULE_DEVICE_TABLE(of, niu_match);
10185
10186static struct platform_driver niu_of_driver = {
10187	.driver = {
10188		.name = "niu",
10189		.owner = THIS_MODULE,
10190		.of_match_table = niu_match,
10191	},
10192	.probe		= niu_of_probe,
10193	.remove		= niu_of_remove,
10194};
10195
10196#endif /* CONFIG_SPARC64 */
10197
10198static int __init niu_init(void)
10199{
10200	int err = 0;
10201
10202	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
10203
10204	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
10205
10206#ifdef CONFIG_SPARC64
10207	err = platform_driver_register(&niu_of_driver);
10208#endif
10209
10210	if (!err) {
10211		err = pci_register_driver(&niu_pci_driver);
10212#ifdef CONFIG_SPARC64
10213		if (err)
10214			platform_driver_unregister(&niu_of_driver);
10215#endif
10216	}
10217
10218	return err;
10219}
10220
10221static void __exit niu_exit(void)
10222{
10223	pci_unregister_driver(&niu_pci_driver);
10224#ifdef CONFIG_SPARC64
10225	platform_driver_unregister(&niu_of_driver);
10226#endif
10227}
10228
10229module_init(niu_init);
10230module_exit(niu_exit);