/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/io.h>
#include <linux/of_device.h>

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif
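
/* Note: this readq/writeq fallback splits every 64-bit register access
 * into two 32-bit accesses (low word first), so it is not atomic with
 * respect to hardware updates that land between the two halves; on
 * 64-bit platforms the architecture provides native readq/writeq and
 * this fallback is compiled out.
 */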

static const struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))
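
/* All chip access goes through these 64-bit accessors: the IPP, PCS
 * and XPCS helpers add a per-port block offset (computed at probe
 * time) to the main register window np->regs, while the MAC helpers
 * use their own mapping, np->mac_regs.
 */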

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
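
/* The BUILD_BUG_ON() in these wrappers rejects nonsensical constant
 * poll parameters at compile time.  A typical self-clearing reset
 * sequence then reads (illustrative register/bit names, not defined
 * in this excerpt):
 *
 *	err = niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
 *					 XTXMAC_SW_RST_REG_RS,
 *					 1000, 100, "XTXMAC_SW_RST");
 *
 * i.e. write the reset bit, then poll up to 1000 times at 100us
 * intervals for the hardware to clear it again.
 */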

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}
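
/* Logical device numbers (LDNs) are split across two banks of per-LDN
 * interrupt mask registers: LD_IM0 covers LDNs 0-63, LD_IM1 the rest
 * up to LDN_MAX.  Clearing the mask bit enables the source, setting
 * it masks the source off.
 */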

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

static u32 phy_encode(u32 type, int port)
{
	return type << (port * 2);
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}
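
/* The parent's port_phy word packs one 2-bit port type per port.
 * Worked example, assuming PORT_TYPE_MASK is 0x3: phy_encode(0x2, 1)
 * yields 0x8, placing the type in bits 3:2, and phy_decode(0x8, 1)
 * shifts back down and masks, recovering 0x2.
 */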

static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}
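
/* Clause 45 MDIO is a two-frame protocol: an address frame first
 * latches the register number in the PHY, then a separate read or
 * write frame moves the data.  mdio_wait() polls the MIF turnaround
 * bit for frame completion and returns the 16-bit data field, which
 * is why a non-negative return value doubles as the read result.
 */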

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}
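
/* MDIO registers are only 16 bits wide, so each 32-bit SERDES lane
 * configuration word is written as a pair: bits 15:0 go to the _L
 * register and bits 31:16 to the matching _H register.
 */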

/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}

static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}
	return 0;
}

static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}

static int esr_reset(struct niu *np)
{
	u32 uninitialized_var(reset);
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
			   np->port, reset);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}

static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}

static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}

static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}
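
/* In link_status_10g_serdes() above, the unnamed constants are status
 * bits: bit 12 of XPCS_STATUS(0) appears to be the XPCS lane-alignment
 * indication and bit 24 of XMAC_INTER2 a latched fault, so the 10G
 * link is only reported up when the lanes are aligned and no fault is
 * pending (our reading of the raw masks; the driver leaves them
 * undocumented).
 */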

static int link_status_mii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int err;
	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
	int supported, advertising, active_speed, active_duplex;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (unlikely(err < 0))
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (unlikely(err < 0))
		return err;
	bmsr = err;

	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
	if (unlikely(err < 0))
		return err;
	advert = err;

	err = mii_read(np, np->phy_addr, MII_LPA);
	if (unlikely(err < 0))
		return err;
	lpa = err;

	if (likely(bmsr & BMSR_ESTATEN)) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (unlikely(err < 0))
			return err;
		estatus = err;

		err = mii_read(np, np->phy_addr, MII_CTRL1000);
		if (unlikely(err < 0))
			return err;
		ctrl1000 = err;

		err = mii_read(np, np->phy_addr, MII_STAT1000);
		if (unlikely(err < 0))
			return err;
		stat1000 = err;
	} else
		estatus = ctrl1000 = stat1000 = 0;

	supported = 0;
	if (bmsr & BMSR_ANEGCAPABLE)
		supported |= SUPPORTED_Autoneg;
	if (bmsr & BMSR_10HALF)
		supported |= SUPPORTED_10baseT_Half;
	if (bmsr & BMSR_10FULL)
		supported |= SUPPORTED_10baseT_Full;
	if (bmsr & BMSR_100HALF)
		supported |= SUPPORTED_100baseT_Half;
	if (bmsr & BMSR_100FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (estatus & ESTATUS_1000_THALF)
		supported |= SUPPORTED_1000baseT_Half;
	if (estatus & ESTATUS_1000_TFULL)
		supported |= SUPPORTED_1000baseT_Full;
	lp->supported = supported;

	advertising = mii_adv_to_ethtool_adv_t(advert);
	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);

	if (bmcr & BMCR_ANENABLE) {
		int neg, neg1000;

		lp->active_autoneg = 1;
		advertising |= ADVERTISED_Autoneg;

		neg = advert & lpa;
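		/* CTRL1000's advertisement bits sit two positions below
		 * STAT1000's link-partner bits (e.g. ADVERTISE_1000FULL is
		 * 0x0200 while LPA_1000FULL is 0x0800), so shifting left by
		 * two lines them up and the AND yields the negotiated
		 * 1000BASE-T abilities.
		 */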
		neg1000 = (ctrl1000 << 2) & stat1000;

		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
			active_speed = SPEED_1000;
		else if (neg & LPA_100)
			active_speed = SPEED_100;
		else if (neg & (LPA_10HALF | LPA_10FULL))
			active_speed = SPEED_10;
		else
			active_speed = SPEED_INVALID;

		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
			active_duplex = DUPLEX_FULL;
		else if (active_speed != SPEED_INVALID)
			active_duplex = DUPLEX_HALF;
		else
			active_duplex = DUPLEX_INVALID;
	} else {
		lp->active_autoneg = 0;

		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
			active_speed = SPEED_1000;
		else if (bmcr & BMCR_SPEED100)
			active_speed = SPEED_100;
		else
			active_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			active_duplex = DUPLEX_FULL;
		else
			active_duplex = DUPLEX_HALF;
	}

	lp->active_advertising = advertising;
	lp->active_speed = active_speed;
	lp->active_duplex = active_duplex;
	*link_up_p = !!(bmsr & BMSR_LSTATUS);

	return 0;
}

static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = -EINVAL;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;
		adv = err;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;
		lpa = err;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);

	err = link_status_mii(np, link_up_p);
	lp->supported |= SUPPORTED_TP;
	lp->active_advertising |= ADVERTISED_TP;

	spin_unlock_irqrestore(&np->lock, flags);
	return err;
}

static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0 || err == 0xffff)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
			   np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}

/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}

static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}

static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int	err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}

static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info("Port %u cable not connected or bad cable\n",
				np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info("Port %u optical module is bad or missing\n",
				np->port);
		}
	}

	return 0;
}

static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return err;
}

static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
			   np->port, err);
		return -ENODEV;
	}

	return 0;
}

static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}

static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	if (lp->autoneg) {
		u16 ctrl1000;

		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
		if ((bmsr & BMSR_10HALF) &&
			(lp->advertising & ADVERTISED_10baseT_Half))
			adv |= ADVERTISE_10HALF;
		if ((bmsr & BMSR_10FULL) &&
			(lp->advertising & ADVERTISED_10baseT_Full))
			adv |= ADVERTISE_10FULL;
		if ((bmsr & BMSR_100HALF) &&
			(lp->advertising & ADVERTISED_100baseT_Half))
			adv |= ADVERTISE_100HALF;
		if ((bmsr & BMSR_100FULL) &&
			(lp->advertising & ADVERTISED_100baseT_Full))
			adv |= ADVERTISE_100FULL;
		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
		if (err)
			return err;

		if (likely(bmsr & BMSR_ESTATEN)) {
			ctrl1000 = 0;
			if ((estat & ESTATUS_1000_THALF) &&
				(lp->advertising & ADVERTISED_1000baseT_Half))
				ctrl1000 |= ADVERTISE_1000HALF;
			if ((estat & ESTATUS_1000_TFULL) &&
				(lp->advertising & ADVERTISED_1000baseT_Full))
				ctrl1000 |= ADVERTISE_1000FULL;
			err = mii_write(np, np->phy_addr,
					MII_CTRL1000, ctrl1000);
			if (err)
				return err;
		}

		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		/* !lp->autoneg */
		int fulldpx;

		if (lp->duplex == DUPLEX_FULL) {
			bmcr |= BMCR_FULLDPLX;
			fulldpx = 1;
		} else if (lp->duplex == DUPLEX_HALF)
			fulldpx = 0;
		else
			return -EINVAL;

		if (lp->speed == SPEED_1000) {
			/* if X-full requested while not supported, or
			   X-half requested while not supported... */
			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED1000;
		} else if (lp->speed == SPEED_100) {
			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
				(!fulldpx && !(bmsr & BMSR_100HALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED100;
		} else if (lp->speed == SPEED_10) {
			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
				(!fulldpx && !(bmsr & BMSR_10HALF)))
				return -EINVAL;
		} else
			return -EINVAL;
	}

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

#if 0
	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);
#endif

	return 0;
}

static int xcvr_init_1g(struct niu *np)
{
	u64 val;

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
}

static int niu_xcvr_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->xcvr_init)
		err = ops->xcvr_init(np);

	return err;
}

static int niu_serdes_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

	return err;
}

static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);

static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
			   "10Mbit/sec",
			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		netif_warn(np, link, dev, "Link is down\n");
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}

static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PCS Register : 3.0001.2 == 1: read twice */
 1955	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1956			MRVL88X2011_PMA_PMD_STATUS_1);
 1957	if (err < 0)
 1958		goto out;
 1959
 1960	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
 1961			MRVL88X2011_PMA_PMD_STATUS_1);
 1962	if (err < 0)
 1963		goto out;
 1964
 1965	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
 1966
 1967        /* Check XGXS Register : 4.0018.[0-3,12] */
 1968	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
 1969			MRVL88X2011_10G_XGXS_LANE_STAT);
 1970	if (err < 0)
 1971		goto out;
 1972
 1973	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
 1974		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
 1975		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
 1976		    0x800))
 1977		link_up = (pma_status && pcs_status) ? 1 : 0;
 1978
 1979	np->link_config.active_speed = SPEED_10000;
 1980	np->link_config.active_duplex = DUPLEX_FULL;
 1981	err = 0;
 1982out:
 1983	mrvl88x2011_act_led(np, (link_up ?
 1984				 MRVL88X2011_LED_CTL_PCS_ACT :
 1985				 MRVL88X2011_LED_CTL_OFF));
 1986
 1987	*link_up_p = link_up;
 1988	return err;
 1989}
 1990
 1991static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
 1992{
 1993	int err, link_up;
 1994	link_up = 0;
 1995
 1996	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 1997			BCM8704_PMD_RCV_SIGDET);
 1998	if (err < 0 || err == 0xffff)
 1999		goto out;
 2000	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
 2001		err = 0;
 2002		goto out;
 2003	}
 2004
 2005	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 2006			BCM8704_PCS_10G_R_STATUS);
 2007	if (err < 0)
 2008		goto out;
 2009
 2010	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
 2011		err = 0;
 2012		goto out;
 2013	}
 2014
 2015	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 2016			BCM8704_PHYXS_XGXS_LANE_STAT);
 2017	if (err < 0)
 2018		goto out;
 2019	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
 2020		    PHYXS_XGXS_LANE_STAT_MAGIC |
 2021		    PHYXS_XGXS_LANE_STAT_PATTEST |
 2022		    PHYXS_XGXS_LANE_STAT_LANE3 |
 2023		    PHYXS_XGXS_LANE_STAT_LANE2 |
 2024		    PHYXS_XGXS_LANE_STAT_LANE1 |
 2025		    PHYXS_XGXS_LANE_STAT_LANE0)) {
 2026		err = 0;
 2027		np->link_config.active_speed = SPEED_INVALID;
 2028		np->link_config.active_duplex = DUPLEX_INVALID;
 2029		goto out;
 2030	}
 2031
 2032	link_up = 1;
 2033	np->link_config.active_speed = SPEED_10000;
 2034	np->link_config.active_duplex = DUPLEX_FULL;
 2035	err = 0;
 2036
 2037out:
 2038	*link_up_p = link_up;
 2039	return err;
 2040}
 2041
 2042static int link_status_10g_bcom(struct niu *np, int *link_up_p)
 2043{
 2044	int err, link_up;
 2045
 2046	link_up = 0;
 2047
 2048	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
 2049			BCM8704_PMD_RCV_SIGDET);
 2050	if (err < 0)
 2051		goto out;
 2052	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
 2053		err = 0;
 2054		goto out;
 2055	}
 2056
 2057	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
 2058			BCM8704_PCS_10G_R_STATUS);
 2059	if (err < 0)
 2060		goto out;
 2061	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
 2062		err = 0;
 2063		goto out;
 2064	}
 2065
 2066	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
 2067			BCM8704_PHYXS_XGXS_LANE_STAT);
 2068	if (err < 0)
 2069		goto out;
 2070
 2071	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
 2072		    PHYXS_XGXS_LANE_STAT_MAGIC |
 2073		    PHYXS_XGXS_LANE_STAT_LANE3 |
 2074		    PHYXS_XGXS_LANE_STAT_LANE2 |
 2075		    PHYXS_XGXS_LANE_STAT_LANE1 |
 2076		    PHYXS_XGXS_LANE_STAT_LANE0)) {
 2077		err = 0;
 2078		goto out;
 2079	}
 2080
 2081	link_up = 1;
 2082	np->link_config.active_speed = SPEED_10000;
 2083	np->link_config.active_duplex = DUPLEX_FULL;
 2084	err = 0;
 2085
 2086out:
 2087	*link_up_p = link_up;
 2088	return err;
 2089}
 2090
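/* Poll link state for a 10G port by dispatching on the probed PHY
 * type.  In loopback mode the PHY is not consulted and -EINVAL is
 * returned, so callers such as the link timer skip the status update.
 */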
 2091static int link_status_10g(struct niu *np, int *link_up_p)
 2092{
 2093	unsigned long flags;
 2094	int err = -EINVAL;
 2095
 2096	spin_lock_irqsave(&np->lock, flags);
 2097
 2098	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
 2099		int phy_id;
 2100
 2101		phy_id = phy_decode(np->parent->port_phy, np->port);
 2102		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
 2103
 2104		/* handle different phy types */
 2105		switch (phy_id & NIU_PHY_ID_MASK) {
 2106		case NIU_PHY_ID_MRVL88X2011:
 2107			err = link_status_10g_mrvl(np, link_up_p);
 2108			break;
 2109
 2110		default: /* bcom 8704 */
 2111			err = link_status_10g_bcom(np, link_up_p);
 2112			break;
 2113		}
 2114	}
 2115
 2116	spin_unlock_irqrestore(&np->lock, flags);
 2117
 2118	return err;
 2119}
 2120
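/* Infer PHY presence from the SERDES interrupt signal bits: all of
 * the per-port signal-ready and detect bits must be asserted.  Only
 * ports 0 and 1 can report a hot-pluggable 10G PHY.
 */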
 2121static int niu_10g_phy_present(struct niu *np)
 2122{
 2123	u64 sig, mask, val;
 2124
 2125	sig = nr64(ESR_INT_SIGNALS);
 2126	switch (np->port) {
 2127	case 0:
 2128		mask = ESR_INT_SIGNALS_P0_BITS;
 2129		val = (ESR_INT_SRDY0_P0 |
 2130		       ESR_INT_DET0_P0 |
 2131		       ESR_INT_XSRDY_P0 |
 2132		       ESR_INT_XDP_P0_CH3 |
 2133		       ESR_INT_XDP_P0_CH2 |
 2134		       ESR_INT_XDP_P0_CH1 |
 2135		       ESR_INT_XDP_P0_CH0);
 2136		break;
 2137
 2138	case 1:
 2139		mask = ESR_INT_SIGNALS_P1_BITS;
 2140		val = (ESR_INT_SRDY0_P1 |
 2141		       ESR_INT_DET0_P1 |
 2142		       ESR_INT_XSRDY_P1 |
 2143		       ESR_INT_XDP_P1_CH3 |
 2144		       ESR_INT_XDP_P1_CH2 |
 2145		       ESR_INT_XDP_P1_CH1 |
 2146		       ESR_INT_XDP_P1_CH0);
 2147		break;
 2148
 2149	default:
 2150		return 0;
 2151	}
 2152
 2153	if ((sig & mask) != val)
 2154		return 0;
 2155	return 1;
 2156}
 2157
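/* Hotplug variant of the 10G link poll.  On a presence transition the
 * transceiver is re-initialized; if that fails, an MDIO read of 0xffff
 * means there is no PHY at all (back-to-back XAUI), otherwise the
 * present flag is cleared again to debounce the insertion.  Link state
 * is then taken from the BCM8706 whenever a PHY is marked present.
 */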
 2158static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
 2159{
 2160	unsigned long flags;
 2161	int err = 0;
 2162	int phy_present;
 2163	int phy_present_prev;
 2164
 2165	spin_lock_irqsave(&np->lock, flags);
 2166
 2167	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
 2168		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
 2169			1 : 0;
 2170		phy_present = niu_10g_phy_present(np);
 2171		if (phy_present != phy_present_prev) {
 2172			/* state change */
 2173			if (phy_present) {
 2174				/* A NEM was just plugged in */
 2175				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2176				if (np->phy_ops->xcvr_init)
 2177					err = np->phy_ops->xcvr_init(np);
 2178				if (err) {
 2179					err = mdio_read(np, np->phy_addr,
 2180						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
 2181					if (err == 0xffff) {
 2182						/* No mdio, back-to-back XAUI */
 2183						goto out;
 2184					}
 2185					/* debounce */
 2186					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2187				}
 2188			} else {
 2189				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
 2190				*link_up_p = 0;
 2191				netif_warn(np, link, np->dev,
 2192					   "Hotplug PHY Removed\n");
 2193			}
 2194		}
 2195out:
 2196		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
 2197			err = link_status_10g_bcm8706(np, link_up_p);
 2198			if (err == 0xffff) {
 2199				/* No mdio, back-to-back XAUI: it is C10NEM */
 2200				*link_up_p = 1;
 2201				np->link_config.active_speed = SPEED_10000;
 2202				np->link_config.active_duplex = DUPLEX_FULL;
 2203			}
 2204		}
 2205	}
 2206
 2207	spin_unlock_irqrestore(&np->lock, flags);
 2208
 2209	return 0;
 2210}
 2211
 2212static int niu_link_status(struct niu *np, int *link_up_p)
 2213{
 2214	const struct niu_phy_ops *ops = np->phy_ops;
 2215	int err;
 2216
 2217	err = 0;
 2218	if (ops->link_status)
 2219		err = ops->link_status(np, link_up_p);
 2220
 2221	return err;
 2222}
 2223
 2224static void niu_timer(unsigned long __opaque)
 2225{
 2226	struct niu *np = (struct niu *) __opaque;
 2227	unsigned long off;
 2228	int err, link_up;
 2229
 2230	err = niu_link_status(np, &link_up);
 2231	if (!err)
 2232		niu_link_status_common(np, link_up);
 2233
 2234	if (netif_carrier_ok(np->dev))
 2235		off = 5 * HZ;
 2236	else
 2237		off = 1 * HZ;
 2238	np->timer.expires = jiffies + off;
 2239
 2240	add_timer(&np->timer);
 2241}
 2242
 2243static const struct niu_phy_ops phy_ops_10g_serdes = {
 2244	.serdes_init		= serdes_init_10g_serdes,
 2245	.link_status		= link_status_10g_serdes,
 2246};
 2247
 2248static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
 2249	.serdes_init		= serdes_init_niu_10g_serdes,
 2250	.link_status		= link_status_10g_serdes,
 2251};
 2252
 2253static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
 2254	.serdes_init		= serdes_init_niu_1g_serdes,
 2255	.link_status		= link_status_1g_serdes,
 2256};
 2257
 2258static const struct niu_phy_ops phy_ops_1g_rgmii = {
 2259	.xcvr_init		= xcvr_init_1g_rgmii,
 2260	.link_status		= link_status_1g_rgmii,
 2261};
 2262
 2263static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
 2264	.serdes_init		= serdes_init_niu_10g_fiber,
 2265	.xcvr_init		= xcvr_init_10g,
 2266	.link_status		= link_status_10g,
 2267};
 2268
 2269static const struct niu_phy_ops phy_ops_10g_fiber = {
 2270	.serdes_init		= serdes_init_10g,
 2271	.xcvr_init		= xcvr_init_10g,
 2272	.link_status		= link_status_10g,
 2273};
 2274
 2275static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
 2276	.serdes_init		= serdes_init_10g,
 2277	.xcvr_init		= xcvr_init_10g_bcm8706,
 2278	.link_status		= link_status_10g_hotplug,
 2279};
 2280
 2281static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
 2282	.serdes_init		= serdes_init_niu_10g_fiber,
 2283	.xcvr_init		= xcvr_init_10g_bcm8706,
 2284	.link_status		= link_status_10g_hotplug,
 2285};
 2286
 2287static const struct niu_phy_ops phy_ops_10g_copper = {
 2288	.serdes_init		= serdes_init_10g,
 2289	.link_status		= link_status_10g, /* XXX */
 2290};
 2291
 2292static const struct niu_phy_ops phy_ops_1g_fiber = {
 2293	.serdes_init		= serdes_init_1g,
 2294	.xcvr_init		= xcvr_init_1g,
 2295	.link_status		= link_status_1g,
 2296};
 2297
 2298static const struct niu_phy_ops phy_ops_1g_copper = {
 2299	.xcvr_init		= xcvr_init_1g,
 2300	.link_status		= link_status_1g,
 2301};
 2302
 2303struct niu_phy_template {
 2304	const struct niu_phy_ops	*ops;
 2305	u32				phy_addr_base;
 2306};
 2307
 2308static const struct niu_phy_template phy_template_niu_10g_fiber = {
 2309	.ops		= &phy_ops_10g_fiber_niu,
 2310	.phy_addr_base	= 16,
 2311};
 2312
 2313static const struct niu_phy_template phy_template_niu_10g_serdes = {
 2314	.ops		= &phy_ops_10g_serdes_niu,
 2315	.phy_addr_base	= 0,
 2316};
 2317
 2318static const struct niu_phy_template phy_template_niu_1g_serdes = {
 2319	.ops		= &phy_ops_1g_serdes_niu,
 2320	.phy_addr_base	= 0,
 2321};
 2322
 2323static const struct niu_phy_template phy_template_10g_fiber = {
 2324	.ops		= &phy_ops_10g_fiber,
 2325	.phy_addr_base	= 8,
 2326};
 2327
 2328static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
 2329	.ops		= &phy_ops_10g_fiber_hotplug,
 2330	.phy_addr_base	= 8,
 2331};
 2332
 2333static const struct niu_phy_template phy_template_niu_10g_hotplug = {
 2334	.ops		= &phy_ops_niu_10g_hotplug,
 2335	.phy_addr_base	= 8,
 2336};
 2337
 2338static const struct niu_phy_template phy_template_10g_copper = {
 2339	.ops		= &phy_ops_10g_copper,
 2340	.phy_addr_base	= 10,
 2341};
 2342
 2343static const struct niu_phy_template phy_template_1g_fiber = {
 2344	.ops		= &phy_ops_1g_fiber,
 2345	.phy_addr_base	= 0,
 2346};
 2347
 2348static const struct niu_phy_template phy_template_1g_copper = {
 2349	.ops		= &phy_ops_1g_copper,
 2350	.phy_addr_base	= 0,
 2351};
 2352
 2353static const struct niu_phy_template phy_template_1g_rgmii = {
 2354	.ops		= &phy_ops_1g_rgmii,
 2355	.phy_addr_base	= 0,
 2356};
 2357
 2358static const struct niu_phy_template phy_template_10g_serdes = {
 2359	.ops		= &phy_ops_10g_serdes,
 2360	.phy_addr_base	= 0,
 2361};
 2362
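/* Per-port PHY address offsets for ATCA configurations; ports 2 and 3
 * are apparently the 1G RGMII ports.
 */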
 2363static int niu_atca_port_num[4] = {
 2364	0, 0,  11, 10
 2365};
 2366
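/* Bring up the shared 10G SERDES: program the per-port PLL, control
 * and test registers, condition all four lanes, then check the signal
 * ready/detect bits.  If the 10G signals never assert, fall back to
 * 1G SERDES mode (PCS transceiver).
 */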
 2367static int serdes_init_10g_serdes(struct niu *np)
 2368{
 2369	struct niu_link_config *lp = &np->link_config;
 2370	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
 2371	u64 ctrl_val, test_cfg_val, sig, mask, val;
 2372
 2373	switch (np->port) {
 2374	case 0:
 2375		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
 2376		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
 2377		pll_cfg = ENET_SERDES_0_PLL_CFG;
 2378		break;
 2379	case 1:
 2380		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
 2381		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
 2382		pll_cfg = ENET_SERDES_1_PLL_CFG;
 2383		break;
 2384
 2385	default:
 2386		return -EINVAL;
 2387	}
 2388	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
 2389		    ENET_SERDES_CTRL_SDET_1 |
 2390		    ENET_SERDES_CTRL_SDET_2 |
 2391		    ENET_SERDES_CTRL_SDET_3 |
 2392		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
 2393		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
 2394		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
 2395		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
 2396		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
 2397		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
 2398		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
 2399		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
 2400	test_cfg_val = 0;
 2401
 2402	if (lp->loopback_mode == LOOPBACK_PHY) {
 2403		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
 2404				  ENET_SERDES_TEST_MD_0_SHIFT) |
 2405				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2406				  ENET_SERDES_TEST_MD_1_SHIFT) |
 2407				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2408				  ENET_SERDES_TEST_MD_2_SHIFT) |
 2409				 (ENET_TEST_MD_PAD_LOOPBACK <<
 2410				  ENET_SERDES_TEST_MD_3_SHIFT));
 2411	}
 2412
 2413	esr_reset(np);
 2414	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
 2415	nw64(ctrl_reg, ctrl_val);
 2416	nw64(test_cfg_reg, test_cfg_val);
 2417
 2418	/* Initialize all 4 lanes of the SERDES.  */
 2419	for (i = 0; i < 4; i++) {
 2420		u32 rxtx_ctrl, glue0;
 2421		int err;
 2422
 2423		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
 2424		if (err)
 2425			return err;
 2426		err = esr_read_glue0(np, i, &glue0);
 2427		if (err)
 2428			return err;
 2429
 2430		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
 2431		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
 2432			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
 2433
 2434		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
 2435			   ESR_GLUE_CTRL0_THCNT |
 2436			   ESR_GLUE_CTRL0_BLTIME);
 2437		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
 2438			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
 2439			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
 2440			  (BLTIME_300_CYCLES <<
 2441			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
 2442
 2443		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
 2444		if (err)
 2445			return err;
 2446		err = esr_write_glue0(np, i, glue0);
 2447		if (err)
 2448			return err;
 2449	}
 2450
 2451
 2452	sig = nr64(ESR_INT_SIGNALS);
 2453	switch (np->port) {
 2454	case 0:
 2455		mask = ESR_INT_SIGNALS_P0_BITS;
 2456		val = (ESR_INT_SRDY0_P0 |
 2457		       ESR_INT_DET0_P0 |
 2458		       ESR_INT_XSRDY_P0 |
 2459		       ESR_INT_XDP_P0_CH3 |
 2460		       ESR_INT_XDP_P0_CH2 |
 2461		       ESR_INT_XDP_P0_CH1 |
 2462		       ESR_INT_XDP_P0_CH0);
 2463		break;
 2464
 2465	case 1:
 2466		mask = ESR_INT_SIGNALS_P1_BITS;
 2467		val = (ESR_INT_SRDY0_P1 |
 2468		       ESR_INT_DET0_P1 |
 2469		       ESR_INT_XSRDY_P1 |
 2470		       ESR_INT_XDP_P1_CH3 |
 2471		       ESR_INT_XDP_P1_CH2 |
 2472		       ESR_INT_XDP_P1_CH1 |
 2473		       ESR_INT_XDP_P1_CH0);
 2474		break;
 2475
 2476	default:
 2477		return -EINVAL;
 2478	}
 2479
 2480	if ((sig & mask) != val) {
 2481		int err;
 2482		err = serdes_init_1g_serdes(np);
 2483		if (!err) {
 2484			np->flags &= ~NIU_FLAGS_10G;
 2485			np->mac_xcvr = MAC_XCVR_PCS;
 2486		}  else {
 2487			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
 2488				   np->port);
 2489			return -ENODEV;
 2490		}
 2491	}
 2492
 2493	return 0;
 2494}
 2495
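/* Select the phy_ops template and MDIO address for this port from the
 * platform type and the 10G/FIBER/XCVR_SERDES flag combination.  The
 * final PHY address is the template base plus a per-port offset.
 */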
 2496static int niu_determine_phy_disposition(struct niu *np)
 2497{
 2498	struct niu_parent *parent = np->parent;
 2499	u8 plat_type = parent->plat_type;
 2500	const struct niu_phy_template *tp;
 2501	u32 phy_addr_off = 0;
 2502
 2503	if (plat_type == PLAT_TYPE_NIU) {
 2504		switch (np->flags &
 2505			(NIU_FLAGS_10G |
 2506			 NIU_FLAGS_FIBER |
 2507			 NIU_FLAGS_XCVR_SERDES)) {
 2508		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 2509			/* 10G Serdes */
 2510			tp = &phy_template_niu_10g_serdes;
 2511			break;
 2512		case NIU_FLAGS_XCVR_SERDES:
 2513			/* 1G Serdes */
 2514			tp = &phy_template_niu_1g_serdes;
 2515			break;
 2516		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 2517			/* 10G Fiber */
 2518		default:
 2519			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
 2520				tp = &phy_template_niu_10g_hotplug;
 2521				if (np->port == 0)
 2522					phy_addr_off = 8;
 2523				if (np->port == 1)
 2524					phy_addr_off = 12;
 2525			} else {
 2526				tp = &phy_template_niu_10g_fiber;
 2527				phy_addr_off += np->port;
 2528			}
 2529			break;
 2530		}
 2531	} else {
 2532		switch (np->flags &
 2533			(NIU_FLAGS_10G |
 2534			 NIU_FLAGS_FIBER |
 2535			 NIU_FLAGS_XCVR_SERDES)) {
 2536		case 0:
 2537			/* 1G copper */
 2538			tp = &phy_template_1g_copper;
 2539			if (plat_type == PLAT_TYPE_VF_P0)
 2540				phy_addr_off = 10;
 2541			else if (plat_type == PLAT_TYPE_VF_P1)
 2542				phy_addr_off = 26;
 2543
 2544			phy_addr_off += (np->port ^ 0x3);
 2545			break;
 2546
 2547		case NIU_FLAGS_10G:
 2548			/* 10G copper */
 2549			tp = &phy_template_10g_copper;
 2550			break;
 2551
 2552		case NIU_FLAGS_FIBER:
 2553			/* 1G fiber */
 2554			tp = &phy_template_1g_fiber;
 2555			break;
 2556
 2557		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 2558			/* 10G fiber */
 2559			tp = &phy_template_10g_fiber;
 2560			if (plat_type == PLAT_TYPE_VF_P0 ||
 2561			    plat_type == PLAT_TYPE_VF_P1)
 2562				phy_addr_off = 8;
 2563			phy_addr_off += np->port;
 2564			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
 2565				tp = &phy_template_10g_fiber_hotplug;
 2566				if (np->port == 0)
 2567					phy_addr_off = 8;
 2568				if (np->port == 1)
 2569					phy_addr_off = 12;
 2570			}
 2571			break;
 2572
 2573		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 2574		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
 2575		case NIU_FLAGS_XCVR_SERDES:
 2576			switch(np->port) {
 2577			case 0:
 2578			case 1:
 2579				tp = &phy_template_10g_serdes;
 2580				break;
 2581			case 2:
 2582			case 3:
 2583				tp = &phy_template_1g_rgmii;
 2584				break;
 2585			default:
 2586				return -EINVAL;
 2587			}
 2588			phy_addr_off = niu_atca_port_num[np->port];
 2589			break;
 2590
 2591		default:
 2592			return -EINVAL;
 2593		}
 2594	}
 2595
 2596	np->phy_ops = tp->ops;
 2597	np->phy_addr = tp->phy_addr_base + phy_addr_off;
 2598
 2599	return 0;
 2600}
 2601
 2602static int niu_init_link(struct niu *np)
 2603{
 2604	struct niu_parent *parent = np->parent;
 2605	int err, ignore;
 2606
 2607	if (parent->plat_type == PLAT_TYPE_NIU) {
 2608		err = niu_xcvr_init(np);
 2609		if (err)
 2610			return err;
 2611		msleep(200);
 2612	}
 2613	err = niu_serdes_init(np);
 2614	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
 2615		return err;
 2616	msleep(200);
 2617	err = niu_xcvr_init(np);
 2618	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
 2619		niu_link_status(np, &ignore);
 2620	return 0;
 2621}
 2622
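/* The 48-bit station address is programmed as three 16-bit registers;
 * ADDR2 holds the most significant word (addr[0], addr[1]).
 */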
 2623static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
 2624{
 2625	u16 reg0 = addr[4] << 8 | addr[5];
 2626	u16 reg1 = addr[2] << 8 | addr[3];
 2627	u16 reg2 = addr[0] << 8 | addr[1];
 2628
 2629	if (np->flags & NIU_FLAGS_XMAC) {
 2630		nw64_mac(XMAC_ADDR0, reg0);
 2631		nw64_mac(XMAC_ADDR1, reg1);
 2632		nw64_mac(XMAC_ADDR2, reg2);
 2633	} else {
 2634		nw64_mac(BMAC_ADDR0, reg0);
 2635		nw64_mac(BMAC_ADDR1, reg1);
 2636		nw64_mac(BMAC_ADDR2, reg2);
 2637	}
 2638}
 2639
 2640static int niu_num_alt_addr(struct niu *np)
 2641{
 2642	if (np->flags & NIU_FLAGS_XMAC)
 2643		return XMAC_NUM_ALT_ADDR;
 2644	else
 2645		return BMAC_NUM_ALT_ADDR;
 2646}
 2647
 2648static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
 2649{
 2650	u16 reg0 = addr[4] << 8 | addr[5];
 2651	u16 reg1 = addr[2] << 8 | addr[3];
 2652	u16 reg2 = addr[0] << 8 | addr[1];
 2653
 2654	if (index >= niu_num_alt_addr(np))
 2655		return -EINVAL;
 2656
 2657	if (np->flags & NIU_FLAGS_XMAC) {
 2658		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
 2659		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
 2660		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
 2661	} else {
 2662		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
 2663		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
 2664		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
 2665	}
 2666
 2667	return 0;
 2668}
 2669
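/* Enable or disable compare on an alternate MAC address.  In the BMAC
 * compare-enable register, alternate entry 'index' lives at bit
 * index + 1 (bit 0 apparently being the primary address); the XMAC
 * register holds only alternates, starting at bit 0.
 */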
 2670static int niu_enable_alt_mac(struct niu *np, int index, int on)
 2671{
 2672	unsigned long reg;
 2673	u64 val, mask;
 2674
 2675	if (index >= niu_num_alt_addr(np))
 2676		return -EINVAL;
 2677
 2678	if (np->flags & NIU_FLAGS_XMAC) {
 2679		reg = XMAC_ADDR_CMPEN;
 2680		mask = 1 << index;
 2681	} else {
 2682		reg = BMAC_ADDR_CMPEN;
 2683		mask = 1 << (index + 1);
 2684	}
 2685
 2686	val = nr64_mac(reg);
 2687	if (on)
 2688		val |= mask;
 2689	else
 2690		val &= ~mask;
 2691	nw64_mac(reg, val);
 2692
 2693	return 0;
 2694}
 2695
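/* Program a HOST_INFO entry: the default RDC (Receive DMA Channel)
 * table number for the MAC, plus the MAC-preference (MPR) bit.
 */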
 2696static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
 2697				   int num, int mac_pref)
 2698{
 2699	u64 val = nr64_mac(reg);
 2700	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
 2701	val |= num;
 2702	if (mac_pref)
 2703		val |= HOST_INFO_MPR;
 2704	nw64_mac(reg, val);
 2705}
 2706
 2707static int __set_rdc_table_num(struct niu *np,
 2708			       int xmac_index, int bmac_index,
 2709			       int rdc_table_num, int mac_pref)
 2710{
 2711	unsigned long reg;
 2712
 2713	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
 2714		return -EINVAL;
 2715	if (np->flags & NIU_FLAGS_XMAC)
 2716		reg = XMAC_HOST_INFO(xmac_index);
 2717	else
 2718		reg = BMAC_HOST_INFO(bmac_index);
 2719	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
 2720	return 0;
 2721}
 2722
 2723static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
 2724					 int mac_pref)
 2725{
 2726	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
 2727}
 2728
 2729static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
 2730					   int mac_pref)
 2731{
 2732	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
 2733}
 2734
 2735static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
 2736				     int table_num, int mac_pref)
 2737{
 2738	if (idx >= niu_num_alt_addr(np))
 2739		return -EINVAL;
 2740	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
 2741}
 2742
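/* Each VLAN table entry carries even parity over its two halves:
 * PARITY0 covers the ports 0/1 byte and PARITY1 the ports 2/3 byte.
 * Both bits are recomputed whenever an entry is rewritten.
 */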
 2743static u64 vlan_entry_set_parity(u64 reg_val)
 2744{
 2745	u64 port01_mask;
 2746	u64 port23_mask;
 2747
 2748	port01_mask = 0x00ff;
 2749	port23_mask = 0xff00;
 2750
 2751	if (hweight64(reg_val & port01_mask) & 1)
 2752		reg_val |= ENET_VLAN_TBL_PARITY0;
 2753	else
 2754		reg_val &= ~ENET_VLAN_TBL_PARITY0;
 2755
 2756	if (hweight64(reg_val & port23_mask) & 1)
 2757		reg_val |= ENET_VLAN_TBL_PARITY1;
 2758	else
 2759		reg_val &= ~ENET_VLAN_TBL_PARITY1;
 2760
 2761	return reg_val;
 2762}
 2763
 2764static void vlan_tbl_write(struct niu *np, unsigned long index,
 2765			   int port, int vpr, int rdc_table)
 2766{
 2767	u64 reg_val = nr64(ENET_VLAN_TBL(index));
 2768
 2769	reg_val &= ~((ENET_VLAN_TBL_VPR |
 2770		      ENET_VLAN_TBL_VLANRDCTBLN) <<
 2771		     ENET_VLAN_TBL_SHIFT(port));
 2772	if (vpr)
 2773		reg_val |= (ENET_VLAN_TBL_VPR <<
 2774			    ENET_VLAN_TBL_SHIFT(port));
 2775	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
 2776
 2777	reg_val = vlan_entry_set_parity(reg_val);
 2778
 2779	nw64(ENET_VLAN_TBL(index), reg_val);
 2780}
 2781
 2782static void vlan_tbl_clear(struct niu *np)
 2783{
 2784	int i;
 2785
 2786	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
 2787		nw64(ENET_VLAN_TBL(i), 0);
 2788}
 2789
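/* Poll TCAM_CTL for a completion bit, for at most ~1ms (1000 x 1us). */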
 2790static int tcam_wait_bit(struct niu *np, u64 bit)
 2791{
 2792	int limit = 1000;
 2793
 2794	while (--limit > 0) {
 2795		if (nr64(TCAM_CTL) & bit)
 2796			break;
 2797		udelay(1);
 2798	}
 2799	if (limit <= 0)
 2800		return -ENODEV;
 2801
 2802	return 0;
 2803}
 2804
 2805static int tcam_flush(struct niu *np, int index)
 2806{
 2807	nw64(TCAM_KEY_0, 0x00);
 2808	nw64(TCAM_KEY_MASK_0, 0xff);
 2809	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
 2810
 2811	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2812}
 2813
 2814#if 0
 2815static int tcam_read(struct niu *np, int index,
 2816		     u64 *key, u64 *mask)
 2817{
 2818	int err;
 2819
 2820	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
 2821	err = tcam_wait_bit(np, TCAM_CTL_STAT);
 2822	if (!err) {
 2823		key[0] = nr64(TCAM_KEY_0);
 2824		key[1] = nr64(TCAM_KEY_1);
 2825		key[2] = nr64(TCAM_KEY_2);
 2826		key[3] = nr64(TCAM_KEY_3);
 2827		mask[0] = nr64(TCAM_KEY_MASK_0);
 2828		mask[1] = nr64(TCAM_KEY_MASK_1);
 2829		mask[2] = nr64(TCAM_KEY_MASK_2);
 2830		mask[3] = nr64(TCAM_KEY_MASK_3);
 2831	}
 2832	return err;
 2833}
 2834#endif
 2835
 2836static int tcam_write(struct niu *np, int index,
 2837		      u64 *key, u64 *mask)
 2838{
 2839	nw64(TCAM_KEY_0, key[0]);
 2840	nw64(TCAM_KEY_1, key[1]);
 2841	nw64(TCAM_KEY_2, key[2]);
 2842	nw64(TCAM_KEY_3, key[3]);
 2843	nw64(TCAM_KEY_MASK_0, mask[0]);
 2844	nw64(TCAM_KEY_MASK_1, mask[1]);
 2845	nw64(TCAM_KEY_MASK_2, mask[2]);
 2846	nw64(TCAM_KEY_MASK_3, mask[3]);
 2847	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
 2848
 2849	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2850}
 2851
 2852#if 0
 2853static int tcam_assoc_read(struct niu *np, int index, u64 *data)
 2854{
 2855	int err;
 2856
 2857	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
 2858	err = tcam_wait_bit(np, TCAM_CTL_STAT);
 2859	if (!err)
 2860		*data = nr64(TCAM_KEY_1);
 2861
 2862	return err;
 2863}
 2864#endif
 2865
 2866static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
 2867{
 2868	nw64(TCAM_KEY_1, assoc_data);
 2869	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
 2870
 2871	return tcam_wait_bit(np, TCAM_CTL_STAT);
 2872}
 2873
 2874static void tcam_enable(struct niu *np, int on)
 2875{
 2876	u64 val = nr64(FFLP_CFG_1);
 2877
 2878	if (on)
 2879		val &= ~FFLP_CFG_1_TCAM_DIS;
 2880	else
 2881		val |= FFLP_CFG_1_TCAM_DIS;
 2882	nw64(FFLP_CFG_1, val);
 2883}
 2884
 2885static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
 2886{
 2887	u64 val = nr64(FFLP_CFG_1);
 2888
 2889	val &= ~(FFLP_CFG_1_FFLPINITDONE |
 2890		 FFLP_CFG_1_CAMLAT |
 2891		 FFLP_CFG_1_CAMRATIO);
 2892	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
 2893	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
 2894	nw64(FFLP_CFG_1, val);
 2895
 2896	val = nr64(FFLP_CFG_1);
 2897	val |= FFLP_CFG_1_FFLPINITDONE;
 2898	nw64(FFLP_CFG_1, val);
 2899}
 2900
 2901static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
 2902				      int on)
 2903{
 2904	unsigned long reg;
 2905	u64 val;
 2906
 2907	if (class < CLASS_CODE_ETHERTYPE1 ||
 2908	    class > CLASS_CODE_ETHERTYPE2)
 2909		return -EINVAL;
 2910
 2911	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
 2912	val = nr64(reg);
 2913	if (on)
 2914		val |= L2_CLS_VLD;
 2915	else
 2916		val &= ~L2_CLS_VLD;
 2917	nw64(reg, val);
 2918
 2919	return 0;
 2920}
 2921
 2922#if 0
 2923static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
 2924				   u64 ether_type)
 2925{
 2926	unsigned long reg;
 2927	u64 val;
 2928
 2929	if (class < CLASS_CODE_ETHERTYPE1 ||
 2930	    class > CLASS_CODE_ETHERTYPE2 ||
 2931	    (ether_type & ~(u64)0xffff) != 0)
 2932		return -EINVAL;
 2933
 2934	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
 2935	val = nr64(reg);
 2936	val &= ~L2_CLS_ETYPE;
 2937	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
 2938	nw64(reg, val);
 2939
 2940	return 0;
 2941}
 2942#endif
 2943
 2944static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
 2945				     int on)
 2946{
 2947	unsigned long reg;
 2948	u64 val;
 2949
 2950	if (class < CLASS_CODE_USER_PROG1 ||
 2951	    class > CLASS_CODE_USER_PROG4)
 2952		return -EINVAL;
 2953
 2954	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
 2955	val = nr64(reg);
 2956	if (on)
 2957		val |= L3_CLS_VALID;
 2958	else
 2959		val &= ~L3_CLS_VALID;
 2960	nw64(reg, val);
 2961
 2962	return 0;
 2963}
 2964
 2965static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
 2966				  int ipv6, u64 protocol_id,
 2967				  u64 tos_mask, u64 tos_val)
 2968{
 2969	unsigned long reg;
 2970	u64 val;
 2971
 2972	if (class < CLASS_CODE_USER_PROG1 ||
 2973	    class > CLASS_CODE_USER_PROG4 ||
 2974	    (protocol_id & ~(u64)0xff) != 0 ||
 2975	    (tos_mask & ~(u64)0xff) != 0 ||
 2976	    (tos_val & ~(u64)0xff) != 0)
 2977		return -EINVAL;
 2978
 2979	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
 2980	val = nr64(reg);
 2981	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
 2982		 L3_CLS_TOSMASK | L3_CLS_TOS);
 2983	if (ipv6)
 2984		val |= L3_CLS_IPVER;
 2985	val |= (protocol_id << L3_CLS_PID_SHIFT);
 2986	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
 2987	val |= (tos_val << L3_CLS_TOS_SHIFT);
 2988	nw64(reg, val);
 2989
 2990	return 0;
 2991}
 2992
 2993static int tcam_early_init(struct niu *np)
 2994{
 2995	unsigned long i;
 2996	int err;
 2997
 2998	tcam_enable(np, 0);
 2999	tcam_set_lat_and_ratio(np,
 3000			       DEFAULT_TCAM_LATENCY,
 3001			       DEFAULT_TCAM_ACCESS_RATIO);
 3002	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
 3003		err = tcam_user_eth_class_enable(np, i, 0);
 3004		if (err)
 3005			return err;
 3006	}
 3007	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
 3008		err = tcam_user_ip_class_enable(np, i, 0);
 3009		if (err)
 3010			return err;
 3011	}
 3012
 3013	return 0;
 3014}
 3015
 3016static int tcam_flush_all(struct niu *np)
 3017{
 3018	unsigned long i;
 3019
 3020	for (i = 0; i < np->parent->tcam_num_entries; i++) {
 3021		int err = tcam_flush(np, i);
 3022		if (err)
 3023			return err;
 3024	}
 3025	return 0;
 3026}
 3027
 3028static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
 3029{
 3030	return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
 3031}
 3032
 3033#if 0
 3034static int hash_read(struct niu *np, unsigned long partition,
 3035		     unsigned long index, unsigned long num_entries,
 3036		     u64 *data)
 3037{
 3038	u64 val = hash_addr_regval(index, num_entries);
 3039	unsigned long i;
 3040
 3041	if (partition >= FCRAM_NUM_PARTITIONS ||
  3042	    index + (num_entries * 8) > FCRAM_SIZE)
 3043		return -EINVAL;
 3044
 3045	nw64(HASH_TBL_ADDR(partition), val);
 3046	for (i = 0; i < num_entries; i++)
 3047		data[i] = nr64(HASH_TBL_DATA(partition));
 3048
 3049	return 0;
 3050}
 3051#endif
 3052
 3053static int hash_write(struct niu *np, unsigned long partition,
 3054		      unsigned long index, unsigned long num_entries,
 3055		      u64 *data)
 3056{
 3057	u64 val = hash_addr_regval(index, num_entries);
 3058	unsigned long i;
 3059
 3060	if (partition >= FCRAM_NUM_PARTITIONS ||
 3061	    index + (num_entries * 8) > FCRAM_SIZE)
 3062		return -EINVAL;
 3063
 3064	nw64(HASH_TBL_ADDR(partition), val);
 3065	for (i = 0; i < num_entries; i++)
 3066		nw64(HASH_TBL_DATA(partition), data[i]);
 3067
 3068	return 0;
 3069}
 3070
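/* Pulse the FFLP PIO/FIO reset bit, then restore normal FCRAM output
 * drive and mark initialization done.
 */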
 3071static void fflp_reset(struct niu *np)
 3072{
 3073	u64 val;
 3074
 3075	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
 3076	udelay(10);
 3077	nw64(FFLP_CFG_1, 0);
 3078
 3079	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
 3080	nw64(FFLP_CFG_1, val);
 3081}
 3082
 3083static void fflp_set_timings(struct niu *np)
 3084{
 3085	u64 val = nr64(FFLP_CFG_1);
 3086
 3087	val &= ~FFLP_CFG_1_FFLPINITDONE;
 3088	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
 3089	nw64(FFLP_CFG_1, val);
 3090
 3091	val = nr64(FFLP_CFG_1);
 3092	val |= FFLP_CFG_1_FFLPINITDONE;
 3093	nw64(FFLP_CFG_1, val);
 3094
 3095	val = nr64(FCRAM_REF_TMR);
 3096	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
 3097	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
 3098	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
 3099	nw64(FCRAM_REF_TMR, val);
 3100}
 3101
 3102static int fflp_set_partition(struct niu *np, u64 partition,
 3103			      u64 mask, u64 base, int enable)
 3104{
 3105	unsigned long reg;
 3106	u64 val;
 3107
 3108	if (partition >= FCRAM_NUM_PARTITIONS ||
 3109	    (mask & ~(u64)0x1f) != 0 ||
 3110	    (base & ~(u64)0x1f) != 0)
 3111		return -EINVAL;
 3112
 3113	reg = FLW_PRT_SEL(partition);
 3114
 3115	val = nr64(reg);
 3116	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
 3117	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
 3118	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
 3119	if (enable)
 3120		val |= FLW_PRT_SEL_EXT;
 3121	nw64(reg, val);
 3122
 3123	return 0;
 3124}
 3125
 3126static int fflp_disable_all_partitions(struct niu *np)
 3127{
 3128	unsigned long i;
 3129
 3130	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
  3131		int err = fflp_set_partition(np, i, 0, 0, 0);
 3132		if (err)
 3133			return err;
 3134	}
 3135	return 0;
 3136}
 3137
 3138static void fflp_llcsnap_enable(struct niu *np, int on)
 3139{
 3140	u64 val = nr64(FFLP_CFG_1);
 3141
 3142	if (on)
 3143		val |= FFLP_CFG_1_LLCSNAP;
 3144	else
 3145		val &= ~FFLP_CFG_1_LLCSNAP;
 3146	nw64(FFLP_CFG_1, val);
 3147}
 3148
 3149static void fflp_errors_enable(struct niu *np, int on)
 3150{
 3151	u64 val = nr64(FFLP_CFG_1);
 3152
 3153	if (on)
 3154		val &= ~FFLP_CFG_1_ERRORDIS;
 3155	else
 3156		val |= FFLP_CFG_1_ERRORDIS;
 3157	nw64(FFLP_CFG_1, val);
 3158}
 3159
 3160static int fflp_hash_clear(struct niu *np)
 3161{
 3162	struct fcram_hash_ipv4 ent;
 3163	unsigned long i;
 3164
 3165	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
 3166	memset(&ent, 0, sizeof(ent));
 3167	ent.header = HASH_HEADER_EXT;
 3168
 3169	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
 3170		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
 3171		if (err)
 3172			return err;
 3173	}
 3174	return 0;
 3175}
 3176
 3177static int fflp_early_init(struct niu *np)
 3178{
 3179	struct niu_parent *parent;
 3180	unsigned long flags;
 3181	int err;
 3182
 3183	niu_lock_parent(np, flags);
 3184
 3185	parent = np->parent;
 3186	err = 0;
 3187	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
 3188		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 3189			fflp_reset(np);
 3190			fflp_set_timings(np);
 3191			err = fflp_disable_all_partitions(np);
 3192			if (err) {
 3193				netif_printk(np, probe, KERN_DEBUG, np->dev,
 3194					     "fflp_disable_all_partitions failed, err=%d\n",
 3195					     err);
 3196				goto out;
 3197			}
 3198		}
 3199
 3200		err = tcam_early_init(np);
 3201		if (err) {
 3202			netif_printk(np, probe, KERN_DEBUG, np->dev,
 3203				     "tcam_early_init failed, err=%d\n", err);
 3204			goto out;
 3205		}
 3206		fflp_llcsnap_enable(np, 1);
 3207		fflp_errors_enable(np, 0);
 3208		nw64(H1POLY, 0);
 3209		nw64(H2POLY, 0);
 3210
 3211		err = tcam_flush_all(np);
 3212		if (err) {
 3213			netif_printk(np, probe, KERN_DEBUG, np->dev,
 3214				     "tcam_flush_all failed, err=%d\n", err);
 3215			goto out;
 3216		}
 3217		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 3218			err = fflp_hash_clear(np);
 3219			if (err) {
 3220				netif_printk(np, probe, KERN_DEBUG, np->dev,
 3221					     "fflp_hash_clear failed, err=%d\n",
 3222					     err);
 3223				goto out;
 3224			}
 3225		}
 3226
 3227		vlan_tbl_clear(np);
 3228
 3229		parent->flags |= PARENT_FLGS_CLS_HWINIT;
 3230	}
 3231out:
 3232	niu_unlock_parent(np, flags);
 3233	return err;
 3234}
 3235
 3236static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
 3237{
 3238	if (class_code < CLASS_CODE_USER_PROG1 ||
 3239	    class_code > CLASS_CODE_SCTP_IPV6)
 3240		return -EINVAL;
 3241
 3242	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
 3243	return 0;
 3244}
 3245
 3246static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
 3247{
 3248	if (class_code < CLASS_CODE_USER_PROG1 ||
 3249	    class_code > CLASS_CODE_SCTP_IPV6)
 3250		return -EINVAL;
 3251
 3252	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
 3253	return 0;
 3254}
 3255
 3256/* Entries for the ports are interleaved in the TCAM */
 3257static u16 tcam_get_index(struct niu *np, u16 idx)
 3258{
 3259	/* One entry reserved for IP fragment rule */
 3260	if (idx >= (np->clas.tcam_sz - 1))
 3261		idx = 0;
 3262	return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
 3263}
 3264
 3265static u16 tcam_get_size(struct niu *np)
 3266{
 3267	/* One entry reserved for IP fragment rule */
 3268	return np->clas.tcam_sz - 1;
 3269}
 3270
 3271static u16 tcam_get_valid_entry_cnt(struct niu *np)
 3272{
 3273	/* One entry reserved for IP fragment rule */
 3274	return np->clas.tcam_valid_entries - 1;
 3275}
 3276
 3277static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
 3278			      u32 offset, u32 size, u32 truesize)
 3279{
 3280	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
 3281
 3282	skb->len += size;
 3283	skb->data_len += size;
 3284	skb->truesize += truesize;
 3285}
 3286
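/* Hash a block DMA address into the rxhash table: discard the in-page
 * offset, fold the high bits down, and mask to the maximum ring size.
 */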
 3287static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
 3288{
 3289	a >>= PAGE_SHIFT;
 3290	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
 3291
 3292	return a & (MAX_RBR_RING_SIZE - 1);
 3293}
 3294
 3295static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
 3296				    struct page ***link)
 3297{
 3298	unsigned int h = niu_hash_rxaddr(rp, addr);
 3299	struct page *p, **pp;
 3300
 3301	addr &= PAGE_MASK;
 3302	pp = &rp->rxhash[h];
 3303	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
 3304		if (p->index == addr) {
 3305			*link = pp;
 3306			goto found;
 3307		}
 3308	}
 3309	BUG();
 3310
 3311found:
 3312	return p;
 3313}
 3314
 3315static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
 3316{
 3317	unsigned int h = niu_hash_rxaddr(rp, base);
 3318
 3319	page->index = base;
 3320	page->mapping = (struct address_space *) rp->rxhash[h];
 3321	rp->rxhash[h] = page;
 3322}
 3323
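/* Allocate and DMA-map one page, chain it into the rxhash table, and
 * fill rbr_blocks_per_page RBR descriptors from it.  The page refcount
 * is raised once per extra block so each block holds its own reference.
 */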
 3324static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
 3325			    gfp_t mask, int start_index)
 3326{
 3327	struct page *page;
 3328	u64 addr;
 3329	int i;
 3330
 3331	page = alloc_page(mask);
 3332	if (!page)
 3333		return -ENOMEM;
 3334
 3335	addr = np->ops->map_page(np->device, page, 0,
 3336				 PAGE_SIZE, DMA_FROM_DEVICE);
 3337	if (!addr) {
 3338		__free_page(page);
 3339		return -ENOMEM;
 3340	}
 3341
 3342	niu_hash_page(rp, page, addr);
 3343	if (rp->rbr_blocks_per_page > 1)
 3344		page_ref_add(page, rp->rbr_blocks_per_page - 1);
 3345
 3346	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
 3347		__le32 *rbr = &rp->rbr[start_index + i];
 3348
 3349		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
 3350		addr += rp->rbr_block_size;
 3351	}
 3352
 3353	return 0;
 3354}
 3355
 3356static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
 3357{
 3358	int index = rp->rbr_index;
 3359
 3360	rp->rbr_pending++;
 3361	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
 3362		int err = niu_rbr_add_page(np, rp, mask, index);
 3363
 3364		if (unlikely(err)) {
 3365			rp->rbr_pending--;
 3366			return;
 3367		}
 3368
 3369		rp->rbr_index += rp->rbr_blocks_per_page;
 3370		BUG_ON(rp->rbr_index > rp->rbr_table_size);
 3371		if (rp->rbr_index == rp->rbr_table_size)
 3372			rp->rbr_index = 0;
 3373
 3374		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
 3375			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
 3376			rp->rbr_pending = 0;
 3377		}
 3378	}
 3379}
 3380
 3381static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
 3382{
 3383	unsigned int index = rp->rcr_index;
 3384	int num_rcr = 0;
 3385
 3386	rp->rx_dropped++;
 3387	while (1) {
 3388		struct page *page, **link;
 3389		u64 addr, val;
 3390		u32 rcr_size;
 3391
 3392		num_rcr++;
 3393
 3394		val = le64_to_cpup(&rp->rcr[index]);
 3395		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 3396			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
 3397		page = niu_find_rxpage(rp, addr, &link);
 3398
 3399		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 3400					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 3401		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
 3402			*link = (struct page *) page->mapping;
 3403			np->ops->unmap_page(np->device, page->index,
 3404					    PAGE_SIZE, DMA_FROM_DEVICE);
 3405			page->index = 0;
 3406			page->mapping = NULL;
 3407			__free_page(page);
 3408			rp->rbr_refill_pending++;
 3409		}
 3410
 3411		index = NEXT_RCR(rp, index);
 3412		if (!(val & RCR_ENTRY_MULTI))
 3413			break;
 3414
 3415	}
 3416	rp->rcr_index = index;
 3417
 3418	return num_rcr;
 3419}
 3420
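/* Build one packet from one (or, for RCR_ENTRY_MULTI chains, several)
 * completion entries: append each buffer as a page fragment, pull the
 * receive header plus the link-layer header into the linear area, then
 * hand the skb to GRO.
 */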
 3421static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
 3422			      struct rx_ring_info *rp)
 3423{
 3424	unsigned int index = rp->rcr_index;
 3425	struct rx_pkt_hdr1 *rh;
 3426	struct sk_buff *skb;
 3427	int len, num_rcr;
 3428
 3429	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
 3430	if (unlikely(!skb))
 3431		return niu_rx_pkt_ignore(np, rp);
 3432
 3433	num_rcr = 0;
 3434	while (1) {
 3435		struct page *page, **link;
 3436		u32 rcr_size, append_size;
 3437		u64 addr, val, off;
 3438
 3439		num_rcr++;
 3440
 3441		val = le64_to_cpup(&rp->rcr[index]);
 3442
 3443		len = (val & RCR_ENTRY_L2_LEN) >>
 3444			RCR_ENTRY_L2_LEN_SHIFT;
 3445		len -= ETH_FCS_LEN;
 3446
 3447		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 3448			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
 3449		page = niu_find_rxpage(rp, addr, &link);
 3450
 3451		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
 3452					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 3453
 3454		off = addr & ~PAGE_MASK;
 3455		append_size = rcr_size;
 3456		if (num_rcr == 1) {
 3457			int ptype;
 3458
 3459			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
 3460			if ((ptype == RCR_PKT_TYPE_TCP ||
 3461			     ptype == RCR_PKT_TYPE_UDP) &&
 3462			    !(val & (RCR_ENTRY_NOPORT |
 3463				     RCR_ENTRY_ERROR)))
 3464				skb->ip_summed = CHECKSUM_UNNECESSARY;
 3465			else
 3466				skb_checksum_none_assert(skb);
 3467		} else if (!(val & RCR_ENTRY_MULTI))
 3468			append_size = len - skb->len;
 3469
 3470		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
 3471		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
 3472			*link = (struct page *) page->mapping;
 3473			np->ops->unmap_page(np->device, page->index,
 3474					    PAGE_SIZE, DMA_FROM_DEVICE);
 3475			page->index = 0;
 3476			page->mapping = NULL;
 3477			rp->rbr_refill_pending++;
 3478		} else
 3479			get_page(page);
 3480
 3481		index = NEXT_RCR(rp, index);
 3482		if (!(val & RCR_ENTRY_MULTI))
 3483			break;
 3484
 3485	}
 3486	rp->rcr_index = index;
 3487
 3488	len += sizeof(*rh);
 3489	len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
 3490	__pskb_pull_tail(skb, len);
 3491
 3492	rh = (struct rx_pkt_hdr1 *) skb->data;
 3493	if (np->dev->features & NETIF_F_RXHASH)
 3494		skb_set_hash(skb,
 3495			     ((u32)rh->hashval2_0 << 24 |
 3496			      (u32)rh->hashval2_1 << 16 |
 3497			      (u32)rh->hashval1_1 << 8 |
 3498			      (u32)rh->hashval1_2 << 0),
 3499			     PKT_HASH_TYPE_L3);
 3500	skb_pull(skb, sizeof(*rh));
 3501
 3502	rp->rx_packets++;
 3503	rp->rx_bytes += skb->len;
 3504
 3505	skb->protocol = eth_type_trans(skb, np->dev);
 3506	skb_record_rx_queue(skb, rp->rx_channel);
 3507	napi_gro_receive(napi, skb);
 3508
 3509	return num_rcr;
 3510}
 3511
 3512static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
 3513{
 3514	int blocks_per_page = rp->rbr_blocks_per_page;
 3515	int err, index = rp->rbr_index;
 3516
 3517	err = 0;
 3518	while (index < (rp->rbr_table_size - blocks_per_page)) {
 3519		err = niu_rbr_add_page(np, rp, mask, index);
 3520		if (unlikely(err))
 3521			break;
 3522
 3523		index += blocks_per_page;
 3524	}
 3525
 3526	rp->rbr_index = index;
 3527	return err;
 3528}
 3529
 3530static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
 3531{
 3532	int i;
 3533
 3534	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
 3535		struct page *page;
 3536
 3537		page = rp->rxhash[i];
 3538		while (page) {
 3539			struct page *next = (struct page *) page->mapping;
 3540			u64 base = page->index;
 3541
 3542			np->ops->unmap_page(np->device, base, PAGE_SIZE,
 3543					    DMA_FROM_DEVICE);
 3544			page->index = 0;
 3545			page->mapping = NULL;
 3546
 3547			__free_page(page);
 3548
 3549			page = next;
 3550		}
 3551	}
 3552
 3553	for (i = 0; i < rp->rbr_table_size; i++)
 3554		rp->rbr[i] = cpu_to_le32(0);
 3555	rp->rbr_index = 0;
 3556}
 3557
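/* Reclaim one transmitted skb: credit the byte/packet counters from
 * the packet header (length minus pad), unmap the head and every
 * fragment, and return the next consumer index.
 */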
 3558static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 3559{
 3560	struct tx_buff_info *tb = &rp->tx_buffs[idx];
 3561	struct sk_buff *skb = tb->skb;
 3562	struct tx_pkt_hdr *tp;
 3563	u64 tx_flags;
 3564	int i, len;
 3565
 3566	tp = (struct tx_pkt_hdr *) skb->data;
 3567	tx_flags = le64_to_cpup(&tp->flags);
 3568
 3569	rp->tx_packets++;
 3570	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
 3571			 ((tx_flags & TXHDR_PAD) / 2));
 3572
 3573	len = skb_headlen(skb);
 3574	np->ops->unmap_single(np->device, tb->mapping,
 3575			      len, DMA_TO_DEVICE);
 3576
 3577	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
 3578		rp->mark_pending--;
 3579
 3580	tb->skb = NULL;
 3581	do {
 3582		idx = NEXT_TX(rp, idx);
 3583		len -= MAX_TX_DESC_LEN;
 3584	} while (len > 0);
 3585
 3586	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 3587		tb = &rp->tx_buffs[idx];
 3588		BUG_ON(tb->skb != NULL);
 3589		np->ops->unmap_page(np->device, tb->mapping,
 3590				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
 3591				    DMA_TO_DEVICE);
 3592		idx = NEXT_TX(rp, idx);
 3593	}
 3594
 3595	dev_kfree_skb(skb);
 3596
 3597	return idx;
 3598}
 3599
 3600#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
 3601
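/* Reclaim completed TX descriptors for one ring.  The hardware packet
 * count is a wrapping counter, so the delta from last_pkt_cnt is taken
 * modulo its field mask.  The queue is rewoken once at least a quarter
 * of the ring (NIU_TX_WAKEUP_THRESH) is free again.
 */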
 3602static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 3603{
 3604	struct netdev_queue *txq;
 3605	u16 pkt_cnt, tmp;
 3606	int cons, index;
 3607	u64 cs;
 3608
 3609	index = (rp - np->tx_rings);
 3610	txq = netdev_get_tx_queue(np->dev, index);
 3611
 3612	cs = rp->tx_cs;
 3613	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
 3614		goto out;
 3615
 3616	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
 3617	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
 3618		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
 3619
 3620	rp->last_pkt_cnt = tmp;
 3621
 3622	cons = rp->cons;
 3623
 3624	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
 3625		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 3626
 3627	while (pkt_cnt--)
 3628		cons = release_tx_packet(np, rp, cons);
 3629
 3630	rp->cons = cons;
 3631	smp_mb();
 3632
 3633out:
 3634	if (unlikely(netif_tx_queue_stopped(txq) &&
 3635		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
 3636		__netif_tx_lock(txq, smp_processor_id());
 3637		if (netif_tx_queue_stopped(txq) &&
 3638		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
 3639			netif_tx_wake_queue(txq);
 3640		__netif_tx_unlock(txq);
 3641	}
 3642}
 3643
 3644static inline void niu_sync_rx_discard_stats(struct niu *np,
 3645					     struct rx_ring_info *rp,
 3646					     const int limit)
 3647{
 3648	/* This elaborate scheme is needed for reading the RX discard
 3649	 * counters, as they are only 16-bit and can overflow quickly,
 3650	 * and because the overflow indication bit is not usable as
 3651	 * the counter value does not wrap, but remains at max value
 3652	 * 0xFFFF.
 3653	 *
  3654	 * In theory and in practice, counter increments can be lost
  3655	 * between the nr64() read and the nw64() clear.  For this
  3656	 * reason, the number of counter clearings nw64() is
  3657	 * limited/reduced through the limit parameter.
 3658	 */
 3659	int rx_channel = rp->rx_channel;
 3660	u32 misc, wred;
 3661
  3662	/* RXMISC (Receive Miscellaneous Discard Count) covers the
  3663	 * following discard events: IPP (Input Port Process),
  3664	 * FFLP/TCAM, a full RCR (Receive Completion Ring), and an
  3665	 * empty RBR (Receive Block Ring) prefetch buffer.
  3666	 */
 3667	misc = nr64(RXMISC(rx_channel));
 3668	if (unlikely((misc & RXMISC_COUNT) > limit)) {
 3669		nw64(RXMISC(rx_channel), 0);
 3670		rp->rx_errors += misc & RXMISC_COUNT;
 3671
 3672		if (unlikely(misc & RXMISC_OFLOW))
 3673			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
 3674				rx_channel);
 3675
 3676		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
 3677			     "rx-%d: MISC drop=%u over=%u\n",
 3678			     rx_channel, misc, misc-limit);
 3679	}
 3680
 3681	/* WRED (Weighted Random Early Discard) by hardware */
 3682	wred = nr64(RED_DIS_CNT(rx_channel));
 3683	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
 3684		nw64(RED_DIS_CNT(rx_channel), 0);
 3685		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
 3686
 3687		if (unlikely(wred & RED_DIS_CNT_OFLOW))
 3688			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
 3689
 3690		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
 3691			     "rx-%d: WRED drop=%u over=%u\n",
 3692			     rx_channel, wred, wred-limit);
 3693	}
 3694}
 3695
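/* Service one RX ring from NAPI context: process up to 'budget'
 * completion entries (qlen is read via PIO here; the mailbox path is
 * compiled out), refill the RBR once enough buffers were consumed, and
 * acknowledge the processed entries in RX_DMA_CTL_STAT.
 */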
 3696static int niu_rx_work(struct napi_struct *napi, struct niu *np,
 3697		       struct rx_ring_info *rp, int budget)
 3698{
 3699	int qlen, rcr_done = 0, work_done = 0;
 3700	struct rxdma_mailbox *mbox = rp->mbox;
 3701	u64 stat;
 3702
 3703#if 1
 3704	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 3705	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
 3706#else
 3707	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
 3708	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
 3709#endif
 3710	mbox->rx_dma_ctl_stat = 0;
 3711	mbox->rcrstat_a = 0;
 3712
 3713	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
 3714		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
 3715		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
 3716
 3717	rcr_done = work_done = 0;
 3718	qlen = min(qlen, budget);
 3719	while (work_done < qlen) {
 3720		rcr_done += niu_process_rx_pkt(napi, np, rp);
 3721		work_done++;
 3722	}
 3723
 3724	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
 3725		unsigned int i;
 3726
 3727		for (i = 0; i < rp->rbr_refill_pending; i++)
 3728			niu_rbr_refill(np, rp, GFP_ATOMIC);
 3729		rp->rbr_refill_pending = 0;
 3730	}
 3731
 3732	stat = (RX_DMA_CTL_STAT_MEX |
 3733		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
 3734		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
 3735
 3736	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
 3737
  3738	/* Only sync discard stats when qlen indicates potential for drops */
 3739	if (qlen > 10)
 3740		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
 3741
 3742	return work_done;
 3743}
 3744
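/* Core NAPI handler: v0 packs TX completion bits in the upper 32 bits
 * and RX bits in the lower 32.  Each flagged channel is serviced and
 * its logical device interrupt is unmasked again via LD_IM0.
 */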
 3745static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
 3746{
 3747	u64 v0 = lp->v0;
 3748	u32 tx_vec = (v0 >> 32);
 3749	u32 rx_vec = (v0 & 0xffffffff);
 3750	int i, work_done = 0;
 3751
 3752	netif_printk(np, intr, KERN_DEBUG, np->dev,
 3753		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
 3754
 3755	for (i = 0; i < np->num_tx_rings; i++) {
 3756		struct tx_ring_info *rp = &np->tx_rings[i];
 3757		if (tx_vec & (1 << rp->tx_channel))
 3758			niu_tx_work(np, rp);
 3759		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
 3760	}
 3761
 3762	for (i = 0; i < np->num_rx_rings; i++) {
 3763		struct rx_ring_info *rp = &np->rx_rings[i];
 3764
 3765		if (rx_vec & (1 << rp->rx_channel)) {
 3766			int this_work_done;
 3767
 3768			this_work_done = niu_rx_work(&lp->napi, np, rp,
 3769						     budget);
 3770
 3771			budget -= this_work_done;
 3772			work_done += this_work_done;
 3773		}
 3774		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
 3775	}
 3776
 3777	return work_done;
 3778}
 3779
 3780static int niu_poll(struct napi_struct *napi, int budget)
 3781{
 3782	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
 3783	struct niu *np = lp->np;
 3784	int work_done;
 3785
 3786	work_done = niu_poll_core(np, lp, budget);
 3787
 3788	if (work_done < budget) {
 3789		napi_complete(napi);
 3790		niu_ldg_rearm(np, lp, 1);
 3791	}
 3792	return work_done;
 3793}
 3794
 3795static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
 3796				  u64 stat)
 3797{
 3798	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
 3799
 3800	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
 3801		pr_cont("RBR_TMOUT ");
 3802	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
 3803		pr_cont("RSP_CNT ");
 3804	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
 3805		pr_cont("BYTE_EN_BUS ");
 3806	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
 3807		pr_cont("RSP_DAT ");
 3808	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
 3809		pr_cont("RCR_ACK ");
 3810	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
 3811		pr_cont("RCR_SHA_PAR ");
 3812	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
 3813		pr_cont("RBR_PRE_PAR ");
 3814	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
 3815		pr_cont("CONFIG ");
 3816	if (stat & RX_DMA_CTL_STAT_RCRINCON)
 3817		pr_cont("RCRINCON ");
 3818	if (stat & RX_DMA_CTL_STAT_RCRFULL)
 3819		pr_cont("RCRFULL ");
 3820	if (stat & RX_DMA_CTL_STAT_RBRFULL)
 3821		pr_cont("RBRFULL ");
 3822	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
 3823		pr_cont("RBRLOGPAGE ");
 3824	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
 3825		pr_cont("CFIGLOGPAGE ");
 3826	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
 3827		pr_cont("DC_FIDO ");
 3828
 3829	pr_cont(")\n");
 3830}
 3831
 3832static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
 3833{
 3834	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 3835	int err = 0;
 3836
 3837
 3838	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
 3839		    RX_DMA_CTL_STAT_PORT_FATAL))
 3840		err = -EINVAL;
 3841
 3842	if (err) {
 3843		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
 3844			   rp->rx_channel,
 3845			   (unsigned long long) stat);
 3846
 3847		niu_log_rxchan_errors(np, rp, stat);
 3848	}
 3849
 3850	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 3851	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
 3852
 3853	return err;
 3854}
 3855
 3856static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
 3857				  u64 cs)
 3858{
 3859	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
 3860
 3861	if (cs & TX_CS_MBOX_ERR)
 3862		pr_cont("MBOX ");
 3863	if (cs & TX_CS_PKT_SIZE_ERR)
 3864		pr_cont("PKT_SIZE ");
 3865	if (cs & TX_CS_TX_RING_OFLOW)
 3866		pr_cont("TX_RING_OFLOW ");
 3867	if (cs & TX_CS_PREF_BUF_PAR_ERR)
 3868		pr_cont("PREF_BUF_PAR ");
 3869	if (cs & TX_CS_NACK_PREF)
 3870		pr_cont("NACK_PREF ");
 3871	if (cs & TX_CS_NACK_PKT_RD)
 3872		pr_cont("NACK_PKT_RD ");
 3873	if (cs & TX_CS_CONF_PART_ERR)
 3874		pr_cont("CONF_PART ");
 3875	if (cs & TX_CS_PKT_PRT_ERR)
 3876		pr_cont("PKT_PTR ");
 3877
 3878	pr_cont(")\n");
 3879}
 3880
 3881static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
 3882{
 3883	u64 cs, logh, logl;
 3884
 3885	cs = nr64(TX_CS(rp->tx_channel));
 3886	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
 3887	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
 3888
 3889	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
 3890		   rp->tx_channel,
 3891		   (unsigned long long)cs,
 3892		   (unsigned long long)logh,
 3893		   (unsigned long long)logl);
 3894
 3895	niu_log_txchan_errors(np, rp, cs);
 3896
 3897	return -ENODEV;
 3898}
 3899
 3900static int niu_mif_interrupt(struct niu *np)
 3901{
 3902	u64 mif_status = nr64(MIF_STATUS);
 3903	int phy_mdint = 0;
 3904
 3905	if (np->flags & NIU_FLAGS_XMAC) {
 3906		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
 3907
 3908		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
 3909			phy_mdint = 1;
 3910	}
 3911
 3912	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
 3913		   (unsigned long long)mif_status, phy_mdint);
 3914
 3915	return -ENODEV;
 3916}
 3917
 3918static void niu_xmac_interrupt(struct niu *np)
 3919{
 3920	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
 3921	u64 val;
 3922
 3923	val = nr64_mac(XTXMAC_STATUS);
 3924	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
 3925		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
 3926	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
 3927		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
 3928	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
 3929		mp->tx_fifo_errors++;
 3930	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
 3931		mp->tx_overflow_errors++;
 3932	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
 3933		mp->tx_max_pkt_size_errors++;
 3934	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
 3935		mp->tx_underflow_errors++;
 3936
 3937	val = nr64_mac(XRXMAC_STATUS);
 3938	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
 3939		mp->rx_local_faults++;
 3940	if (val & XRXMAC_STATUS_RFLT_DET)
 3941		mp->rx_remote_faults++;
 3942	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
 3943		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
 3944	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
 3945		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
 3946	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
 3947		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
 3948	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
 3949		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
 3950	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
 3951		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
 3954	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
 3955		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
 3956	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
 3957		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
 3958	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
 3959		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
 3960	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
 3961		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
 3962	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
 3963		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
 3964	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
 3965		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
 3966	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
 3967		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
 3968	if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
 3969		mp->rx_octets += RXMAC_BT_CNT_COUNT;
 3970	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
 3971		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
 3972	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
 3973		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
 3974	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
 3975		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
 3976	if (val & XRXMAC_STATUS_RXUFLOW)
 3977		mp->rx_underflows++;
 3978	if (val & XRXMAC_STATUS_RXOFLOW)
 3979		mp->rx_overflows++;
 3980
 3981	val = nr64_mac(XMAC_FC_STAT);
 3982	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
 3983		mp->pause_off_state++;
 3984	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
 3985		mp->pause_on_state++;
 3986	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
 3987		mp->pause_received++;
 3988}
 3989
 3990static void niu_bmac_interrupt(struct niu *np)
 3991{
 3992	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
 3993	u64 val;
 3994
 3995	val = nr64_mac(BTXMAC_STATUS);
 3996	if (val & BTXMAC_STATUS_UNDERRUN)
 3997		mp->tx_underflow_errors++;
 3998	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
 3999		mp->tx_max_pkt_size_errors++;
 4000	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
 4001		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
 4002	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
 4003		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
 4004
 4005	val = nr64_mac(BRXMAC_STATUS);
 4006	if (val & BRXMAC_STATUS_OVERFLOW)
 4007		mp->rx_overflows++;
 4008	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
 4009		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
 4010	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
 4011		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
 4012	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
 4013		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
 4014	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
 4015		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
 4016
 4017	val = nr64_mac(BMAC_CTRL_STATUS);
 4018	if (val & BMAC_CTRL_STATUS_NOPAUSE)
 4019		mp->pause_off_state++;
 4020	if (val & BMAC_CTRL_STATUS_PAUSE)
 4021		mp->pause_on_state++;
 4022	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
 4023		mp->pause_received++;
 4024}
 4025
 4026static int niu_mac_interrupt(struct niu *np)
 4027{
 4028	if (np->flags & NIU_FLAGS_XMAC)
 4029		niu_xmac_interrupt(np);
 4030	else
 4031		niu_bmac_interrupt(np);
 4032
 4033	return 0;
 4034}
 4035
 4036static void niu_log_device_error(struct niu *np, u64 stat)
 4037{
 4038	netdev_err(np->dev, "Core device errors ( ");
 4039
 4040	if (stat & SYS_ERR_MASK_META2)
 4041		pr_cont("META2 ");
 4042	if (stat & SYS_ERR_MASK_META1)
 4043		pr_cont("META1 ");
 4044	if (stat & SYS_ERR_MASK_PEU)
 4045		pr_cont("PEU ");
 4046	if (stat & SYS_ERR_MASK_TXC)
 4047		pr_cont("TXC ");
 4048	if (stat & SYS_ERR_MASK_RDMC)
 4049		pr_cont("RDMC ");
 4050	if (stat & SYS_ERR_MASK_TDMC)
 4051		pr_cont("TDMC ");
 4052	if (stat & SYS_ERR_MASK_ZCP)
 4053		pr_cont("ZCP ");
 4054	if (stat & SYS_ERR_MASK_FFLP)
 4055		pr_cont("FFLP ");
 4056	if (stat & SYS_ERR_MASK_IPP)
 4057		pr_cont("IPP ");
 4058	if (stat & SYS_ERR_MASK_MAC)
 4059		pr_cont("MAC ");
 4060	if (stat & SYS_ERR_MASK_SMX)
 4061		pr_cont("SMX ");
 4062
 4063	pr_cont(")\n");
 4064}
 4065
 4066static int niu_device_error(struct niu *np)
 4067{
 4068	u64 stat = nr64(SYS_ERR_STAT);
 4069
 4070	netdev_err(np->dev, "Core device error, stat[%llx]\n",
 4071		   (unsigned long long)stat);
 4072
 4073	niu_log_device_error(np, stat);
 4074
 4075	return -ENODEV;
 4076}
 4077
 4078static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
 4079			      u64 v0, u64 v1, u64 v2)
 4080{
 4082	int i, err = 0;
 4083
 4084	lp->v0 = v0;
 4085	lp->v1 = v1;
 4086	lp->v2 = v2;
 4087
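      	/* LDSV1 mirrors LDSV0's per-device layout but carries the
      	 * error (ldf1) flag bits; LDSV2 packs both flag sets for the
      	 * remaining devices (the per-port MACs and the system-error
      	 * device), which the masks below pick apart.
      	 */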
 4088	if (v1 & 0x00000000ffffffffULL) {
 4089		u32 rx_vec = (v1 & 0xffffffff);
 4090
 4091		for (i = 0; i < np->num_rx_rings; i++) {
 4092			struct rx_ring_info *rp = &np->rx_rings[i];
 4093
 4094			if (rx_vec & (1 << rp->rx_channel)) {
 4095				int r = niu_rx_error(np, rp);
 4096				if (r) {
 4097					err = r;
 4098				} else {
 4099					if (!v0)
 4100						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 4101						     RX_DMA_CTL_STAT_MEX);
 4102				}
 4103			}
 4104		}
 4105	}
 4106	if (v1 & 0x7fffffff00000000ULL) {
 4107		u32 tx_vec = (v1 >> 32) & 0x7fffffff;
 4108
 4109		for (i = 0; i < np->num_tx_rings; i++) {
 4110			struct tx_ring_info *rp = &np->tx_rings[i];
 4111
 4112			if (tx_vec & (1 << rp->tx_channel)) {
 4113				int r = niu_tx_error(np, rp);
 4114				if (r)
 4115					err = r;
 4116			}
 4117		}
 4118	}
 4119	if ((v0 | v1) & 0x8000000000000000ULL) {
 4120		int r = niu_mif_interrupt(np);
 4121		if (r)
 4122			err = r;
 4123	}
 4124	if (v2) {
 4125		if (v2 & 0x01ef) {
 4126			int r = niu_mac_interrupt(np);
 4127			if (r)
 4128				err = r;
 4129		}
 4130		if (v2 & 0x0210) {
 4131			int r = niu_device_error(np);
 4132			if (r)
 4133				err = r;
 4134		}
 4135	}
 4136
 4137	if (err)
 4138		niu_enable_interrupts(np, 0);
 4139
 4140	return err;
 4141}
 4142
 4143static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
 4144			    int ldn)
 4145{
 4146	struct rxdma_mailbox *mbox = rp->mbox;
 4147	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
 4148
 4149	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
 4150		      RX_DMA_CTL_STAT_RCRTO);
 4151	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
 4152
 4153	netif_printk(np, intr, KERN_DEBUG, np->dev,
 4154		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
 4155}
 4156
 4157static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
 4158			    int ldn)
 4159{
 4160	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
 4161
 4162	netif_printk(np, intr, KERN_DEBUG, np->dev,
 4163		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
 4164}
 4165
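      /* LDSV0 holds one status bit per logical device: bits 0-31 map to
       * the RX DMA channels, bits 32-62 to the TX DMA channels, and bit
       * 63 to the MIF.  For example, a v0 of 0x0000000400000001 means TX
       * channel 2 (bit 34) and RX channel 0 (bit 0) are pending; each
       * matching device is masked via LD_IM0 before NAPI polls it.
       */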
 4166static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
 4167{
 4168	struct niu_parent *parent = np->parent;
 4169	u32 rx_vec, tx_vec;
 4170	int i;
 4171
 4172	tx_vec = (v0 >> 32);
 4173	rx_vec = (v0 & 0xffffffff);
 4174
 4175	for (i = 0; i < np->num_rx_rings; i++) {
 4176		struct rx_ring_info *rp = &np->rx_rings[i];
 4177		int ldn = LDN_RXDMA(rp->rx_channel);
 4178
 4179		if (parent->ldg_map[ldn] != ldg)
 4180			continue;
 4181
 4182		nw64(LD_IM0(ldn), LD_IM0_MASK);
 4183		if (rx_vec & (1 << rp->rx_channel))
 4184			niu_rxchan_intr(np, rp, ldn);
 4185	}
 4186
 4187	for (i = 0; i < np->num_tx_rings; i++) {
 4188		struct tx_ring_info *rp = &np->tx_rings[i];
 4189		int ldn = LDN_TXDMA(rp->tx_channel);
 4190
 4191		if (parent->ldg_map[ldn] != ldg)
 4192			continue;
 4193
 4194		nw64(LD_IM0(ldn), LD_IM0_MASK);
 4195		if (tx_vec & (1 << rp->tx_channel))
 4196			niu_txchan_intr(np, rp, ldn);
 4197	}
 4198}
 4199
 4200static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
 4201			      u64 v0, u64 v1, u64 v2)
 4202{
 4203	if (likely(napi_schedule_prep(&lp->napi))) {
 4204		lp->v0 = v0;
 4205		lp->v1 = v1;
 4206		lp->v2 = v2;
 4207		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
 4208		__napi_schedule(&lp->napi);
 4209	}
 4210}
 4211
 4212static irqreturn_t niu_interrupt(int irq, void *dev_id)
 4213{
 4214	struct niu_ldg *lp = dev_id;
 4215	struct niu *np = lp->np;
 4216	int ldg = lp->ldg_num;
 4217	unsigned long flags;
 4218	u64 v0, v1, v2;
 4219
 4220	if (netif_msg_intr(np))
 4221		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
 4222		       __func__, lp, ldg);
 4223
 4224	spin_lock_irqsave(&np->lock, flags);
 4225
 4226	v0 = nr64(LDSV0(ldg));
 4227	v1 = nr64(LDSV1(ldg));
 4228	v2 = nr64(LDSV2(ldg));
 4229
 4230	if (netif_msg_intr(np))
 4231		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
 4232		       (unsigned long long) v0,
 4233		       (unsigned long long) v1,
 4234		       (unsigned long long) v2);
 4235
 4236	if (unlikely(!v0 && !v1 && !v2)) {
 4237		spin_unlock_irqrestore(&np->lock, flags);
 4238		return IRQ_NONE;
 4239	}
 4240
 4241	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
 4242		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
 4243		if (err)
 4244			goto out;
 4245	}
 4246	if (likely(v0 & ~((u64)1 << LDN_MIF)))
 4247		niu_schedule_napi(np, lp, v0, v1, v2);
 4248	else
 4249		niu_ldg_rearm(np, lp, 1);
 4250out:
 4251	spin_unlock_irqrestore(&np->lock, flags);
 4252
 4253	return IRQ_HANDLED;
 4254}
 4255
 4256static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
 4257{
 4258	if (rp->mbox) {
 4259		np->ops->free_coherent(np->device,
 4260				       sizeof(struct rxdma_mailbox),
 4261				       rp->mbox, rp->mbox_dma);
 4262		rp->mbox = NULL;
 4263	}
 4264	if (rp->rcr) {
 4265		np->ops->free_coherent(np->device,
 4266				       MAX_RCR_RING_SIZE * sizeof(__le64),
 4267				       rp->rcr, rp->rcr_dma);
 4268		rp->rcr = NULL;
 4269		rp->rcr_table_size = 0;
 4270		rp->rcr_index = 0;
 4271	}
 4272	if (rp->rbr) {
 4273		niu_rbr_free(np, rp);
 4274
 4275		np->ops->free_coherent(np->device,
 4276				       MAX_RBR_RING_SIZE * sizeof(__le32),
 4277				       rp->rbr, rp->rbr_dma);
 4278		rp->rbr = NULL;
 4279		rp->rbr_table_size = 0;
 4280		rp->rbr_index = 0;
 4281	}
 4282	kfree(rp->rxhash);
 4283	rp->rxhash = NULL;
 4284}
 4285
 4286static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
 4287{
 4288	if (rp->mbox) {
 4289		np->ops->free_coherent(np->device,
 4290				       sizeof(struct txdma_mailbox),
 4291				       rp->mbox, rp->mbox_dma);
 4292		rp->mbox = NULL;
 4293	}
 4294	if (rp->descr) {
 4295		int i;
 4296
 4297		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
 4298			if (rp->tx_buffs[i].skb)
 4299				(void) release_tx_packet(np, rp, i);
 4300		}
 4301
 4302		np->ops->free_coherent(np->device,
 4303				       MAX_TX_RING_SIZE * sizeof(__le64),
 4304				       rp->descr, rp->descr_dma);
 4305		rp->descr = NULL;
 4306		rp->pending = 0;
 4307		rp->prod = 0;
 4308		rp->cons = 0;
 4309		rp->wrap_bit = 0;
 4310	}
 4311}
 4312
 4313static void niu_free_channels(struct niu *np)
 4314{
 4315	int i;
 4316
 4317	if (np->rx_rings) {
 4318		for (i = 0; i < np->num_rx_rings; i++) {
 4319			struct rx_ring_info *rp = &np->rx_rings[i];
 4320
 4321			niu_free_rx_ring_info(np, rp);
 4322		}
 4323		kfree(np->rx_rings);
 4324		np->rx_rings = NULL;
 4325		np->num_rx_rings = 0;
 4326	}
 4327
 4328	if (np->tx_rings) {
 4329		for (i = 0; i < np->num_tx_rings; i++) {
 4330			struct tx_ring_info *rp = &np->tx_rings[i];
 4331
 4332			niu_free_tx_ring_info(np, rp);
 4333		}
 4334		kfree(np->tx_rings);
 4335		np->tx_rings = NULL;
 4336		np->num_tx_rings = 0;
 4337	}
 4338}
 4339
 4340static int niu_alloc_rx_ring_info(struct niu *np,
 4341				  struct rx_ring_info *rp)
 4342{
 4343	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
 4344
 4345	rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
 4346			     GFP_KERNEL);
 4347	if (!rp->rxhash)
 4348		return -ENOMEM;
 4349
 4350	rp->mbox = np->ops->alloc_coherent(np->device,
 4351					   sizeof(struct rxdma_mailbox),
 4352					   &rp->mbox_dma, GFP_KERNEL);
 4353	if (!rp->mbox)
 4354		return -ENOMEM;
 4355	if ((unsigned long)rp->mbox & (64UL - 1)) {
 4356		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
 4357			   rp->mbox);
 4358		return -EINVAL;
 4359	}
 4360
 4361	rp->rcr = np->ops->alloc_coherent(np->device,
 4362					  MAX_RCR_RING_SIZE * sizeof(__le64),
 4363					  &rp->rcr_dma, GFP_KERNEL);
 4364	if (!rp->rcr)
 4365		return -ENOMEM;
 4366	if ((unsigned long)rp->rcr & (64UL - 1)) {
 4367		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
 4368			   rp->rcr);
 4369		return -EINVAL;
 4370	}
 4371	rp->rcr_table_size = MAX_RCR_RING_SIZE;
 4372	rp->rcr_index = 0;
 4373
 4374	rp->rbr = np->ops->alloc_coherent(np->device,
 4375					  MAX_RBR_RING_SIZE * sizeof(__le32),
 4376					  &rp->rbr_dma, GFP_KERNEL);
 4377	if (!rp->rbr)
 4378		return -ENOMEM;
 4379	if ((unsigned long)rp->rbr & (64UL - 1)) {
 4380		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
 4381			   rp->rbr);
 4382		return -EINVAL;
 4383	}
 4384	rp->rbr_table_size = MAX_RBR_RING_SIZE;
 4385	rp->rbr_index = 0;
 4386	rp->rbr_pending = 0;
 4387
 4388	return 0;
 4389}
 4390
 4391static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
 4392{
 4393	int mtu = np->dev->mtu;
 4394
 4395	/* These values are recommended by the HW designers for fair
 4396	 * utilization of DRR amongst the rings.
 4397	 */
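      	/* e.g. a 1500 byte MTU gives a 1532 byte burst; jumbo MTUs
      	 * are capped at 4096 bytes.
      	 */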
 4398	rp->max_burst = mtu + 32;
 4399	if (rp->max_burst > 4096)
 4400		rp->max_burst = 4096;
 4401}
 4402
 4403static int niu_alloc_tx_ring_info(struct niu *np,
 4404				  struct tx_ring_info *rp)
 4405{
 4406	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
 4407
 4408	rp->mbox = np->ops->alloc_coherent(np->device,
 4409					   sizeof(struct txdma_mailbox),
 4410					   &rp->mbox_dma, GFP_KERNEL);
 4411	if (!rp->mbox)
 4412		return -ENOMEM;
 4413	if ((unsigned long)rp->mbox & (64UL - 1)) {
 4414		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
 4415			   rp->mbox);
 4416		return -EINVAL;
 4417	}
 4418
 4419	rp->descr = np->ops->alloc_coherent(np->device,
 4420					    MAX_TX_RING_SIZE * sizeof(__le64),
 4421					    &rp->descr_dma, GFP_KERNEL);
 4422	if (!rp->descr)
 4423		return -ENOMEM;
 4424	if ((unsigned long)rp->descr & (64UL - 1)) {
 4425		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
 4426			   rp->descr);
 4427		return -EINVAL;
 4428	}
 4429
 4430	rp->pending = MAX_TX_RING_SIZE;
 4431	rp->prod = 0;
 4432	rp->cons = 0;
 4433	rp->wrap_bit = 0;
 4434
 4435	/* XXX make these configurable... XXX */
 4436	rp->mark_freq = rp->pending / 4;
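      	/* With the default 256-entry ring this marks roughly every
      	 * 64th descriptor for a completion interrupt.
      	 */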
 4437
 4438	niu_set_max_burst(np, rp);
 4439
 4440	return 0;
 4441}
 4442
 4443static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
 4444{
 4445	u16 bss;
 4446
 4447	bss = min(PAGE_SHIFT, 15);
 4448
 4449	rp->rbr_block_size = 1 << bss;
 4450	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
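      	/* e.g. 4KB pages give bss = 12 (4096-byte blocks, one per
      	 * page), while 64KB pages clamp to bss = 15 (32KB blocks,
      	 * two per page).
      	 */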
 4451
 4452	rp->rbr_sizes[0] = 256;
 4453	rp->rbr_sizes[1] = 1024;
 4454	if (np->dev->mtu > ETH_DATA_LEN) {
 4455		switch (PAGE_SIZE) {
 4456		case 4 * 1024:
 4457			rp->rbr_sizes[2] = 4096;
 4458			break;
 4459
 4460		default:
 4461			rp->rbr_sizes[2] = 8192;
 4462			break;
 4463		}
 4464	} else {
 4465		rp->rbr_sizes[2] = 2048;
 4466	}
 4467	rp->rbr_sizes[3] = rp->rbr_block_size;
 4468}
 4469
 4470static int niu_alloc_channels(struct niu *np)
 4471{
 4472	struct niu_parent *parent = np->parent;
 4473	int first_rx_channel, first_tx_channel;
 4474	int num_rx_rings, num_tx_rings;
 4475	struct rx_ring_info *rx_rings;
 4476	struct tx_ring_info *tx_rings;
 4477	int i, port, err;
 4478
 4479	port = np->port;
 4480	first_rx_channel = first_tx_channel = 0;
 4481	for (i = 0; i < port; i++) {
 4482		first_rx_channel += parent->rxchan_per_port[i];
 4483		first_tx_channel += parent->txchan_per_port[i];
 4484	}
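      	/* Illustrative split: if ports 0 and 1 each owned 4 RX and
      	 * 6 TX channels, port 2 would start at RX channel 8 and TX
      	 * channel 12; the real partition comes from the parent.
      	 */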
 4485
 4486	num_rx_rings = parent->rxchan_per_port[port];
 4487	num_tx_rings = parent->txchan_per_port[port];
 4488
 4489	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
 4490			   GFP_KERNEL);
 4491	err = -ENOMEM;
 4492	if (!rx_rings)
 4493		goto out_err;
 4494
 4495	np->num_rx_rings = num_rx_rings;
 4496	smp_wmb();
 4497	np->rx_rings = rx_rings;
 4498
 4499	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
 4500
 4501	for (i = 0; i < np->num_rx_rings; i++) {
 4502		struct rx_ring_info *rp = &np->rx_rings[i];
 4503
 4504		rp->np = np;
 4505		rp->rx_channel = first_rx_channel + i;
 4506
 4507		err = niu_alloc_rx_ring_info(np, rp);
 4508		if (err)
 4509			goto out_err;
 4510
 4511		niu_size_rbr(np, rp);
 4512
 4513		/* XXX better defaults, configurable, etc... XXX */
 4514		rp->nonsyn_window = 64;
 4515		rp->nonsyn_threshold = rp->rcr_table_size - 64;
 4516		rp->syn_window = 64;
 4517		rp->syn_threshold = rp->rcr_table_size - 64;
 4518		rp->rcr_pkt_threshold = 16;
 4519		rp->rcr_timeout = 8;
 4520		rp->rbr_kick_thresh = RBR_REFILL_MIN;
 4521		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
 4522			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
 4523
  4524		err = niu_rbr_fill(np, rp, GFP_KERNEL);
  4525		if (err)
  4526			goto out_err;
 4527	}
 4528
 4529	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
 4530			   GFP_KERNEL);
 4531	err = -ENOMEM;
 4532	if (!tx_rings)
 4533		goto out_err;
 4534
 4535	np->num_tx_rings = num_tx_rings;
 4536	smp_wmb();
 4537	np->tx_rings = tx_rings;
 4538
 4539	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
 4540
 4541	for (i = 0; i < np->num_tx_rings; i++) {
 4542		struct tx_ring_info *rp = &np->tx_rings[i];
 4543
 4544		rp->np = np;
 4545		rp->tx_channel = first_tx_channel + i;
 4546
 4547		err = niu_alloc_tx_ring_info(np, rp);
 4548		if (err)
 4549			goto out_err;
 4550	}
 4551
 4552	return 0;
 4553
 4554out_err:
 4555	niu_free_channels(np);
 4556	return err;
 4557}
 4558
 4559static int niu_tx_cs_sng_poll(struct niu *np, int channel)
 4560{
 4561	int limit = 1000;
 4562
 4563	while (--limit > 0) {
 4564		u64 val = nr64(TX_CS(channel));
 4565		if (val & TX_CS_SNG_STATE)
 4566			return 0;
 4567	}
 4568	return -ENODEV;
 4569}
 4570
 4571static int niu_tx_channel_stop(struct niu *np, int channel)
 4572{
 4573	u64 val = nr64(TX_CS(channel));
 4574
 4575	val |= TX_CS_STOP_N_GO;
 4576	nw64(TX_CS(channel), val);
 4577
 4578	return niu_tx_cs_sng_poll(np, channel);
 4579}
 4580
 4581static int niu_tx_cs_reset_poll(struct niu *np, int channel)
 4582{
 4583	int limit = 1000;
 4584
 4585	while (--limit > 0) {
 4586		u64 val = nr64(TX_CS(channel));
 4587		if (!(val & TX_CS_RST))
 4588			return 0;
 4589	}
 4590	return -ENODEV;
 4591}
 4592
 4593static int niu_tx_channel_reset(struct niu *np, int channel)
 4594{
 4595	u64 val = nr64(TX_CS(channel));
 4596	int err;
 4597
 4598	val |= TX_CS_RST;
 4599	nw64(TX_CS(channel), val);
 4600
 4601	err = niu_tx_cs_reset_poll(np, channel);
 4602	if (!err)
 4603		nw64(TX_RING_KICK(channel), 0);
 4604
 4605	return err;
 4606}
 4607
 4608static int niu_tx_channel_lpage_init(struct niu *np, int channel)
 4609{
 4610	u64 val;
 4611
 4612	nw64(TX_LOG_MASK1(channel), 0);
 4613	nw64(TX_LOG_VAL1(channel), 0);
 4614	nw64(TX_LOG_MASK2(channel), 0);
 4615	nw64(TX_LOG_VAL2(channel), 0);
 4616	nw64(TX_LOG_PAGE_RELO1(channel), 0);
 4617	nw64(TX_LOG_PAGE_RELO2(channel), 0);
 4618	nw64(TX_LOG_PAGE_HDL(channel), 0);
 4619
 4620	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
 4621	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
 4622	nw64(TX_LOG_PAGE_VLD(channel), val);
 4623
 4624	/* XXX TXDMA 32bit mode? XXX */
 4625
 4626	return 0;
 4627}
 4628
 4629static void niu_txc_enable_port(struct niu *np, int on)
 4630{
 4631	unsigned long flags;
 4632	u64 val, mask;
 4633
 4634	niu_lock_parent(np, flags);
 4635	val = nr64(TXC_CONTROL);
 4636	mask = (u64)1 << np->port;
 4637	if (on) {
 4638		val |= TXC_CONTROL_ENABLE | mask;
 4639	} else {
 4640		val &= ~mask;
 4641		if ((val & ~TXC_CONTROL_ENABLE) == 0)
 4642			val &= ~TXC_CONTROL_ENABLE;
 4643	}
 4644	nw64(TXC_CONTROL, val);
 4645	niu_unlock_parent(np, flags);
 4646}
 4647
 4648static void niu_txc_set_imask(struct niu *np, u64 imask)
 4649{
 4650	unsigned long flags;
 4651	u64 val;
 4652
 4653	niu_lock_parent(np, flags);
 4654	val = nr64(TXC_INT_MASK);
 4655	val &= ~TXC_INT_MASK_VAL(np->port);
  4656	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
      	nw64(TXC_INT_MASK, val);	/* write the updated mask back */
  4657	niu_unlock_parent(np, flags);
 4658}
 4659
 4660static void niu_txc_port_dma_enable(struct niu *np, int on)
 4661{
 4662	u64 val = 0;
 4663
 4664	if (on) {
 4665		int i;
 4666
 4667		for (i = 0; i < np->num_tx_rings; i++)
 4668			val |= (1 << np->tx_rings[i].tx_channel);
 4669	}
 4670	nw64(TXC_PORT_DMA(np->port), val);
 4671}
 4672
 4673static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 4674{
 4675	int err, channel = rp->tx_channel;
 4676	u64 val, ring_len;
 4677
 4678	err = niu_tx_channel_stop(np, channel);
 4679	if (err)
 4680		return err;
 4681
 4682	err = niu_tx_channel_reset(np, channel);
 4683	if (err)
 4684		return err;
 4685
 4686	err = niu_tx_channel_lpage_init(np, channel);
 4687	if (err)
 4688		return err;
 4689
 4690	nw64(TXC_DMA_MAX(channel), rp->max_burst);
 4691	nw64(TX_ENT_MSK(channel), 0);
 4692
 4693	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
 4694			      TX_RNG_CFIG_STADDR)) {
 4695		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
 4696			   channel, (unsigned long long)rp->descr_dma);
 4697		return -EINVAL;
 4698	}
 4699
  4700	/* The length field in TX_RNG_CFIG is measured in 64-byte
  4701	 * blocks.  rp->pending counts 8-byte TX descriptors, so
  4702	 * dividing the descriptor count by 8 converts it to the
  4703	 * 64-byte block count the chip expects.
  4704	 */
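      	/* e.g. a default 256-descriptor ring (256 * 8 = 2048 bytes)
      	 * is programmed as a length of 256 / 8 = 32 blocks.
      	 */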
 4705	ring_len = (rp->pending / 8);
 4706
 4707	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
 4708	       rp->descr_dma);
 4709	nw64(TX_RNG_CFIG(channel), val);
 4710
 4711	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
 4712	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
 4713		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
 4714			    channel, (unsigned long long)rp->mbox_dma);
 4715		return -EINVAL;
 4716	}
 4717	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
 4718	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
 4719
 4720	nw64(TX_CS(channel), 0);
 4721
 4722	rp->last_pkt_cnt = 0;
 4723
 4724	return 0;
 4725}
 4726
 4727static void niu_init_rdc_groups(struct niu *np)
 4728{
 4729	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
 4730	int i, first_table_num = tp->first_table_num;
 4731
 4732	for (i = 0; i < tp->num_tables; i++) {
 4733		struct rdc_table *tbl = &tp->tables[i];
 4734		int this_table = first_table_num + i;
 4735		int slot;
 4736
 4737		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
 4738			nw64(RDC_TBL(this_table, slot),
 4739			     tbl->rxdma_channel[slot]);
 4740	}
 4741
 4742	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
 4743}
 4744
 4745static void niu_init_drr_weight(struct niu *np)
 4746{
 4747	int type = phy_decode(np->parent->port_phy, np->port);
 4748	u64 val;
 4749
 4750	switch (type) {
 4751	case PORT_TYPE_10G:
 4752		val = PT_DRR_WEIGHT_DEFAULT_10G;
 4753		break;
 4754
 4755	case PORT_TYPE_1G:
 4756	default:
 4757		val = PT_DRR_WEIGHT_DEFAULT_1G;
 4758		break;
 4759	}
 4760	nw64(PT_DRR_WT(np->port), val);
 4761}
 4762
 4763static int niu_init_hostinfo(struct niu *np)
 4764{
 4765	struct niu_parent *parent = np->parent;
 4766	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 4767	int i, err, num_alt = niu_num_alt_addr(np);
 4768	int first_rdc_table = tp->first_table_num;
 4769
 4770	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 4771	if (err)
 4772		return err;
 4773
 4774	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 4775	if (err)
 4776		return err;
 4777
 4778	for (i = 0; i < num_alt; i++) {
 4779		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
 4780		if (err)
 4781			return err;
 4782	}
 4783
 4784	return 0;
 4785}
 4786
 4787static int niu_rx_channel_reset(struct niu *np, int channel)
 4788{
 4789	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
 4790				      RXDMA_CFIG1_RST, 1000, 10,
 4791				      "RXDMA_CFIG1");
 4792}
 4793
 4794static int niu_rx_channel_lpage_init(struct niu *np, int channel)
 4795{
 4796	u64 val;
 4797
 4798	nw64(RX_LOG_MASK1(channel), 0);
 4799	nw64(RX_LOG_VAL1(channel), 0);
 4800	nw64(RX_LOG_MASK2(channel), 0);
 4801	nw64(RX_LOG_VAL2(channel), 0);
 4802	nw64(RX_LOG_PAGE_RELO1(channel), 0);
 4803	nw64(RX_LOG_PAGE_RELO2(channel), 0);
 4804	nw64(RX_LOG_PAGE_HDL(channel), 0);
 4805
 4806	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
 4807	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
 4808	nw64(RX_LOG_PAGE_VLD(channel), val);
 4809
 4810	return 0;
 4811}
 4812
 4813static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
 4814{
 4815	u64 val;
 4816
 4817	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
 4818	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
 4819	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
 4820	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
 4821	nw64(RDC_RED_PARA(rp->rx_channel), val);
 4822}
 4823
 4824static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
 4825{
 4826	u64 val = 0;
 4827
 4828	*ret = 0;
 4829	switch (rp->rbr_block_size) {
 4830	case 4 * 1024:
 4831		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4832		break;
 4833	case 8 * 1024:
 4834		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4835		break;
 4836	case 16 * 1024:
 4837		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4838		break;
 4839	case 32 * 1024:
 4840		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
 4841		break;
 4842	default:
 4843		return -EINVAL;
 4844	}
 4845	val |= RBR_CFIG_B_VLD2;
 4846	switch (rp->rbr_sizes[2]) {
 4847	case 2 * 1024:
 4848		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4849		break;
 4850	case 4 * 1024:
 4851		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4852		break;
 4853	case 8 * 1024:
 4854		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4855		break;
 4856	case 16 * 1024:
 4857		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
 4858		break;
 4859
 4860	default:
 4861		return -EINVAL;
 4862	}
 4863	val |= RBR_CFIG_B_VLD1;
 4864	switch (rp->rbr_sizes[1]) {
 4865	case 1 * 1024:
 4866		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4867		break;
 4868	case 2 * 1024:
 4869		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4870		break;
 4871	case 4 * 1024:
 4872		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4873		break;
 4874	case 8 * 1024:
 4875		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
 4876		break;
 4877
 4878	default:
 4879		return -EINVAL;
 4880	}
 4881	val |= RBR_CFIG_B_VLD0;
 4882	switch (rp->rbr_sizes[0]) {
 4883	case 256:
 4884		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
 4885		break;
 4886	case 512:
 4887		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
 4888		break;
 4889	case 1 * 1024:
 4890		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
 4891		break;
 4892	case 2 * 1024:
 4893		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
 4894		break;
 4895
 4896	default:
 4897		return -EINVAL;
 4898	}
 4899
 4900	*ret = val;
 4901	return 0;
 4902}
 4903
 4904static int niu_enable_rx_channel(struct niu *np, int channel, int on)
 4905{
 4906	u64 val = nr64(RXDMA_CFIG1(channel));
 4907	int limit;
 4908
 4909	if (on)
 4910		val |= RXDMA_CFIG1_EN;
 4911	else
 4912		val &= ~RXDMA_CFIG1_EN;
 4913	nw64(RXDMA_CFIG1(channel), val);
 4914
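      	/* Poll the quiescent bit for up to ~10ms (1000 reads spaced
      	 * 10us apart).
      	 */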
 4915	limit = 1000;
 4916	while (--limit > 0) {
 4917		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
 4918			break;
 4919		udelay(10);
 4920	}
 4921	if (limit <= 0)
 4922		return -ENODEV;
 4923	return 0;
 4924}
 4925
 4926static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 4927{
 4928	int err, channel = rp->rx_channel;
 4929	u64 val;
 4930
 4931	err = niu_rx_channel_reset(np, channel);
 4932	if (err)
 4933		return err;
 4934
 4935	err = niu_rx_channel_lpage_init(np, channel);
 4936	if (err)
 4937		return err;
 4938
 4939	niu_rx_channel_wred_init(np, rp);
 4940
 4941	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
 4942	nw64(RX_DMA_CTL_STAT(channel),
 4943	     (RX_DMA_CTL_STAT_MEX |
 4944	      RX_DMA_CTL_STAT_RCRTHRES |
 4945	      RX_DMA_CTL_STAT_RCRTO |
 4946	      RX_DMA_CTL_STAT_RBR_EMPTY));
 4947	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
 4948	nw64(RXDMA_CFIG2(channel),
 4949	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
 4950	      RXDMA_CFIG2_FULL_HDR));
 4951	nw64(RBR_CFIG_A(channel),
 4952	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
 4953	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
 4954	err = niu_compute_rbr_cfig_b(rp, &val);
 4955	if (err)
 4956		return err;
 4957	nw64(RBR_CFIG_B(channel), val);
 4958	nw64(RCRCFIG_A(channel),
 4959	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
 4960	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
 4961	nw64(RCRCFIG_B(channel),
 4962	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
 4963	     RCRCFIG_B_ENTOUT |
 4964	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
 4965
 4966	err = niu_enable_rx_channel(np, channel, 1);
 4967	if (err)
 4968		return err;
 4969
 4970	nw64(RBR_KICK(channel), rp->rbr_index);
 4971
 4972	val = nr64(RX_DMA_CTL_STAT(channel));
 4973	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
 4974	nw64(RX_DMA_CTL_STAT(channel), val);
 4975
 4976	return 0;
 4977}
 4978
 4979static int niu_init_rx_channels(struct niu *np)
 4980{
 4981	unsigned long flags;
 4982	u64 seed = jiffies_64;
 4983	int err, i;
 4984
 4985	niu_lock_parent(np, flags);
 4986	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
 4987	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
 4988	niu_unlock_parent(np, flags);
 4989
 4990	/* XXX RXDMA 32bit mode? XXX */
 4991
 4992	niu_init_rdc_groups(np);
 4993	niu_init_drr_weight(np);
 4994
 4995	err = niu_init_hostinfo(np);
 4996	if (err)
 4997		return err;
 4998
 4999	for (i = 0; i < np->num_rx_rings; i++) {
 5000		struct rx_ring_info *rp = &np->rx_rings[i];
 5001
 5002		err = niu_init_one_rx_channel(np, rp);
 5003		if (err)
 5004			return err;
 5005	}
 5006
 5007	return 0;
 5008}
 5009
 5010static int niu_set_ip_frag_rule(struct niu *np)
 5011{
 5012	struct niu_parent *parent = np->parent;
 5013	struct niu_classifier *cp = &np->clas;
 5014	struct niu_tcam_entry *tp;
 5015	int index, err;
 5016
 5017	index = cp->tcam_top;
 5018	tp = &parent->tcam[index];
 5019
 5020	/* Note that the noport bit is the same in both ipv4 and
 5021	 * ipv6 format TCAM entries.
 5022	 */
 5023	memset(tp, 0, sizeof(*tp));
 5024	tp->key[1] = TCAM_V4KEY1_NOPORT;
 5025	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
 5026	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
 5027			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
 5028	err = tcam_write(np, index, tp->key, tp->key_mask);
 5029	if (err)
 5030		return err;
 5031	err = tcam_assoc_write(np, index, tp->assoc_data);
 5032	if (err)
 5033		return err;
 5034	tp->valid = 1;
 5035	cp->tcam_valid_entries++;
 5036
 5037	return 0;
 5038}
 5039
 5040static int niu_init_classifier_hw(struct niu *np)
 5041{
 5042	struct niu_parent *parent = np->parent;
 5043	struct niu_classifier *cp = &np->clas;
 5044	int i, err;
 5045
 5046	nw64(H1POLY, cp->h1_init);
 5047	nw64(H2POLY, cp->h2_init);
 5048
 5049	err = niu_init_hostinfo(np);
 5050	if (err)
 5051		return err;
 5052
 5053	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
 5054		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
 5055
 5056		vlan_tbl_write(np, i, np->port,
 5057			       vp->vlan_pref, vp->rdc_num);
 5058	}
 5059
 5060	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
 5061		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
 5062
 5063		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
 5064						ap->rdc_num, ap->mac_pref);
 5065		if (err)
 5066			return err;
 5067	}
 5068
 5069	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
 5070		int index = i - CLASS_CODE_USER_PROG1;
 5071
 5072		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
 5073		if (err)
 5074			return err;
 5075		err = niu_set_flow_key(np, i, parent->flow_key[index]);
 5076		if (err)
 5077			return err;
 5078	}
 5079
 5080	err = niu_set_ip_frag_rule(np);
 5081	if (err)
 5082		return err;
 5083
 5084	tcam_enable(np, 1);
 5085
 5086	return 0;
 5087}
 5088
 5089static int niu_zcp_write(struct niu *np, int index, u64 *data)
 5090{
 5091	nw64(ZCP_RAM_DATA0, data[0]);
 5092	nw64(ZCP_RAM_DATA1, data[1]);
 5093	nw64(ZCP_RAM_DATA2, data[2]);
 5094	nw64(ZCP_RAM_DATA3, data[3]);
 5095	nw64(ZCP_RAM_DATA4, data[4]);
 5096	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
 5097	nw64(ZCP_RAM_ACC,
 5098	     (ZCP_RAM_ACC_WRITE |
 5099	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
 5100	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
 5101
 5102	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5103				   1000, 100);
 5104}
 5105
 5106static int niu_zcp_read(struct niu *np, int index, u64 *data)
 5107{
 5108	int err;
 5109
 5110	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5111				  1000, 100);
 5112	if (err) {
 5113		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
 5114			   (unsigned long long)nr64(ZCP_RAM_ACC));
 5115		return err;
 5116	}
 5117
 5118	nw64(ZCP_RAM_ACC,
 5119	     (ZCP_RAM_ACC_READ |
 5120	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
 5121	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
 5122
 5123	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
 5124				  1000, 100);
 5125	if (err) {
 5126		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
 5127			   (unsigned long long)nr64(ZCP_RAM_ACC));
 5128		return err;
 5129	}
 5130
 5131	data[0] = nr64(ZCP_RAM_DATA0);
 5132	data[1] = nr64(ZCP_RAM_DATA1);
 5133	data[2] = nr64(ZCP_RAM_DATA2);
 5134	data[3] = nr64(ZCP_RAM_DATA3);
 5135	data[4] = nr64(ZCP_RAM_DATA4);
 5136
 5137	return 0;
 5138}
 5139
 5140static void niu_zcp_cfifo_reset(struct niu *np)
 5141{
 5142	u64 val = nr64(RESET_CFIFO);
 5143
 5144	val |= RESET_CFIFO_RST(np->port);
 5145	nw64(RESET_CFIFO, val);
 5146	udelay(10);
 5147
 5148	val &= ~RESET_CFIFO_RST(np->port);
 5149	nw64(RESET_CFIFO, val);
 5150}
 5151
 5152static int niu_init_zcp(struct niu *np)
 5153{
 5154	u64 data[5], rbuf[5];
 5155	int i, max, err;
 5156
 5157	if (np->parent->plat_type != PLAT_TYPE_NIU) {
 5158		if (np->port == 0 || np->port == 1)
 5159			max = ATLAS_P0_P1_CFIFO_ENTRIES;
 5160		else
 5161			max = ATLAS_P2_P3_CFIFO_ENTRIES;
 5162	} else
 5163		max = NIU_CFIFO_ENTRIES;
 5164
 5165	data[0] = 0;
 5166	data[1] = 0;
 5167	data[2] = 0;
 5168	data[3] = 0;
 5169	data[4] = 0;
 5170
 5171	for (i = 0; i < max; i++) {
 5172		err = niu_zcp_write(np, i, data);
 5173		if (err)
 5174			return err;
 5175		err = niu_zcp_read(np, i, rbuf);
 5176		if (err)
 5177			return err;
 5178	}
 5179
 5180	niu_zcp_cfifo_reset(np);
 5181	nw64(CFIFO_ECC(np->port), 0);
 5182	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
 5183	(void) nr64(ZCP_INT_STAT);
 5184	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
 5185
 5186	return 0;
 5187}
 5188
 5189static void niu_ipp_write(struct niu *np, int index, u64 *data)
 5190{
 5191	u64 val = nr64_ipp(IPP_CFIG);
 5192
 5193	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
 5194	nw64_ipp(IPP_DFIFO_WR_PTR, index);
 5195	nw64_ipp(IPP_DFIFO_WR0, data[0]);
 5196	nw64_ipp(IPP_DFIFO_WR1, data[1]);
 5197	nw64_ipp(IPP_DFIFO_WR2, data[2]);
 5198	nw64_ipp(IPP_DFIFO_WR3, data[3]);
 5199	nw64_ipp(IPP_DFIFO_WR4, data[4]);
 5200	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
 5201}
 5202
 5203static void niu_ipp_read(struct niu *np, int index, u64 *data)
 5204{
 5205	nw64_ipp(IPP_DFIFO_RD_PTR, index);
 5206	data[0] = nr64_ipp(IPP_DFIFO_RD0);
 5207	data[1] = nr64_ipp(IPP_DFIFO_RD1);
 5208	data[2] = nr64_ipp(IPP_DFIFO_RD2);
 5209	data[3] = nr64_ipp(IPP_DFIFO_RD3);
 5210	data[4] = nr64_ipp(IPP_DFIFO_RD4);
 5211}
 5212
 5213static int niu_ipp_reset(struct niu *np)
 5214{
 5215	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
 5216					  1000, 100, "IPP_CFIG");
 5217}
 5218
 5219static int niu_init_ipp(struct niu *np)
 5220{
 5221	u64 data[5], rbuf[5], val;
 5222	int i, max, err;
 5223
 5224	if (np->parent->plat_type != PLAT_TYPE_NIU) {
 5225		if (np->port == 0 || np->port == 1)
 5226			max = ATLAS_P0_P1_DFIFO_ENTRIES;
 5227		else
 5228			max = ATLAS_P2_P3_DFIFO_ENTRIES;
 5229	} else
 5230		max = NIU_DFIFO_ENTRIES;
 5231
 5232	data[0] = 0;
 5233	data[1] = 0;
 5234	data[2] = 0;
 5235	data[3] = 0;
 5236	data[4] = 0;
 5237
 5238	for (i = 0; i < max; i++) {
 5239		niu_ipp_write(np, i, data);
 5240		niu_ipp_read(np, i, rbuf);
 5241	}
 5242
 5243	(void) nr64_ipp(IPP_INT_STAT);
 5244	(void) nr64_ipp(IPP_INT_STAT);
 5245
 5246	err = niu_ipp_reset(np);
 5247	if (err)
 5248		return err;
 5249
 5250	(void) nr64_ipp(IPP_PKT_DIS);
 5251	(void) nr64_ipp(IPP_BAD_CS_CNT);
 5252	(void) nr64_ipp(IPP_ECC);
 5253
 5254	(void) nr64_ipp(IPP_INT_STAT);
 5255
 5256	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
 5257
 5258	val = nr64_ipp(IPP_CFIG);
 5259	val &= ~IPP_CFIG_IP_MAX_PKT;
 5260	val |= (IPP_CFIG_IPP_ENABLE |
 5261		IPP_CFIG_DFIFO_ECC_EN |
 5262		IPP_CFIG_DROP_BAD_CRC |
 5263		IPP_CFIG_CKSUM_EN |
 5264		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
 5265	nw64_ipp(IPP_CFIG, val);
 5266
 5267	return 0;
 5268}
 5269
 5270static void niu_handle_led(struct niu *np, int status)
 5271{
 5272	u64 val;
 5273	val = nr64_mac(XMAC_CONFIG);
 5274
 5275	if ((np->flags & NIU_FLAGS_10G) != 0 &&
 5276	    (np->flags & NIU_FLAGS_FIBER) != 0) {
 5277		if (status) {
 5278			val |= XMAC_CONFIG_LED_POLARITY;
 5279			val &= ~XMAC_CONFIG_FORCE_LED_ON;
 5280		} else {
 5281			val |= XMAC_CONFIG_FORCE_LED_ON;
 5282			val &= ~XMAC_CONFIG_LED_POLARITY;
 5283		}
 5284	}
 5285
 5286	nw64_mac(XMAC_CONFIG, val);
 5287}
 5288
 5289static void niu_init_xif_xmac(struct niu *np)
 5290{
 5291	struct niu_link_config *lp = &np->link_config;
 5292	u64 val;
 5293
 5294	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
 5295		val = nr64(MIF_CONFIG);
 5296		val |= MIF_CONFIG_ATCA_GE;
 5297		nw64(MIF_CONFIG, val);
 5298	}
 5299
 5300	val = nr64_mac(XMAC_CONFIG);
 5301	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
 5302
 5303	val |= XMAC_CONFIG_TX_OUTPUT_EN;
 5304
 5305	if (lp->loopback_mode == LOOPBACK_MAC) {
 5306		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
 5307		val |= XMAC_CONFIG_LOOPBACK;
 5308	} else {
 5309		val &= ~XMAC_CONFIG_LOOPBACK;
 5310	}
 5311
 5312	if (np->flags & NIU_FLAGS_10G) {
 5313		val &= ~XMAC_CONFIG_LFS_DISABLE;
 5314	} else {
 5315		val |= XMAC_CONFIG_LFS_DISABLE;
 5316		if (!(np->flags & NIU_FLAGS_FIBER) &&
 5317		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
 5318			val |= XMAC_CONFIG_1G_PCS_BYPASS;
 5319		else
 5320			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
 5321	}
 5322
 5323	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
 5324
 5325	if (lp->active_speed == SPEED_100)
 5326		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
 5327	else
 5328		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
 5329
 5330	nw64_mac(XMAC_CONFIG, val);
 5331
 5332	val = nr64_mac(XMAC_CONFIG);
 5333	val &= ~XMAC_CONFIG_MODE_MASK;
 5334	if (np->flags & NIU_FLAGS_10G) {
 5335		val |= XMAC_CONFIG_MODE_XGMII;
 5336	} else {
 5337		if (lp->active_speed == SPEED_1000)
 5338			val |= XMAC_CONFIG_MODE_GMII;
 5339		else
 5340			val |= XMAC_CONFIG_MODE_MII;
 5341	}
 5342
 5343	nw64_mac(XMAC_CONFIG, val);
 5344}
 5345
 5346static void niu_init_xif_bmac(struct niu *np)
 5347{
 5348	struct niu_link_config *lp = &np->link_config;
 5349	u64 val;
 5350
 5351	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
 5352
 5353	if (lp->loopback_mode == LOOPBACK_MAC)
 5354		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
 5355	else
 5356		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
 5357
 5358	if (lp->active_speed == SPEED_1000)
 5359		val |= BMAC_XIF_CONFIG_GMII_MODE;
 5360	else
 5361		val &= ~BMAC_XIF_CONFIG_GMII_MODE;
 5362
 5363	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
 5364		 BMAC_XIF_CONFIG_LED_POLARITY);
 5365
 5366	if (!(np->flags & NIU_FLAGS_10G) &&
 5367	    !(np->flags & NIU_FLAGS_FIBER) &&
 5368	    lp->active_speed == SPEED_100)
 5369		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
 5370	else
 5371		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
 5372
 5373	nw64_mac(BMAC_XIF_CONFIG, val);
 5374}
 5375
 5376static void niu_init_xif(struct niu *np)
 5377{
 5378	if (np->flags & NIU_FLAGS_XMAC)
 5379		niu_init_xif_xmac(np);
 5380	else
 5381		niu_init_xif_bmac(np);
 5382}
 5383
 5384static void niu_pcs_mii_reset(struct niu *np)
 5385{
 5386	int limit = 1000;
 5387	u64 val = nr64_pcs(PCS_MII_CTL);
 5388	val |= PCS_MII_CTL_RST;
 5389	nw64_pcs(PCS_MII_CTL, val);
 5390	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
 5391		udelay(100);
 5392		val = nr64_pcs(PCS_MII_CTL);
 5393	}
 5394}
 5395
 5396static void niu_xpcs_reset(struct niu *np)
 5397{
 5398	int limit = 1000;
 5399	u64 val = nr64_xpcs(XPCS_CONTROL1);
 5400	val |= XPCS_CONTROL1_RESET;
 5401	nw64_xpcs(XPCS_CONTROL1, val);
 5402	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
 5403		udelay(100);
 5404		val = nr64_xpcs(XPCS_CONTROL1);
 5405	}
 5406}
 5407
 5408static int niu_init_pcs(struct niu *np)
 5409{
 5410	struct niu_link_config *lp = &np->link_config;
 5411	u64 val;
 5412
 5413	switch (np->flags & (NIU_FLAGS_10G |
 5414			     NIU_FLAGS_FIBER |
 5415			     NIU_FLAGS_XCVR_SERDES)) {
 5416	case NIU_FLAGS_FIBER:
 5417		/* 1G fiber */
 5418		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
 5419		nw64_pcs(PCS_DPATH_MODE, 0);
 5420		niu_pcs_mii_reset(np);
 5421		break;
 5422
 5423	case NIU_FLAGS_10G:
 5424	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
 5425	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
 5426		/* 10G SERDES */
 5427		if (!(np->flags & NIU_FLAGS_XMAC))
 5428			return -EINVAL;
 5429
 5430		/* 10G copper or fiber */
 5431		val = nr64_mac(XMAC_CONFIG);
 5432		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
 5433		nw64_mac(XMAC_CONFIG, val);
 5434
 5435		niu_xpcs_reset(np);
 5436
 5437		val = nr64_xpcs(XPCS_CONTROL1);
 5438		if (lp->loopback_mode == LOOPBACK_PHY)
 5439			val |= XPCS_CONTROL1_LOOPBACK;
 5440		else
 5441			val &= ~XPCS_CONTROL1_LOOPBACK;
 5442		nw64_xpcs(XPCS_CONTROL1, val);
 5443
 5444		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
 5445		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
 5446		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
 5447		break;
 5448
 5450	case NIU_FLAGS_XCVR_SERDES:
 5451		/* 1G SERDES */
 5452		niu_pcs_mii_reset(np);
 5453		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
 5454		nw64_pcs(PCS_DPATH_MODE, 0);
 5455		break;
 5456
 5457	case 0:
 5458		/* 1G copper */
 5459	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
 5460		/* 1G RGMII FIBER */
 5461		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
 5462		niu_pcs_mii_reset(np);
 5463		break;
 5464
 5465	default:
 5466		return -EINVAL;
 5467	}
 5468
 5469	return 0;
 5470}
 5471
 5472static int niu_reset_tx_xmac(struct niu *np)
 5473{
 5474	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
 5475					  (XTXMAC_SW_RST_REG_RS |
 5476					   XTXMAC_SW_RST_SOFT_RST),
 5477					  1000, 100, "XTXMAC_SW_RST");
 5478}
 5479
 5480static int niu_reset_tx_bmac(struct niu *np)
 5481{
 5482	int limit;
 5483
 5484	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
 5485	limit = 1000;
 5486	while (--limit >= 0) {
 5487		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
 5488			break;
 5489		udelay(100);
 5490	}
 5491	if (limit < 0) {
 5492		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
 5493			np->port,
 5494			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
 5495		return -ENODEV;
 5496	}
 5497
 5498	return 0;
 5499}
 5500
 5501static int niu_reset_tx_mac(struct niu *np)
 5502{
 5503	if (np->flags & NIU_FLAGS_XMAC)
 5504		return niu_reset_tx_xmac(np);
 5505	else
 5506		return niu_reset_tx_bmac(np);
 5507}
 5508
 5509static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
 5510{
 5511	u64 val;
 5512
 5513	val = nr64_mac(XMAC_MIN);
 5514	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
 5515		 XMAC_MIN_RX_MIN_PKT_SIZE);
 5516	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
 5517	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
 5518	nw64_mac(XMAC_MIN, val);
 5519
 5520	nw64_mac(XMAC_MAX, max);
 5521
 5522	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
 5523
 5524	val = nr64_mac(XMAC_IPG);
 5525	if (np->flags & NIU_FLAGS_10G) {
 5526		val &= ~XMAC_IPG_IPG_XGMII;
 5527		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
 5528	} else {
 5529		val &= ~XMAC_IPG_IPG_MII_GMII;
 5530		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
 5531	}
 5532	nw64_mac(XMAC_IPG, val);
 5533
 5534	val = nr64_mac(XMAC_CONFIG);
 5535	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
 5536		 XMAC_CONFIG_STRETCH_MODE |
 5537		 XMAC_CONFIG_VAR_MIN_IPG_EN |
 5538		 XMAC_CONFIG_TX_ENABLE);
 5539	nw64_mac(XMAC_CONFIG, val);
 5540
 5541	nw64_mac(TXMAC_FRM_CNT, 0);
 5542	nw64_mac(TXMAC_BYTE_CNT, 0);
 5543}
 5544
 5545static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
 5546{
 5547	u64 val;
 5548
 5549	nw64_mac(BMAC_MIN_FRAME, min);
 5550	nw64_mac(BMAC_MAX_FRAME, max);
 5551
 5552	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
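      	/* 0x8808 is the IEEE MAC Control ethertype (PAUSE frames). */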
 5553	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
 5554	nw64_mac(BMAC_PREAMBLE_SIZE, 7);
 5555
 5556	val = nr64_mac(BTXMAC_CONFIG);
 5557	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
 5558		 BTXMAC_CONFIG_ENABLE);
 5559	nw64_mac(BTXMAC_CONFIG, val);
 5560}
 5561
 5562static void niu_init_tx_mac(struct niu *np)
 5563{
 5564	u64 min, max;
 5565
 5566	min = 64;
 5567	if (np->dev->mtu > ETH_DATA_LEN)
 5568		max = 9216;
 5569	else
 5570		max = 1522;
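      	/* 1522 covers a VLAN-tagged frame: 1500 payload + 14 header
      	 * + 4 VLAN tag + 4 FCS; 9216 is the usual jumbo ceiling.
      	 */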
 5571
 5572	/* The XMAC_MIN register only accepts values for TX min which
 5573	 * have the low 3 bits cleared.
 5574	 */
 5575	BUG_ON(min & 0x7);
 5576
 5577	if (np->flags & NIU_FLAGS_XMAC)
 5578		niu_init_tx_xmac(np, min, max);
 5579	else
 5580		niu_init_tx_bmac(np, min, max);
 5581}
 5582
 5583static int niu_reset_rx_xmac(struct niu *np)
 5584{
 5585	int limit;
 5586
 5587	nw64_mac(XRXMAC_SW_RST,
 5588		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
 5589	limit = 1000;
 5590	while (--limit >= 0) {
 5591		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
 5592						 XRXMAC_SW_RST_SOFT_RST)))
 5593			break;
 5594		udelay(100);
 5595	}
 5596	if (limit < 0) {
 5597		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
 5598			np->port,
 5599			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
 5600		return -ENODEV;
 5601	}
 5602
 5603	return 0;
 5604}
 5605
 5606static int niu_reset_rx_bmac(struct niu *np)
 5607{
 5608	int limit;
 5609
 5610	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
 5611	limit = 1000;
 5612	while (--limit >= 0) {
 5613		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
 5614			break;
 5615		udelay(100);
 5616	}
 5617	if (limit < 0) {
 5618		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
 5619			np->port,
 5620			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
 5621		return -ENODEV;
 5622	}
 5623
 5624	return 0;
 5625}
 5626
 5627static int niu_reset_rx_mac(struct niu *np)
 5628{
 5629	if (np->flags & NIU_FLAGS_XMAC)
 5630		return niu_reset_rx_xmac(np);
 5631	else
 5632		return niu_reset_rx_bmac(np);
 5633}
 5634
 5635static void niu_init_rx_xmac(struct niu *np)
 5636{
 5637	struct niu_parent *parent = np->parent;
 5638	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 5639	int first_rdc_table = tp->first_table_num;
 5640	unsigned long i;
 5641	u64 val;
 5642
 5643	nw64_mac(XMAC_ADD_FILT0, 0);
 5644	nw64_mac(XMAC_ADD_FILT1, 0);
 5645	nw64_mac(XMAC_ADD_FILT2, 0);
 5646	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
 5647	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
 5648	for (i = 0; i < MAC_NUM_HASH; i++)
 5649		nw64_mac(XMAC_HASH_TBL(i), 0);
 5650	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
 5651	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 5652	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 5653
 5654	val = nr64_mac(XMAC_CONFIG);
 5655	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
 5656		 XMAC_CONFIG_PROMISCUOUS |
 5657		 XMAC_CONFIG_PROMISC_GROUP |
 5658		 XMAC_CONFIG_ERR_CHK_DIS |
 5659		 XMAC_CONFIG_RX_CRC_CHK_DIS |
 5660		 XMAC_CONFIG_RESERVED_MULTICAST |
 5661		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
 5662		 XMAC_CONFIG_ADDR_FILTER_EN |
 5663		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
 5664		 XMAC_CONFIG_STRIP_CRC |
 5665		 XMAC_CONFIG_PASS_FLOW_CTRL |
 5666		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
 5667	val |= (XMAC_CONFIG_HASH_FILTER_EN);
 5668	nw64_mac(XMAC_CONFIG, val);
 5669
 5670	nw64_mac(RXMAC_BT_CNT, 0);
 5671	nw64_mac(RXMAC_BC_FRM_CNT, 0);
 5672	nw64_mac(RXMAC_MC_FRM_CNT, 0);
 5673	nw64_mac(RXMAC_FRAG_CNT, 0);
 5674	nw64_mac(RXMAC_HIST_CNT1, 0);
 5675	nw64_mac(RXMAC_HIST_CNT2, 0);
 5676	nw64_mac(RXMAC_HIST_CNT3, 0);
 5677	nw64_mac(RXMAC_HIST_CNT4, 0);
 5678	nw64_mac(RXMAC_HIST_CNT5, 0);
 5679	nw64_mac(RXMAC_HIST_CNT6, 0);
 5680	nw64_mac(RXMAC_HIST_CNT7, 0);
 5681	nw64_mac(RXMAC_MPSZER_CNT, 0);
 5682	nw64_mac(RXMAC_CRC_ER_CNT, 0);
 5683	nw64_mac(RXMAC_CD_VIO_CNT, 0);
 5684	nw64_mac(LINK_FAULT_CNT, 0);
 5685}
 5686
 5687static void niu_init_rx_bmac(struct niu *np)
 5688{
 5689	struct niu_parent *parent = np->parent;
 5690	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
 5691	int first_rdc_table = tp->first_table_num;
 5692	unsigned long i;
 5693	u64 val;
 5694
 5695	nw64_mac(BMAC_ADD_FILT0, 0);
 5696	nw64_mac(BMAC_ADD_FILT1, 0);
 5697	nw64_mac(BMAC_ADD_FILT2, 0);
 5698	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
 5699	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
 5700	for (i = 0; i < MAC_NUM_HASH; i++)
 5701		nw64_mac(BMAC_HASH_TBL(i), 0);
 5702	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
 5703	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
 5704	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
 5705
 5706	val = nr64_mac(BRXMAC_CONFIG);
 5707	val &= ~(BRXMAC_CONFIG_ENABLE |
 5708		 BRXMAC_CONFIG_STRIP_PAD |
 5709		 BRXMAC_CONFIG_STRIP_FCS |
 5710		 BRXMAC_CONFIG_PROMISC |
 5711		 BRXMAC_CONFIG_PROMISC_GRP |
 5712		 BRXMAC_CONFIG_ADDR_FILT_EN |
 5713		 BRXMAC_CONFIG_DISCARD_DIS);
 5714	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
 5715	nw64_mac(BRXMAC_CONFIG, val);
 5716
 5717	val = nr64_mac(BMAC_ADDR_CMPEN);
 5718	val |= BMAC_ADDR_CMPEN_EN0;
 5719	nw64_mac(BMAC_ADDR_CMPEN, val);
 5720}
 5721
 5722static void niu_init_rx_mac(struct niu *np)
 5723{
 5724	niu_set_primary_mac(np, np->dev->dev_addr);
 5725
 5726	if (np->flags & NIU_FLAGS_XMAC)
 5727		niu_init_rx_xmac(np);
 5728	else
 5729		niu_init_rx_bmac(np);
 5730}
 5731
 5732static void niu_enable_tx_xmac(struct niu *np, int on)
 5733{
 5734	u64 val = nr64_mac(XMAC_CONFIG);
 5735
 5736	if (on)
 5737		val |= XMAC_CONFIG_TX_ENABLE;
 5738	else
 5739		val &= ~XMAC_CONFIG_TX_ENABLE;
 5740	nw64_mac(XMAC_CONFIG, val);
 5741}
 5742
 5743static void niu_enable_tx_bmac(struct niu *np, int on)
 5744{
 5745	u64 val = nr64_mac(BTXMAC_CONFIG);
 5746
 5747	if (on)
 5748		val |= BTXMAC_CONFIG_ENABLE;
 5749	else
 5750		val &= ~BTXMAC_CONFIG_ENABLE;
 5751	nw64_mac(BTXMAC_CONFIG, val);
 5752}
 5753
 5754static void niu_enable_tx_mac(struct niu *np, int on)
 5755{
 5756	if (np->flags & NIU_FLAGS_XMAC)
 5757		niu_enable_tx_xmac(np, on);
 5758	else
 5759		niu_enable_tx_bmac(np, on);
 5760}
 5761
 5762static void niu_enable_rx_xmac(struct niu *np, int on)
 5763{
 5764	u64 val = nr64_mac(XMAC_CONFIG);
 5765
 5766	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
 5767		 XMAC_CONFIG_PROMISCUOUS);
 5768
 5769	if (np->flags & NIU_FLAGS_MCAST)
 5770		val |= XMAC_CONFIG_HASH_FILTER_EN;
 5771	if (np->flags & NIU_FLAGS_PROMISC)
 5772		val |= XMAC_CONFIG_PROMISCUOUS;
 5773
 5774	if (on)
 5775		val |= XMAC_CONFIG_RX_MAC_ENABLE;
 5776	else
 5777		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
 5778	nw64_mac(XMAC_CONFIG, val);
 5779}
 5780
 5781static void niu_enable_rx_bmac(struct niu *np, int on)
 5782{
 5783	u64 val = nr64_mac(BRXMAC_CONFIG);
 5784
 5785	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
 5786		 BRXMAC_CONFIG_PROMISC);
 5787
 5788	if (np->flags & NIU_FLAGS_MCAST)
 5789		val |= BRXMAC_CONFIG_HASH_FILT_EN;
 5790	if (np->flags & NIU_FLAGS_PROMISC)
 5791		val |= BRXMAC_CONFIG_PROMISC;
 5792
 5793	if (on)
 5794		val |= BRXMAC_CONFIG_ENABLE;
 5795	else
 5796		val &= ~BRXMAC_CONFIG_ENABLE;
 5797	nw64_mac(BRXMAC_CONFIG, val);
 5798}
 5799
 5800static void niu_enable_rx_mac(struct niu *np, int on)
 5801{
 5802	if (np->flags & NIU_FLAGS_XMAC)
 5803		niu_enable_rx_xmac(np, on);
 5804	else
 5805		niu_enable_rx_bmac(np, on);
 5806}
 5807
 5808static int niu_init_mac(struct niu *np)
 5809{
 5810	int err;
 5811
 5812	niu_init_xif(np);
 5813	err = niu_init_pcs(np);
 5814	if (err)
 5815		return err;
 5816
 5817	err = niu_reset_tx_mac(np);
 5818	if (err)
 5819		return err;
 5820	niu_init_tx_mac(np);
 5821	err = niu_reset_rx_mac(np);
 5822	if (err)
 5823		return err;
 5824	niu_init_rx_mac(np);
 5825
  5826	/* This looks hokey, but the RX MAC reset we just did will
  5827	 * undo some of the state we set up in niu_init_tx_mac(), so
  5828	 * we have to call it again.  In particular, the RX MAC reset
  5829	 * will set the XMAC_MAX register back to its default value.
  5830	 */
 5831	niu_init_tx_mac(np);
 5832	niu_enable_tx_mac(np, 1);
 5833
 5834	niu_enable_rx_mac(np, 1);
 5835
 5836	return 0;
 5837}
 5838
 5839static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 5840{
 5841	(void) niu_tx_channel_stop(np, rp->tx_channel);
 5842}
 5843
 5844static void niu_stop_tx_channels(struct niu *np)
 5845{
 5846	int i;
 5847
 5848	for (i = 0; i < np->num_tx_rings; i++) {
 5849		struct tx_ring_info *rp = &np->tx_rings[i];
 5850
 5851		niu_stop_one_tx_channel(np, rp);
 5852	}
 5853}
 5854
 5855static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
 5856{
 5857	(void) niu_tx_channel_reset(np, rp->tx_channel);
 5858}
 5859
 5860static void niu_reset_tx_channels(struct niu *np)
 5861{
 5862	int i;
 5863
 5864	for (i = 0; i < np->num_tx_rings; i++) {
 5865		struct tx_ring_info *rp = &np->tx_rings[i];
 5866
 5867		niu_reset_one_tx_channel(np, rp);
 5868	}
 5869}
 5870
 5871static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 5872{
 5873	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
 5874}
 5875
 5876static void niu_stop_rx_channels(struct niu *np)
 5877{
 5878	int i;
 5879
 5880	for (i = 0; i < np->num_rx_rings; i++) {
 5881		struct rx_ring_info *rp = &np->rx_rings[i];
 5882
 5883		niu_stop_one_rx_channel(np, rp);
 5884	}
 5885}
 5886
 5887static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
 5888{
 5889	int channel = rp->rx_channel;
 5890
 5891	(void) niu_rx_channel_reset(np, channel);
 5892	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
 5893	nw64(RX_DMA_CTL_STAT(channel), 0);
 5894	(void) niu_enable_rx_channel(np, channel, 0);
 5895}
 5896
 5897static void niu_reset_rx_channels(struct niu *np)
 5898{
 5899	int i;
 5900
 5901	for (i = 0; i < np->num_rx_rings; i++) {
 5902		struct rx_ring_info *rp = &np->rx_rings[i];
 5903
 5904		niu_reset_one_rx_channel(np, rp);
 5905	}
 5906}
 5907
 5908static void niu_disable_ipp(struct niu *np)
 5909{
 5910	u64 rd, wr, val;
 5911	int limit;
 5912
 5913	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
 5914	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
 5915	limit = 100;
 5916	while (--limit >= 0 && (rd != wr)) {
 5917		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
 5918		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
 5919	}
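      	/* XXX "Drained" appears to mean rd == 0 && wr == 1, so the
      	 * check below arguably wants ||; as written the warning is
      	 * skipped whenever either pointer matches. XXX
      	 */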
 5920	if (limit < 0 &&
 5921	    (rd != 0 && wr != 1)) {
 5922		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
 5923			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
 5924			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
 5925	}
 5926
 5927	val = nr64_ipp(IPP_CFIG);
 5928	val &= ~(IPP_CFIG_IPP_ENABLE |
 5929		 IPP_CFIG_DFIFO_ECC_EN |
 5930		 IPP_CFIG_DROP_BAD_CRC |
 5931		 IPP_CFIG_CKSUM_EN);
 5932	nw64_ipp(IPP_CFIG, val);
 5933
 5934	(void) niu_ipp_reset(np);
 5935}
 5936
 5937static int niu_init_hw(struct niu *np)
 5938{
 5939	int i, err;
 5940
 5941	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
 5942	niu_txc_enable_port(np, 1);
 5943	niu_txc_port_dma_enable(np, 1);
 5944	niu_txc_set_imask(np, 0);
 5945
 5946	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
 5947	for (i = 0; i < np->num_tx_rings; i++) {
 5948		struct tx_ring_info *rp = &np->tx_rings[i];
 5949
 5950		err = niu_init_one_tx_channel(np, rp);
 5951		if (err)
 5952			return err;
 5953	}
 5954
 5955	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
 5956	err = niu_init_rx_channels(np);
 5957	if (err)
 5958		goto out_uninit_tx_channels;
 5959
 5960	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
 5961	err = niu_init_classifier_hw(np);
 5962	if (err)
 5963		goto out_uninit_rx_channels;
 5964
 5965	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
 5966	err = niu_init_zcp(np);
 5967	if (err)
 5968		goto out_uninit_rx_channels;
 5969
 5970	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
 5971	err = niu_init_ipp(np);
 5972	if (err)
 5973		goto out_uninit_rx_channels;
 5974
 5975	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
 5976	err = niu_init_mac(np);
 5977	if (err)
 5978		goto out_uninit_ipp;
 5979
 5980	return 0;
 5981
 5982out_uninit_ipp:
 5983	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
 5984	niu_disable_ipp(np);
 5985
 5986out_uninit_rx_channels:
 5987	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
 5988	niu_stop_rx_channels(np);
 5989	niu_reset_rx_channels(np);
 5990
 5991out_uninit_tx_channels:
 5992	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
 5993	niu_stop_tx_channels(np);
 5994	niu_reset_tx_channels(np);
 5995
 5996	return err;
 5997}
 5998
 5999static void niu_stop_hw(struct niu *np)
 6000{
 6001	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
 6002	niu_enable_interrupts(np, 0);
 6003
 6004	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
 6005	niu_enable_rx_mac(np, 0);
 6006
 6007	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
 6008	niu_disable_ipp(np);
 6009
 6010	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
 6011	niu_stop_tx_channels(np);
 6012
 6013	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
 6014	niu_stop_rx_channels(np);
 6015
 6016	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
 6017	niu_reset_tx_channels(np);
 6018
 6019	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
 6020	niu_reset_rx_channels(np);
 6021}
 6022
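      /* Vector names take the form "<dev>:MAC", "<dev>:MIF",
       * "<dev>:SYSERR", "<dev>-rx-<n>" and "<dev>-tx-<n>", e.g.
       * "eth0-rx-0" (device name illustrative).
       */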
 6023static void niu_set_irq_name(struct niu *np)
 6024{
 6025	int port = np->port;
 6026	int i, j = 1;
 6027
 6028	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
 6029
 6030	if (port == 0) {
 6031		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
 6032		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
 6033		j = 3;
 6034	}
 6035
 6036	for (i = 0; i < np->num_ldg - j; i++) {
 6037		if (i < np->num_rx_rings)
 6038			sprintf(np->irq_name[i+j], "%s-rx-%d",
 6039				np->dev->name, i);
 6040		else if (i < np->num_tx_rings + np->num_rx_rings)
 6041			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
 6042				i - np->num_rx_rings);
 6043	}
 6044}
 6045
 6046static int niu_request_irq(struct niu *np)
 6047{
 6048	int i, j, err;
 6049
 6050	niu_set_irq_name(np);
 6051
 6052	err = 0;
 6053	for (i = 0; i < np->num_ldg; i++) {
 6054		struct niu_ldg *lp = &np->ldg[i];
 6055
 6056		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
 6057				  np->irq_name[i], lp);
 6058		if (err)
 6059			goto out_free_irqs;
 6060
 6061	}
 6062
 6063	return 0;
 6064
 6065out_free_irqs:
 6066	for (j = 0; j < i; j++) {
 6067		struct niu_ldg *lp = &np->ldg[j];
 6068
 6069		free_irq(lp->irq, lp);
 6070	}
 6071	return err;
 6072}
 6073
 6074static void niu_free_irq(struct niu *np)
 6075{
 6076	int i;
 6077
 6078	for (i = 0; i < np->num_ldg; i++) {
 6079		struct niu_ldg *lp = &np->ldg[i];
 6080
 6081		free_irq(lp->irq, lp);
 6082	}
 6083}
 6084
 6085static void niu_enable_napi(struct niu *np)
 6086{
 6087	int i;
 6088
 6089	for (i = 0; i < np->num_ldg; i++)
 6090		napi_enable(&np->ldg[i].napi);
 6091}
 6092
 6093static void niu_disable_napi(struct niu *np)
 6094{
 6095	int i;
 6096
 6097	for (i = 0; i < np->num_ldg; i++)
 6098		napi_disable(&np->ldg[i].napi);
 6099}
 6100
 6101static int niu_open(struct net_device *dev)
 6102{
 6103	struct niu *np = netdev_priv(dev);
 6104	int err;
 6105
 6106	netif_carrier_off(dev);
 6107
 6108	err = niu_alloc_channels(np);
 6109	if (err)
 6110		goto out_err;
 6111
 6112	err = niu_enable_interrupts(np, 0);
 6113	if (err)
 6114		goto out_free_channels;
 6115
 6116	err = niu_request_irq(np);
 6117	if (err)
 6118		goto out_free_channels;
 6119
 6120	niu_enable_napi(np);
 6121
 6122	spin_lock_irq(&np->lock);
 6123
 6124	err = niu_init_hw(np);
 6125	if (!err) {
 6126		init_timer(&np->timer);
 6127		np->timer.expires = jiffies + HZ;
 6128		np->timer.data = (unsigned long) np;
 6129		np->timer.function = niu_timer;
 6130
 6131		err = niu_enable_interrupts(np, 1);
 6132		if (err)
 6133			niu_stop_hw(np);
 6134	}
 6135
 6136	spin_unlock_irq(&np->lock);
 6137
 6138	if (err) {
 6139		niu_disable_napi(np);
 6140		goto out_free_irq;
 6141	}
 6142
 6143	netif_tx_start_all_queues(dev);
 6144
 6145	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 6146		netif_carrier_on(dev);
 6147
 6148	add_timer(&np->timer);
 6149
 6150	return 0;
 6151
 6152out_free_irq:
 6153	niu_free_irq(np);
 6154
 6155out_free_channels:
 6156	niu_free_channels(np);
 6157
 6158out_err:
 6159	return err;
 6160}
 6161
 6162static void niu_full_shutdown(struct niu *np, struct net_device *dev)
 6163{
 6164	cancel_work_sync(&np->reset_task);
 6165
 6166	niu_disable_napi(np);
 6167	netif_tx_stop_all_queues(dev);
 6168
 6169	del_timer_sync(&np->timer);
 6170
 6171	spin_lock_irq(&np->lock);
 6172
 6173	niu_stop_hw(np);
 6174
 6175	spin_unlock_irq(&np->lock);
 6176}
 6177
 6178static int niu_close(struct net_device *dev)
 6179{
 6180	struct niu *np = netdev_priv(dev);
 6181
 6182	niu_full_shutdown(np, dev);
 6183
 6184	niu_free_irq(np);
 6185
 6186	niu_free_channels(np);
 6187
 6188	niu_handle_led(np, 0);
 6189
 6190	return 0;
 6191}
 6192
 6193static void niu_sync_xmac_stats(struct niu *np)
 6194{
 6195	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
 6196
 6197	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
 6198	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
 6199
 6200	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
 6201	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
 6202	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
 6203	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
 6204	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
 6205	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
 6206	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
 6207	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
 6208	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
 6209	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
 6210	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
 6211	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
 6212	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
 6213	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
 6214	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
 6215	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
 6216}
 6217
 6218static void niu_sync_bmac_stats(struct niu *np)
 6219{
 6220	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
 6221
 6222	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
 6223	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
 6224
 6225	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
 6226	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	/* XXX rx_crc_errors re-reads the alignment-error counter here;
	 * this looks like a copy-and-paste error and most likely should
	 * read the BMAC CRC error counter instead.  Note also that code
	 * violations are accumulated into rx_len_errors below.
	 */
 6227	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
 6228	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
 6229}
 6230
 6231static void niu_sync_mac_stats(struct niu *np)
 6232{
 6233	if (np->flags & NIU_FLAGS_XMAC)
 6234		niu_sync_xmac_stats(np);
 6235	else
 6236		niu_sync_bmac_stats(np);
 6237}
 6238
 6239static void niu_get_rx_stats(struct niu *np,
 6240			     struct rtnl_link_stats64 *stats)
 6241{
 6242	u64 pkts, dropped, errors, bytes;
 6243	struct rx_ring_info *rx_rings;
 6244	int i;
 6245
 6246	pkts = dropped = errors = bytes = 0;
 6247
 6248	rx_rings = ACCESS_ONCE(np->rx_rings);
 6249	if (!rx_rings)
 6250		goto no_rings;
 6251
 6252	for (i = 0; i < np->num_rx_rings; i++) {
 6253		struct rx_ring_info *rp = &rx_rings[i];
 6254
 6255		niu_sync_rx_discard_stats(np, rp, 0);
 6256
 6257		pkts += rp->rx_packets;
 6258		bytes += rp->rx_bytes;
 6259		dropped += rp->rx_dropped;
 6260		errors += rp->rx_errors;
 6261	}
 6262
 6263no_rings:
 6264	stats->rx_packets = pkts;
 6265	stats->rx_bytes = bytes;
 6266	stats->rx_dropped = dropped;
 6267	stats->rx_errors = errors;
 6268}
 6269
 6270static void niu_get_tx_stats(struct niu *np,
 6271			     struct rtnl_link_stats64 *stats)
 6272{
 6273	u64 pkts, errors, bytes;
 6274	struct tx_ring_info *tx_rings;
 6275	int i;
 6276
 6277	pkts = errors = bytes = 0;
 6278
 6279	tx_rings = ACCESS_ONCE(np->tx_rings);
 6280	if (!tx_rings)
 6281		goto no_rings;
 6282
 6283	for (i = 0; i < np->num_tx_rings; i++) {
 6284		struct tx_ring_info *rp = &tx_rings[i];
 6285
 6286		pkts += rp->tx_packets;
 6287		bytes += rp->tx_bytes;
 6288		errors += rp->tx_errors;
 6289	}
 6290
 6291no_rings:
 6292	stats->tx_packets = pkts;
 6293	stats->tx_bytes = bytes;
 6294	stats->tx_errors = errors;
 6295}
 6296
 6297static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
 6298					       struct rtnl_link_stats64 *stats)
 6299{
 6300	struct niu *np = netdev_priv(dev);
 6301
 6302	if (netif_running(dev)) {
 6303		niu_get_rx_stats(np, stats);
 6304		niu_get_tx_stats(np, stats);
 6305	}
 6306
 6307	return stats;
 6308}
 6309
 6310static void niu_load_hash_xmac(struct niu *np, u16 *hash)
 6311{
 6312	int i;
 6313
 6314	for (i = 0; i < 16; i++)
 6315		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
 6316}
 6317
 6318static void niu_load_hash_bmac(struct niu *np, u16 *hash)
 6319{
 6320	int i;
 6321
 6322	for (i = 0; i < 16; i++)
 6323		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
 6324}
 6325
 6326static void niu_load_hash(struct niu *np, u16 *hash)
 6327{
 6328	if (np->flags & NIU_FLAGS_XMAC)
 6329		niu_load_hash_xmac(np, hash);
 6330	else
 6331		niu_load_hash_bmac(np, hash);
 6332}
 6333
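/* Program the RX filters.  Multicast filtering uses a 256-bit hash
 * table spread across sixteen 16-bit registers; the top eight bits of
 * the little-endian CRC-32 of the address pick the bit.  As a worked
 * (hypothetical) example, for crc = 0x5a:
 *
 *	crc >> 4          = 5   ->  register hash[5]
 *	15 - (crc & 0xf)  = 5   ->  bit 5 of that register
 *
 * If there are more unicast addresses than alternate MAC slots, the
 * device simply falls back to promiscuous mode.
 */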
 6334static void niu_set_rx_mode(struct net_device *dev)
 6335{
 6336	struct niu *np = netdev_priv(dev);
 6337	int i, alt_cnt, err;
 6338	struct netdev_hw_addr *ha;
 6339	unsigned long flags;
 6340	u16 hash[16] = { 0, };
 6341
 6342	spin_lock_irqsave(&np->lock, flags);
 6343	niu_enable_rx_mac(np, 0);
 6344
 6345	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
 6346	if (dev->flags & IFF_PROMISC)
 6347		np->flags |= NIU_FLAGS_PROMISC;
 6348	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
 6349		np->flags |= NIU_FLAGS_MCAST;
 6350
 6351	alt_cnt = netdev_uc_count(dev);
 6352	if (alt_cnt > niu_num_alt_addr(np)) {
 6353		alt_cnt = 0;
 6354		np->flags |= NIU_FLAGS_PROMISC;
 6355	}
 6356
 6357	if (alt_cnt) {
 6358		int index = 0;
 6359
 6360		netdev_for_each_uc_addr(ha, dev) {
 6361			err = niu_set_alt_mac(np, index, ha->addr);
 6362			if (err)
 6363				netdev_warn(dev, "Error %d adding alt mac %d\n",
 6364					    err, index);
 6365			err = niu_enable_alt_mac(np, index, 1);
 6366			if (err)
 6367				netdev_warn(dev, "Error %d enabling alt mac %d\n",
 6368					    err, index);
 6369
 6370			index++;
 6371		}
 6372	} else {
 6373		int alt_start;
 6374		if (np->flags & NIU_FLAGS_XMAC)
 6375			alt_start = 0;
 6376		else
 6377			alt_start = 1;
 6378		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
 6379			err = niu_enable_alt_mac(np, i, 0);
 6380			if (err)
 6381				netdev_warn(dev, "Error %d disabling alt mac %d\n",
 6382					    err, i);
 6383		}
 6384	}
 6385	if (dev->flags & IFF_ALLMULTI) {
 6386		for (i = 0; i < 16; i++)
 6387			hash[i] = 0xffff;
 6388	} else if (!netdev_mc_empty(dev)) {
 6389		netdev_for_each_mc_addr(ha, dev) {
 6390			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
 6391
 6392			crc >>= 24;
 6393			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
 6394		}
 6395	}
 6396
 6397	if (np->flags & NIU_FLAGS_MCAST)
 6398		niu_load_hash(np, hash);
 6399
 6400	niu_enable_rx_mac(np, 1);
 6401	spin_unlock_irqrestore(&np->lock, flags);
 6402}
 6403
 6404static int niu_set_mac_addr(struct net_device *dev, void *p)
 6405{
 6406	struct niu *np = netdev_priv(dev);
 6407	struct sockaddr *addr = p;
 6408	unsigned long flags;
 6409
 6410	if (!is_valid_ether_addr(addr->sa_data))
 6411		return -EADDRNOTAVAIL;
 6412
 6413	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 6414
 6415	if (!netif_running(dev))
 6416		return 0;
 6417
 6418	spin_lock_irqsave(&np->lock, flags);
 6419	niu_enable_rx_mac(np, 0);
 6420	niu_set_primary_mac(np, dev->dev_addr);
 6421	niu_enable_rx_mac(np, 1);
 6422	spin_unlock_irqrestore(&np->lock, flags);
 6423
 6424	return 0;
 6425}
 6426
 6427static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 6428{
 6429	return -EOPNOTSUPP;
 6430}
 6431
 6432static void niu_netif_stop(struct niu *np)
 6433{
 6434	netif_trans_update(np->dev);	/* prevent tx timeout */
 6435
 6436	niu_disable_napi(np);
 6437
 6438	netif_tx_disable(np->dev);
 6439}
 6440
 6441static void niu_netif_start(struct niu *np)
 6442{
 6443	/* NOTE: unconditionally waking all TX queues is only appropriate
 6444	 * so long as all callers are assured to have free tx slots
 6445	 * (such as after niu_init_hw).
 6446	 */
 6447	netif_tx_wake_all_queues(np->dev);
 6448
 6449	niu_enable_napi(np);
 6450
 6451	niu_enable_interrupts(np, 1);
 6452}
 6453
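/* Re-arm the RX and TX rings after a chip reset without freeing the
 * page pool: every page still linked into an RBR hash chain is
 * re-posted to the ring, any shortfall is topped up with fresh
 * GFP_ATOMIC allocations, and outstanding TX packets are released so
 * that both rings restart from a clean producer/consumer state.
 */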
 6454static void niu_reset_buffers(struct niu *np)
 6455{
 6456	int i, j, k, err;
 6457
 6458	if (np->rx_rings) {
 6459		for (i = 0; i < np->num_rx_rings; i++) {
 6460			struct rx_ring_info *rp = &np->rx_rings[i];
 6461
 6462			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
 6463				struct page *page;
 6464
 6465				page = rp->rxhash[j];
 6466				while (page) {
 6467					struct page *next =
 6468						(struct page *) page->mapping;
 6469					u64 base = page->index;
 6470					base = base >> RBR_DESCR_ADDR_SHIFT;
 6471					rp->rbr[k++] = cpu_to_le32(base);
 6472					page = next;
 6473				}
 6474			}
 6475			for (; k < MAX_RBR_RING_SIZE; k++) {
 6476				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
 6477				if (unlikely(err))
 6478					break;
 6479			}
 6480
 6481			rp->rbr_index = rp->rbr_table_size - 1;
 6482			rp->rcr_index = 0;
 6483			rp->rbr_pending = 0;
 6484			rp->rbr_refill_pending = 0;
 6485		}
 6486	}
 6487	if (np->tx_rings) {
 6488		for (i = 0; i < np->num_tx_rings; i++) {
 6489			struct tx_ring_info *rp = &np->tx_rings[i];
 6490
 6491			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
 6492				if (rp->tx_buffs[j].skb)
 6493					(void) release_tx_packet(np, rp, j);
 6494			}
 6495
 6496			rp->pending = MAX_TX_RING_SIZE;
 6497			rp->prod = 0;
 6498			rp->cons = 0;
 6499			rp->wrap_bit = 0;
 6500		}
 6501	}
 6502}
 6503
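/* Reset worker, scheduled from niu_tx_timeout().  Ordering matters
 * here: the timer and NAPI are stopped outside np->lock, the hardware
 * is quiesced under the lock, the ring buffers are rebuilt with the
 * lock dropped, and only then is the hardware re-initialized under
 * the lock again.
 */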
 6504static void niu_reset_task(struct work_struct *work)
 6505{
 6506	struct niu *np = container_of(work, struct niu, reset_task);
 6507	unsigned long flags;
 6508	int err;
 6509
 6510	spin_lock_irqsave(&np->lock, flags);
 6511	if (!netif_running(np->dev)) {
 6512		spin_unlock_irqrestore(&np->lock, flags);
 6513		return;
 6514	}
 6515
 6516	spin_unlock_irqrestore(&np->lock, flags);
 6517
 6518	del_timer_sync(&np->timer);
 6519
 6520	niu_netif_stop(np);
 6521
 6522	spin_lock_irqsave(&np->lock, flags);
 6523
 6524	niu_stop_hw(np);
 6525
 6526	spin_unlock_irqrestore(&np->lock, flags);
 6527
 6528	niu_reset_buffers(np);
 6529
 6530	spin_lock_irqsave(&np->lock, flags);
 6531
 6532	err = niu_init_hw(np);
 6533	if (!err) {
 6534		np->timer.expires = jiffies + HZ;
 6535		add_timer(&np->timer);
 6536		niu_netif_start(np);
 6537	}
 6538
 6539	spin_unlock_irqrestore(&np->lock, flags);
 6540}
 6541
 6542static void niu_tx_timeout(struct net_device *dev)
 6543{
 6544	struct niu *np = netdev_priv(dev);
 6545
 6546	dev_err(np->device, "%s: Transmit timed out, resetting\n",
 6547		dev->name);
 6548
 6549	schedule_work(&np->reset_task);
 6550}
 6551
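/* Format one TX descriptor.  A descriptor is a single little-endian
 * 64-bit word packing the SOP/mark flags, the number of descriptors
 * making up the packet, the transfer length and the DMA address; see
 * the TX_DESC_* masks in niu.h for the field layout.
 */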
 6552static void niu_set_txd(struct tx_ring_info *rp, int index,
 6553			u64 mapping, u64 len, u64 mark,
 6554			u64 n_frags)
 6555{
 6556	__le64 *desc = &rp->descr[index];
 6557
 6558	*desc = cpu_to_le64(mark |
 6559			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
 6560			    (len << TX_DESC_TR_LEN_SHIFT) |
 6561			    (mapping & TX_DESC_SAD));
 6562}
 6563
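/* Compute the flags word of the software tx_pkt_hdr prepended to
 * every outgoing frame.  All offsets (pad, L3 start, L4 start/stuff)
 * are expressed in 16-bit units, hence the divisions by two, while
 * IHL is in 32-bit words.  For CHECKSUM_PARTIAL packets the L4
 * start/stuff fields tell the hardware where to begin checksumming
 * and where to store the result.
 */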
 6564static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
 6565				u64 pad_bytes, u64 len)
 6566{
 6567	u16 eth_proto, eth_proto_inner;
 6568	u64 csum_bits, l3off, ihl, ret;
 6569	u8 ip_proto;
 6570	int ipv6;
 6571
 6572	eth_proto = be16_to_cpu(ehdr->h_proto);
 6573	eth_proto_inner = eth_proto;
 6574	if (eth_proto == ETH_P_8021Q) {
 6575		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
 6576		__be16 val = vp->h_vlan_encapsulated_proto;
 6577
 6578		eth_proto_inner = be16_to_cpu(val);
 6579	}
 6580
 6581	ipv6 = ihl = 0;
 6582	switch (skb->protocol) {
 6583	case cpu_to_be16(ETH_P_IP):
 6584		ip_proto = ip_hdr(skb)->protocol;
 6585		ihl = ip_hdr(skb)->ihl;
 6586		break;
 6587	case cpu_to_be16(ETH_P_IPV6):
 6588		ip_proto = ipv6_hdr(skb)->nexthdr;
 6589		ihl = (40 >> 2);
 6590		ipv6 = 1;
 6591		break;
 6592	default:
 6593		ip_proto = ihl = 0;
 6594		break;
 6595	}
 6596
 6597	csum_bits = TXHDR_CSUM_NONE;
 6598	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 6599		u64 start, stuff;
 6600
 6601		csum_bits = (ip_proto == IPPROTO_TCP ?
 6602			     TXHDR_CSUM_TCP :
 6603			     (ip_proto == IPPROTO_UDP ?
 6604			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
 6605
 6606		start = skb_checksum_start_offset(skb) -
 6607			(pad_bytes + sizeof(struct tx_pkt_hdr));
 6608		stuff = start + skb->csum_offset;
 6609
 6610		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
 6611		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
 6612	}
 6613
 6614	l3off = skb_network_offset(skb) -
 6615		(pad_bytes + sizeof(struct tx_pkt_hdr));
 6616
 6617	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
 6618	       (len << TXHDR_LEN_SHIFT) |
 6619	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
 6620	       (ihl << TXHDR_IHL_SHIFT) |
 6621	       ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
 6622	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
 6623	       (ipv6 ? TXHDR_IP_VER : 0) |
 6624	       csum_bits);
 6625
 6626	return ret;
 6627}
 6628
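/* Hard transmit entry point.  The headroom handling below preserves
 * the original 16-byte alignment of skb->data: the alignment residue
 * is recorded as pad bytes in the software header that gets pushed in
 * front of the frame.  The linear head is then split into descriptors
 * of at most MAX_TX_DESC_LEN bytes, followed by one descriptor per
 * page fragment.
 */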
 6629static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 6630				  struct net_device *dev)
 6631{
 6632	struct niu *np = netdev_priv(dev);
 6633	unsigned long align, headroom;
 6634	struct netdev_queue *txq;
 6635	struct tx_ring_info *rp;
 6636	struct tx_pkt_hdr *tp;
 6637	unsigned int len, nfg;
 6638	struct ethhdr *ehdr;
 6639	int prod, i, tlen;
 6640	u64 mapping, mrk;
 6641
 6642	i = skb_get_queue_mapping(skb);
 6643	rp = &np->tx_rings[i];
 6644	txq = netdev_get_tx_queue(dev, i);
 6645
 6646	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 6647		netif_tx_stop_queue(txq);
 6648		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);
 6649		rp->tx_errors++;
 6650		return NETDEV_TX_BUSY;
 6651	}
 6652
 6653	if (eth_skb_pad(skb))
 6654		goto out;
 6655
 6656	len = sizeof(struct tx_pkt_hdr) + 15;
 6657	if (skb_headroom(skb) < len) {
 6658		struct sk_buff *skb_new;
 6659
 6660		skb_new = skb_realloc_headroom(skb, len);
 6661		if (!skb_new)
 6662			goto out_drop;
 6663		kfree_skb(skb);
 6664		skb = skb_new;
 6665	} else
 6666		skb_orphan(skb);
 6667
 6668	align = ((unsigned long) skb->data & (16 - 1));
 6669	headroom = align + sizeof(struct tx_pkt_hdr);
 6670
 6671	ehdr = (struct ethhdr *) skb->data;
 6672	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
 6673
 6674	len = skb->len - sizeof(struct tx_pkt_hdr);
 6675	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
 6676	tp->resv = 0;
 6677
 6678	len = skb_headlen(skb);
 6679	mapping = np->ops->map_single(np->device, skb->data,
 6680				      len, DMA_TO_DEVICE);
 6681
 6682	prod = rp->prod;
 6683
 6684	rp->tx_buffs[prod].skb = skb;
 6685	rp->tx_buffs[prod].mapping = mapping;
 6686
 6687	mrk = TX_DESC_SOP;
 6688	if (++rp->mark_counter == rp->mark_freq) {
 6689		rp->mark_counter = 0;
 6690		mrk |= TX_DESC_MARK;
 6691		rp->mark_pending++;
 6692	}
 6693
 6694	tlen = len;
 6695	nfg = skb_shinfo(skb)->nr_frags;
 6696	while (tlen > 0) {
 6697		tlen -= MAX_TX_DESC_LEN;
 6698		nfg++;
 6699	}
 6700
 6701	while (len > 0) {
 6702		unsigned int this_len = len;
 6703
 6704		if (this_len > MAX_TX_DESC_LEN)
 6705			this_len = MAX_TX_DESC_LEN;
 6706
 6707		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
 6708		mrk = nfg = 0;
 6709
 6710		prod = NEXT_TX(rp, prod);
 6711		mapping += this_len;
 6712		len -= this_len;
 6713	}
 6714
 6715	for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
 6716		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 6717
 6718		len = skb_frag_size(frag);
 6719		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
 6720					    frag->page_offset, len,
 6721					    DMA_TO_DEVICE);
 6722
 6723		rp->tx_buffs[prod].skb = NULL;
 6724		rp->tx_buffs[prod].mapping = mapping;
 6725
 6726		niu_set_txd(rp, prod, mapping, len, 0, 0);
 6727
 6728		prod = NEXT_TX(rp, prod);
 6729	}
 6730
 6731	if (prod < rp->prod)
 6732		rp->wrap_bit ^= TX_RING_KICK_WRAP;
 6733	rp->prod = prod;
 6734
 6735	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
 6736
 6737	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
 6738		netif_tx_stop_queue(txq);
 6739		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
 6740			netif_tx_wake_queue(txq);
 6741	}
 6742
 6743out:
 6744	return NETDEV_TX_OK;
 6745
 6746out_drop:
 6747	rp->tx_errors++;
 6748	kfree_skb(skb);
 6749	goto out;
 6750}
 6751
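/* An MTU change only forces a full restart when it crosses the jumbo
 * boundary (ETH_DATA_LEN), since that is the point at which the
 * receive buffer sizing and the MAC maximum frame length have to be
 * reprogrammed.
 */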
 6752static int niu_change_mtu(struct net_device *dev, int new_mtu)
 6753{
 6754	struct niu *np = netdev_priv(dev);
 6755	int err, orig_jumbo, new_jumbo;
 6756
 6757	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
 6758	new_jumbo = (new_mtu > ETH_DATA_LEN);
 6759
 6760	dev->mtu = new_mtu;
 6761
 6762	if (!netif_running(dev) ||
 6763	    (orig_jumbo == new_jumbo))
 6764		return 0;
 6765
 6766	niu_full_shutdown(np, dev);
 6767
 6768	niu_free_channels(np);
 6769
 6770	niu_enable_napi(np);
 6771
 6772	err = niu_alloc_channels(np);
 6773	if (err)
 6774		return err;
 6775
 6776	spin_lock_irq(&np->lock);
 6777
 6778	err = niu_init_hw(np);
 6779	if (!err) {
 6780		init_timer(&np->timer);
 6781		np->timer.expires = jiffies + HZ;
 6782		np->timer.data = (unsigned long) np;
 6783		np->timer.function = niu_timer;
 6784
 6785		err = niu_enable_interrupts(np, 1);
 6786		if (err)
 6787			niu_stop_hw(np);
 6788	}
 6789
 6790	spin_unlock_irq(&np->lock);
 6791
 6792	if (!err) {
 6793		netif_tx_start_all_queues(dev);
 6794		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 6795			netif_carrier_on(dev);
 6796
 6797		add_timer(&np->timer);
 6798	}
 6799
 6800	return err;
 6801}
 6802
 6803static void niu_get_drvinfo(struct net_device *dev,
 6804			    struct ethtool_drvinfo *info)
 6805{
 6806	struct niu *np = netdev_priv(dev);
 6807	struct niu_vpd *vpd = &np->vpd;
 6808
 6809	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
 6810	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 6811	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
 6812		vpd->fcode_major, vpd->fcode_minor);
 6813	if (np->parent->plat_type != PLAT_TYPE_NIU)
 6814		strlcpy(info->bus_info, pci_name(np->pdev),
 6815			sizeof(info->bus_info));
 6816}
 6817
 6818static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 6819{
 6820	struct niu *np = netdev_priv(dev);
 6821	struct niu_link_config *lp;
 6822
 6823	lp = &np->link_config;
 6824
 6825	memset(cmd, 0, sizeof(*cmd));
 6826	cmd->phy_address = np->phy_addr;
 6827	cmd->supported = lp->supported;
 6828	cmd->advertising = lp->active_advertising;
 6829	cmd->autoneg = lp->active_autoneg;
 6830	ethtool_cmd_speed_set(cmd, lp->active_speed);
 6831	cmd->duplex = lp->active_duplex;
 6832	cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
 6833	cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
 6834		XCVR_EXTERNAL : XCVR_INTERNAL;
 6835
 6836	return 0;
 6837}
 6838
 6839static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 6840{
 6841	struct niu *np = netdev_priv(dev);
 6842	struct niu_link_config *lp = &np->link_config;
 6843
 6844	lp->advertising = cmd->advertising;
 6845	lp->speed = ethtool_cmd_speed(cmd);
 6846	lp->duplex = cmd->duplex;
 6847	lp->autoneg = cmd->autoneg;
 6848	return niu_init_link(np);
 6849}
 6850
 6851static u32 niu_get_msglevel(struct net_device *dev)
 6852{
 6853	struct niu *np = netdev_priv(dev);
 6854	return np->msg_enable;
 6855}
 6856
 6857static void niu_set_msglevel(struct net_device *dev, u32 value)
 6858{
 6859	struct niu *np = netdev_priv(dev);
 6860	np->msg_enable = value;
 6861}
 6862
 6863static int niu_nway_reset(struct net_device *dev)
 6864{
 6865	struct niu *np = netdev_priv(dev);
 6866
 6867	if (np->link_config.autoneg)
 6868		return niu_init_link(np);
 6869
 6870	return 0;
 6871}
 6872
 6873static int niu_get_eeprom_len(struct net_device *dev)
 6874{
 6875	struct niu *np = netdev_priv(dev);
 6876
 6877	return np->eeprom_len;
 6878}
 6879
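/* ethtool EEPROM read.  The SPROM is only addressable as aligned
 * 32-bit words through ESPC_NCR, so an unaligned request is served in
 * three steps: the partial leading word, then whole words, then the
 * partial trailing word.
 */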
 6880static int niu_get_eeprom(struct net_device *dev,
 6881			  struct ethtool_eeprom *eeprom, u8 *data)
 6882{
 6883	struct niu *np = netdev_priv(dev);
 6884	u32 offset, len, val;
 6885
 6886	offset = eeprom->offset;
 6887	len = eeprom->len;
 6888
 6889	if (offset + len < offset)
 6890		return -EINVAL;
 6891	if (offset >= np->eeprom_len)
 6892		return -EINVAL;
 6893	if (offset + len > np->eeprom_len)
 6894		len = eeprom->len = np->eeprom_len - offset;
 6895
 6896	if (offset & 3) {
 6897		u32 b_offset, b_count;
 6898
 6899		b_offset = offset & 3;
 6900		b_count = 4 - b_offset;
 6901		if (b_count > len)
 6902			b_count = len;
 6903
 6904		val = nr64(ESPC_NCR((offset - b_offset) / 4));
 6905		memcpy(data, ((char *)&val) + b_offset, b_count);
 6906		data += b_count;
 6907		len -= b_count;
 6908		offset += b_count;
 6909	}
 6910	while (len >= 4) {
 6911		val = nr64(ESPC_NCR(offset / 4));
 6912		memcpy(data, &val, 4);
 6913		data += 4;
 6914		len -= 4;
 6915		offset += 4;
 6916	}
 6917	if (len) {
 6918		val = nr64(ESPC_NCR(offset / 4));
 6919		memcpy(data, &val, len);
 6920	}
 6921	return 0;
 6922}
 6923
 6924static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
 6925{
 6926	switch (flow_type) {
 6927	case TCP_V4_FLOW:
 6928	case TCP_V6_FLOW:
 6929		*pid = IPPROTO_TCP;
 6930		break;
 6931	case UDP_V4_FLOW:
 6932	case UDP_V6_FLOW:
 6933		*pid = IPPROTO_UDP;
 6934		break;
 6935	case SCTP_V4_FLOW:
 6936	case SCTP_V6_FLOW:
 6937		*pid = IPPROTO_SCTP;
 6938		break;
 6939	case AH_V4_FLOW:
 6940	case AH_V6_FLOW:
 6941		*pid = IPPROTO_AH;
 6942		break;
 6943	case ESP_V4_FLOW:
 6944	case ESP_V6_FLOW:
 6945		*pid = IPPROTO_ESP;
 6946		break;
 6947	default:
 6948		*pid = 0;
 6949		break;
 6950	}
 6951}
 6952
 6953static int niu_class_to_ethflow(u64 class, int *flow_type)
 6954{
 6955	switch (class) {
 6956	case CLASS_CODE_TCP_IPV4:
 6957		*flow_type = TCP_V4_FLOW;
 6958		break;
 6959	case CLASS_CODE_UDP_IPV4:
 6960		*flow_type = UDP_V4_FLOW;
 6961		break;
 6962	case CLASS_CODE_AH_ESP_IPV4:
 6963		*flow_type = AH_V4_FLOW;
 6964		break;
 6965	case CLASS_CODE_SCTP_IPV4:
 6966		*flow_type = SCTP_V4_FLOW;
 6967		break;
 6968	case CLASS_CODE_TCP_IPV6:
 6969		*flow_type = TCP_V6_FLOW;
 6970		break;
 6971	case CLASS_CODE_UDP_IPV6:
 6972		*flow_type = UDP_V6_FLOW;
 6973		break;
 6974	case CLASS_CODE_AH_ESP_IPV6:
 6975		*flow_type = AH_V6_FLOW;
 6976		break;
 6977	case CLASS_CODE_SCTP_IPV6:
 6978		*flow_type = SCTP_V6_FLOW;
 6979		break;
 6980	case CLASS_CODE_USER_PROG1:
 6981	case CLASS_CODE_USER_PROG2:
 6982	case CLASS_CODE_USER_PROG3:
 6983	case CLASS_CODE_USER_PROG4:
 6984		*flow_type = IP_USER_FLOW;
 6985		break;
 6986	default:
 6987		return -EINVAL;
 6988	}
 6989
 6990	return 0;
 6991}
 6992
 6993static int niu_ethflow_to_class(int flow_type, u64 *class)
 6994{
 6995	switch (flow_type) {
 6996	case TCP_V4_FLOW:
 6997		*class = CLASS_CODE_TCP_IPV4;
 6998		break;
 6999	case UDP_V4_FLOW:
 7000		*class = CLASS_CODE_UDP_IPV4;
 7001		break;
 7002	case AH_ESP_V4_FLOW:
 7003	case AH_V4_FLOW:
 7004	case ESP_V4_FLOW:
 7005		*class = CLASS_CODE_AH_ESP_IPV4;
 7006		break;
 7007	case SCTP_V4_FLOW:
 7008		*class = CLASS_CODE_SCTP_IPV4;
 7009		break;
 7010	case TCP_V6_FLOW:
 7011		*class = CLASS_CODE_TCP_IPV6;
 7012		break;
 7013	case UDP_V6_FLOW:
 7014		*class = CLASS_CODE_UDP_IPV6;
 7015		break;
 7016	case AH_ESP_V6_FLOW:
 7017	case AH_V6_FLOW:
 7018	case ESP_V6_FLOW:
 7019		*class = CLASS_CODE_AH_ESP_IPV6;
 7020		break;
 7021	case SCTP_V6_FLOW:
 7022		*class = CLASS_CODE_SCTP_IPV6;
 7023		break;
 7024	default:
 7025		return 0;
 7026	}
 7027
 7028	return 1;
 7029}
 7030
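/* Translate between the hardware FLOW_KEY_* bits, which select the
 * packet fields fed into the RX hash, and the equivalent ethtool
 * RXH_* bits.  The two L4 byte pairs (bytes 0-1 and 2-3 of the L4
 * header, i.e. source and destination port for TCP/UDP) reuse the
 * same FLOW_KEY_L4_BYTE12 value at two different shifts.
 */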
 7031static u64 niu_flowkey_to_ethflow(u64 flow_key)
 7032{
 7033	u64 ethflow = 0;
 7034
 7035	if (flow_key & FLOW_KEY_L2DA)
 7036		ethflow |= RXH_L2DA;
 7037	if (flow_key & FLOW_KEY_VLAN)
 7038		ethflow |= RXH_VLAN;
 7039	if (flow_key & FLOW_KEY_IPSA)
 7040		ethflow |= RXH_IP_SRC;
 7041	if (flow_key & FLOW_KEY_IPDA)
 7042		ethflow |= RXH_IP_DST;
 7043	if (flow_key & FLOW_KEY_PROTO)
 7044		ethflow |= RXH_L3_PROTO;
 7045	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
 7046		ethflow |= RXH_L4_B_0_1;
 7047	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
 7048		ethflow |= RXH_L4_B_2_3;
 7049
 7050	return ethflow;
 7052	}
 7053
 7054static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
 7055{
 7056	u64 key = 0;
 7057
 7058	if (ethflow & RXH_L2DA)
 7059		key |= FLOW_KEY_L2DA;
 7060	if (ethflow & RXH_VLAN)
 7061		key |= FLOW_KEY_VLAN;
 7062	if (ethflow & RXH_IP_SRC)
 7063		key |= FLOW_KEY_IPSA;
 7064	if (ethflow & RXH_IP_DST)
 7065		key |= FLOW_KEY_IPDA;
 7066	if (ethflow & RXH_L3_PROTO)
 7067		key |= FLOW_KEY_PROTO;
 7068	if (ethflow & RXH_L4_B_0_1)
 7069		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
 7070	if (ethflow & RXH_L4_B_2_3)
 7071		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
 7072
 7073	*flow_key = key;
 7074
 7075	return 1;
 7077	}
 7078
 7079static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 7080{
 7081	u64 class;
 7082
 7083	nfc->data = 0;
 7084
 7085	if (!niu_ethflow_to_class(nfc->flow_type, &class))
 7086		return -EINVAL;
 7087
 7088	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
 7089	    TCAM_KEY_DISC)
 7090		nfc->data = RXH_DISCARD;
 7091	else
 7092		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
 7093						      CLASS_CODE_USER_PROG1]);
 7094	return 0;
 7095}
 7096
 7097static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
 7098					struct ethtool_rx_flow_spec *fsp)
 7099{
 7100	u32 tmp;
 7101	u16 prt;
 7102
 7103	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
 7104	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
 7105
 7106	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
 7107	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 7108
 7109	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
 7110	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
 7111
 7112	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
 7113	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 7114
 7115	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
 7116		TCAM_V4KEY2_TOS_SHIFT;
 7117	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
 7118		TCAM_V4KEY2_TOS_SHIFT;
 7119
 7120	switch (fsp->flow_type) {
 7121	case TCP_V4_FLOW:
 7122	case UDP_V4_FLOW:
 7123	case SCTP_V4_FLOW:
 7124		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7125			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
 7126		fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
 7127
 7128		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7129			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
 7130		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 7131
 7132		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7133			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
 7134		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
 7135
 7136		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7137			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
 7138		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 7139		break;
 7140	case AH_V4_FLOW:
 7141	case ESP_V4_FLOW:
 7142		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7143			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7144		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 7145
 7146		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7147			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7148		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 7149		break;
 7150	case IP_USER_FLOW:
 7151		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
 7152			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7153		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 7154
 7155		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
 7156			TCAM_V4KEY2_PORT_SPI_SHIFT;
 7157		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 7158
 7159		fsp->h_u.usr_ip4_spec.proto =
 7160			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
 7161			TCAM_V4KEY2_PROTO_SHIFT;
 7162		fsp->m_u.usr_ip4_spec.proto =
 7163			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
 7164			TCAM_V4KEY2_PROTO_SHIFT;
 7165
 7166		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
 7167		break;
 7168	default:
 7169		break;
 7170	}
 7171}
 7172
 7173static int niu_get_ethtool_tcam_entry(struct niu *np,
 7174				      struct ethtool_rxnfc *nfc)
 7175{
 7176	struct niu_parent *parent = np->parent;
 7177	struct niu_tcam_entry *tp;
 7178	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
 7179	u16 idx;
 7180	u64 class;
 7181	int ret = 0;
 7182
 7183	idx = tcam_get_index(np, (u16)nfc->fs.location);
 7184
 7185	tp = &parent->tcam[idx];
 7186	if (!tp->valid) {
 7187		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
 7188			    parent->index, (u16)nfc->fs.location, idx);
 7189		return -EINVAL;
 7190	}
 7191
 7192	/* fill the flow spec entry */
 7193	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 7194		TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7195	ret = niu_class_to_ethflow(class, &fsp->flow_type);
 7196	if (ret < 0) {
 7197		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
 7198			    parent->index);
 7199		goto out;
 7200	}
 7201
 7202	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
 7203		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
 7204			TCAM_V4KEY2_PROTO_SHIFT;
 7205		if (proto == IPPROTO_ESP) {
 7206			if (fsp->flow_type == AH_V4_FLOW)
 7207				fsp->flow_type = ESP_V4_FLOW;
 7208			else
 7209				fsp->flow_type = ESP_V6_FLOW;
 7210		}
 7211	}
 7212
 7213	switch (fsp->flow_type) {
 7214	case TCP_V4_FLOW:
 7215	case UDP_V4_FLOW:
 7216	case SCTP_V4_FLOW:
 7217	case AH_V4_FLOW:
 7218	case ESP_V4_FLOW:
 7219		niu_get_ip4fs_from_tcam_key(tp, fsp);
 7220		break;
 7221	case TCP_V6_FLOW:
 7222	case UDP_V6_FLOW:
 7223	case SCTP_V6_FLOW:
 7224	case AH_V6_FLOW:
 7225	case ESP_V6_FLOW:
 7226		/* Not yet implemented */
 7227		ret = -EINVAL;
 7228		break;
 7229	case IP_USER_FLOW:
 7230		niu_get_ip4fs_from_tcam_key(tp, fsp);
 7231		break;
 7232	default:
 7233		ret = -EINVAL;
 7234		break;
 7235	}
 7236
 7237	if (ret < 0)
 7238		goto out;
 7239
 7240	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
 7241		fsp->ring_cookie = RX_CLS_FLOW_DISC;
 7242	else
 7243		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
 7244			TCAM_ASSOCDATA_OFFSET_SHIFT;
 7245
 7246	/* put the tcam size here */
 7247	nfc->data = tcam_get_size(np);
 7248out:
 7249	return ret;
 7250}
 7251
 7252static int niu_get_ethtool_tcam_all(struct niu *np,
 7253				    struct ethtool_rxnfc *nfc,
 7254				    u32 *rule_locs)
 7255{
 7256	struct niu_parent *parent = np->parent;
 7257	struct niu_tcam_entry *tp;
 7258	int i, idx, cnt;
 7259	unsigned long flags;
 7260	int ret = 0;
 7261
 7262	/* put the tcam size here */
 7263	nfc->data = tcam_get_size(np);
 7264
 7265	niu_lock_parent(np, flags);
 7266	for (cnt = 0, i = 0; i < nfc->data; i++) {
 7267		idx = tcam_get_index(np, i);
 7268		tp = &parent->tcam[idx];
 7269		if (!tp->valid)
 7270			continue;
 7271		if (cnt == nfc->rule_cnt) {
 7272			ret = -EMSGSIZE;
 7273			break;
 7274		}
 7275		rule_locs[cnt] = i;
 7276		cnt++;
 7277	}
 7278	niu_unlock_parent(np, flags);
 7279
 7280	nfc->rule_cnt = cnt;
 7281
 7282	return ret;
 7283}
 7284
 7285static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 7286		       u32 *rule_locs)
 7287{
 7288	struct niu *np = netdev_priv(dev);
 7289	int ret = 0;
 7290
 7291	switch (cmd->cmd) {
 7292	case ETHTOOL_GRXFH:
 7293		ret = niu_get_hash_opts(np, cmd);
 7294		break;
 7295	case ETHTOOL_GRXRINGS:
 7296		cmd->data = np->num_rx_rings;
 7297		break;
 7298	case ETHTOOL_GRXCLSRLCNT:
 7299		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
 7300		break;
 7301	case ETHTOOL_GRXCLSRULE:
 7302		ret = niu_get_ethtool_tcam_entry(np, cmd);
 7303		break;
 7304	case ETHTOOL_GRXCLSRLALL:
 7305		ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
 7306		break;
 7307	default:
 7308		ret = -EINVAL;
 7309		break;
 7310	}
 7311
 7312	return ret;
 7313}
 7314
 7315static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 7316{
 7317	u64 class;
 7318	u64 flow_key = 0;
 7319	unsigned long flags;
 7320
 7321	if (!niu_ethflow_to_class(nfc->flow_type, &class))
 7322		return -EINVAL;
 7323
 7324	if (class < CLASS_CODE_USER_PROG1 ||
 7325	    class > CLASS_CODE_SCTP_IPV6)
 7326		return -EINVAL;
 7327
 7328	if (nfc->data & RXH_DISCARD) {
 7329		niu_lock_parent(np, flags);
 7330		flow_key = np->parent->tcam_key[class -
 7331					       CLASS_CODE_USER_PROG1];
 7332		flow_key |= TCAM_KEY_DISC;
 7333		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
 7334		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
 7335		niu_unlock_parent(np, flags);
 7336		return 0;
 7337	} else {
 7338		/* Discard was set before, but is not set now */
 7339		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
 7340		    TCAM_KEY_DISC) {
 7341			niu_lock_parent(np, flags);
 7342			flow_key = np->parent->tcam_key[class -
 7343					       CLASS_CODE_USER_PROG1];
 7344			flow_key &= ~TCAM_KEY_DISC;
 7345			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
 7346			     flow_key);
 7347			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
 7348				flow_key;
 7349			niu_unlock_parent(np, flags);
 7350		}
 7351	}
 7352
 7353	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
 7354		return -EINVAL;
 7355
 7356	niu_lock_parent(np, flags);
 7357	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
 7358	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
 7359	niu_unlock_parent(np, flags);
 7360
 7361	return 0;
 7362}
 7363
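/* Build a TCAM key/mask pair from an ethtool IPv4 flow spec.  The
 * four 64-bit key words hold, in order: the class code, the L2 RDC
 * table number, the TOS/protocol/port-or-SPI fields, and the source
 * (high half) and destination (low half) IPv4 addresses.  Only fields
 * with a nonzero mask take part in matching.
 */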
 7364static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
 7365				       struct niu_tcam_entry *tp,
 7366				       int l2_rdc_tab, u64 class)
 7367{
 7368	u8 pid = 0;
 7369	u32 sip, dip, sipm, dipm, spi, spim;
 7370	u16 sport, dport, spm, dpm;
 7371
 7372	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
 7373	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
 7374	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
 7375	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
 7376
 7377	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7378	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
 7379	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
 7380	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
 7381
 7382	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
 7383	tp->key[3] |= dip;
 7384
 7385	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
 7386	tp->key_mask[3] |= dipm;
 7387
 7388	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
 7389		       TCAM_V4KEY2_TOS_SHIFT);
 7390	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
 7391			    TCAM_V4KEY2_TOS_SHIFT);
 7392	switch (fsp->flow_type) {
 7393	case TCP_V4_FLOW:
 7394	case UDP_V4_FLOW:
 7395	case SCTP_V4_FLOW:
 7396		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
 7397		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
 7398		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
 7399		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
 7400
 7401		tp->key[2] |= (((u64)sport << 16) | dport);
 7402		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
 7403		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
 7404		break;
 7405	case AH_V4_FLOW:
 7406	case ESP_V4_FLOW:
 7407		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
 7408		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
 7409
 7410		tp->key[2] |= spi;
 7411		tp->key_mask[2] |= spim;
 7412		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
 7413		break;
 7414	case IP_USER_FLOW:
 7415		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
 7416		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
 7417
 7418		tp->key[2] |= spi;
 7419		tp->key_mask[2] |= spim;
 7420		pid = fsp->h_u.usr_ip4_spec.proto;
 7421		break;
 7422	default:
 7423		break;
 7424	}
 7425
 7426	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
 7427	if (pid)
 7428		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
 7430}
 7431
 7432static int niu_add_ethtool_tcam_entry(struct niu *np,
 7433				      struct ethtool_rxnfc *nfc)
 7434{
 7435	struct niu_parent *parent = np->parent;
 7436	struct niu_tcam_entry *tp;
 7437	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
 7438	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
 7439	int l2_rdc_table = rdc_table->first_table_num;
 7440	u16 idx;
 7441	u64 class;
 7442	unsigned long flags;
 7443	int err, ret;
 7444
 7445	ret = 0;
 7446
 7447	idx = nfc->fs.location;
 7448	if (idx >= tcam_get_size(np))
 7449		return -EINVAL;
 7450
 7451	if (fsp->flow_type == IP_USER_FLOW) {
 7452		int i;
 7453		int add_usr_cls = 0;
 7454		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
 7455		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
 7456
 7457		if (uspec->ip_ver != ETH_RX_NFC_IP4)
 7458			return -EINVAL;
 7459
 7460		niu_lock_parent(np, flags);
 7461
 7462		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
 7463			if (parent->l3_cls[i]) {
 7464				if (uspec->proto == parent->l3_cls_pid[i]) {
 7465					class = parent->l3_cls[i];
 7466					parent->l3_cls_refcnt[i]++;
 7467					add_usr_cls = 1;
 7468					break;
 7469				}
 7470			} else {
 7471				/* Program new user IP class */
 7472				switch (i) {
 7473				case 0:
 7474					class = CLASS_CODE_USER_PROG1;
 7475					break;
 7476				case 1:
 7477					class = CLASS_CODE_USER_PROG2;
 7478					break;
 7479				case 2:
 7480					class = CLASS_CODE_USER_PROG3;
 7481					break;
 7482				case 3:
 7483					class = CLASS_CODE_USER_PROG4;
 7484					break;
 7485				default:
 7486					break;
 7487				}
 7488				ret = tcam_user_ip_class_set(np, class, 0,
 7489							     uspec->proto,
 7490							     uspec->tos,
 7491							     umask->tos);
 7492				if (ret)
 7493					goto out;
 7494
 7495				ret = tcam_user_ip_class_enable(np, class, 1);
 7496				if (ret)
 7497					goto out;
 7498				parent->l3_cls[i] = class;
 7499				parent->l3_cls_pid[i] = uspec->proto;
 7500				parent->l3_cls_refcnt[i]++;
 7501				add_usr_cls = 1;
 7502				break;
 7503			}
 7504		}
 7505		if (!add_usr_cls) {
 7506			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
 7507				    parent->index, __func__, uspec->proto);
 7508			ret = -EINVAL;
 7509			goto out;
 7510		}
 7511		niu_unlock_parent(np, flags);
 7512	} else {
 7513		if (!niu_ethflow_to_class(fsp->flow_type, &class))
 7514			return -EINVAL;
 7515	}
 7517
 7518	niu_lock_parent(np, flags);
 7519
 7520	idx = tcam_get_index(np, idx);
 7521	tp = &parent->tcam[idx];
 7522
 7523	memset(tp, 0, sizeof(*tp));
 7524
 7525	/* fill in the tcam key and mask */
 7526	switch (fsp->flow_type) {
 7527	case TCP_V4_FLOW:
 7528	case UDP_V4_FLOW:
 7529	case SCTP_V4_FLOW:
 7530	case AH_V4_FLOW:
 7531	case ESP_V4_FLOW:
 7532		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
 7533		break;
 7534	case TCP_V6_FLOW:
 7535	case UDP_V6_FLOW:
 7536	case SCTP_V6_FLOW:
 7537	case AH_V6_FLOW:
 7538	case ESP_V6_FLOW:
 7539		/* Not yet implemented */
 7540		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
 7541			    parent->index, __func__, fsp->flow_type);
 7542		ret = -EINVAL;
 7543		goto out;
 7544	case IP_USER_FLOW:
 7545		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
 7546		break;
 7547	default:
 7548		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
 7549			    parent->index, __func__, fsp->flow_type);
 7550		ret = -EINVAL;
 7551		goto out;
 7552	}
 7553
 7554	/* fill in the assoc data */
 7555	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
 7556		tp->assoc_data = TCAM_ASSOCDATA_DISC;
 7557	} else {
 7558		if (fsp->ring_cookie >= np->num_rx_rings) {
 7559			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
 7560				    parent->index, __func__,
 7561				    (long long)fsp->ring_cookie);
 7562			ret = -EINVAL;
 7563			goto out;
 7564		}
 7565		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
 7566				  (fsp->ring_cookie <<
 7567				   TCAM_ASSOCDATA_OFFSET_SHIFT));
 7568	}
 7569
 7570	err = tcam_write(np, idx, tp->key, tp->key_mask);
 7571	if (err) {
 7572		ret = -EINVAL;
 7573		goto out;
 7574	}
 7575	err = tcam_assoc_write(np, idx, tp->assoc_data);
 7576	if (err) {
 7577		ret = -EINVAL;
 7578		goto out;
 7579	}
 7580
 7581	/* validate the entry */
 7582	tp->valid = 1;
 7583	np->clas.tcam_valid_entries++;
 7584out:
 7585	niu_unlock_parent(np, flags);
 7586
 7587	return ret;
 7588}
 7589
 7590static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
 7591{
 7592	struct niu_parent *parent = np->parent;
 7593	struct niu_tcam_entry *tp;
 7594	u16 idx;
 7595	unsigned long flags;
 7596	u64 class;
 7597	int ret = 0;
 7598
 7599	if (loc >= tcam_get_size(np))
 7600		return -EINVAL;
 7601
 7602	niu_lock_parent(np, flags);
 7603
 7604	idx = tcam_get_index(np, loc);
 7605	tp = &parent->tcam[idx];
 7606
 7607	/* if the entry is of a user defined class, then update */
 7608	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 7609		TCAM_V4KEY0_CLASS_CODE_SHIFT;
 7610
 7611	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
 7612		int i;
 7613		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
 7614			if (parent->l3_cls[i] == class) {
 7615				parent->l3_cls_refcnt[i]--;
 7616				if (!parent->l3_cls_refcnt[i]) {
 7617					/* disable class */
 7618					ret = tcam_user_ip_class_enable(np,
 7619									class,
 7620									0);
 7621					if (ret)
 7622						goto out;
 7623					parent->l3_cls[i] = 0;
 7624					parent->l3_cls_pid[i] = 0;
 7625				}
 7626				break;
 7627			}
 7628		}
 7629		if (i == NIU_L3_PROG_CLS) {
 7630			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
 7631				    parent->index, __func__,
 7632				    (unsigned long long)class);
 7633			ret = -EINVAL;
 7634			goto out;
 7635		}
 7636	}
 7637
 7638	ret = tcam_flush(np, idx);
 7639	if (ret)
 7640		goto out;
 7641
 7642	/* invalidate the entry */
 7643	tp->valid = 0;
 7644	np->clas.tcam_valid_entries--;
 7645out:
 7646	niu_unlock_parent(np, flags);
 7647
 7648	return ret;
 7649}
 7650
 7651static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 7652{
 7653	struct niu *np = netdev_priv(dev);
 7654	int ret = 0;
 7655
 7656	switch (cmd->cmd) {
 7657	case ETHTOOL_SRXFH:
 7658		ret = niu_set_hash_opts(np, cmd);
 7659		break;
 7660	case ETHTOOL_SRXCLSRLINS:
 7661		ret = niu_add_ethtool_tcam_entry(np, cmd);
 7662		break;
 7663	case ETHTOOL_SRXCLSRLDEL:
 7664		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
 7665		break;
 7666	default:
 7667		ret = -EINVAL;
 7668		break;
 7669	}
 7670
 7671	return ret;
 7672}
 7673
 7674static const struct {
 7675	const char string[ETH_GSTRING_LEN];
 7676} niu_xmac_stat_keys[] = {
 7677	{ "tx_frames" },
 7678	{ "tx_bytes" },
 7679	{ "tx_fifo_errors" },
 7680	{ "tx_overflow_errors" },
 7681	{ "tx_max_pkt_size_errors" },
 7682	{ "tx_underflow_errors" },
 7683	{ "rx_local_faults" },
 7684	{ "rx_remote_faults" },
 7685	{ "rx_link_faults" },
 7686	{ "rx_align_errors" },
 7687	{ "rx_frags" },
 7688	{ "rx_mcasts" },
 7689	{ "rx_bcasts" },
 7690	{ "rx_hist_cnt1" },
 7691	{ "rx_hist_cnt2" },
 7692	{ "rx_hist_cnt3" },
 7693	{ "rx_hist_cnt4" },
 7694	{ "rx_hist_cnt5" },
 7695	{ "rx_hist_cnt6" },
 7696	{ "rx_hist_cnt7" },
 7697	{ "rx_octets" },
 7698	{ "rx_code_violations" },
 7699	{ "rx_len_errors" },
 7700	{ "rx_crc_errors" },
 7701	{ "rx_underflows" },
 7702	{ "rx_overflows" },
 7703	{ "pause_off_state" },
 7704	{ "pause_on_state" },
 7705	{ "pause_received" },
 7706};
 7707
 7708#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
 7709
 7710static const struct {
 7711	const char string[ETH_GSTRING_LEN];
 7712} niu_bmac_stat_keys[] = {
 7713	{ "tx_underflow_errors" },
 7714	{ "tx_max_pkt_size_errors" },
 7715	{ "tx_bytes" },
 7716	{ "tx_frames" },
 7717	{ "rx_overflows" },
 7718	{ "rx_frames" },
 7719	{ "rx_align_errors" },
 7720	{ "rx_crc_errors" },
 7721	{ "rx_len_errors" },
 7722	{ "pause_off_state" },
 7723	{ "pause_on_state" },
 7724	{ "pause_received" },
 7725};
 7726
 7727#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
 7728
 7729static const struct {
 7730	const char string[ETH_GSTRING_LEN];
 7731} niu_rxchan_stat_keys[] = {
 7732	{ "rx_channel" },
 7733	{ "rx_packets" },
 7734	{ "rx_bytes" },
 7735	{ "rx_dropped" },
 7736	{ "rx_errors" },
 7737};
 7738
 7739#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
 7740
 7741static const struct {
 7742	const char string[ETH_GSTRING_LEN];
 7743} niu_txchan_stat_keys[] = {
 7744	{ "tx_channel" },
 7745	{ "tx_packets" },
 7746	{ "tx_bytes" },
 7747	{ "tx_errors" },
 7748};
 7749
 7750#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
 7751
 7752static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 7753{
 7754	struct niu *np = netdev_priv(dev);
 7755	int i;
 7756
 7757	if (stringset != ETH_SS_STATS)
 7758		return;
 7759
 7760	if (np->flags & NIU_FLAGS_XMAC) {
 7761		memcpy(data, niu_xmac_stat_keys,
 7762		       sizeof(niu_xmac_stat_keys));
 7763		data += sizeof(niu_xmac_stat_keys);
 7764	} else {
 7765		memcpy(data, niu_bmac_stat_keys,
 7766		       sizeof(niu_bmac_stat_keys));
 7767		data += sizeof(niu_bmac_stat_keys);
 7768	}
 7769	for (i = 0; i < np->num_rx_rings; i++) {
 7770		memcpy(data, niu_rxchan_stat_keys,
 7771		       sizeof(niu_rxchan_stat_keys));
 7772		data += sizeof(niu_rxchan_stat_keys);
 7773	}
 7774	for (i = 0; i < np->num_tx_rings; i++) {
 7775		memcpy(data, niu_txchan_stat_keys,
 7776		       sizeof(niu_txchan_stat_keys));
 7777		data += sizeof(niu_txchan_stat_keys);
 7778	}
 7779}
 7780
 7781static int niu_get_sset_count(struct net_device *dev, int stringset)
 7782{
 7783	struct niu *np = netdev_priv(dev);
 7784
 7785	if (stringset != ETH_SS_STATS)
 7786		return -EINVAL;
 7787
 7788	return (np->flags & NIU_FLAGS_XMAC ?
 7789		 NUM_XMAC_STAT_KEYS :
 7790		 NUM_BMAC_STAT_KEYS) +
 7791		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
 7792		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
 7793}
 7794
 7795static void niu_get_ethtool_stats(struct net_device *dev,
 7796				  struct ethtool_stats *stats, u64 *data)
 7797{
 7798	struct niu *np = netdev_priv(dev);
 7799	int i;
 7800
 7801	niu_sync_mac_stats(np);
 7802	if (np->flags & NIU_FLAGS_XMAC) {
 7803		memcpy(data, &np->mac_stats.xmac,
 7804		       sizeof(struct niu_xmac_stats));
 7805		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
 7806	} else {
 7807		memcpy(data, &np->mac_stats.bmac,
 7808		       sizeof(struct niu_bmac_stats));
 7809		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
 7810	}
 7811	for (i = 0; i < np->num_rx_rings; i++) {
 7812		struct rx_ring_info *rp = &np->rx_rings[i];
 7813
 7814		niu_sync_rx_discard_stats(np, rp, 0);
 7815
 7816		data[0] = rp->rx_channel;
 7817		data[1] = rp->rx_packets;
 7818		data[2] = rp->rx_bytes;
 7819		data[3] = rp->rx_dropped;
 7820		data[4] = rp->rx_errors;
 7821		data += 5;
 7822	}
 7823	for (i = 0; i < np->num_tx_rings; i++) {
 7824		struct tx_ring_info *rp = &np->tx_rings[i];
 7825
 7826		data[0] = rp->tx_channel;
 7827		data[1] = rp->tx_packets;
 7828		data[2] = rp->tx_bytes;
 7829		data[3] = rp->tx_errors;
 7830		data += 4;
 7831	}
 7832}
 7833
 7834static u64 niu_led_state_save(struct niu *np)
 7835{
 7836	if (np->flags & NIU_FLAGS_XMAC)
 7837		return nr64_mac(XMAC_CONFIG);
 7838	else
 7839		return nr64_mac(BMAC_XIF_CONFIG);
 7840}
 7841
 7842static void niu_led_state_restore(struct niu *np, u64 val)
 7843{
 7844	if (np->flags & NIU_FLAGS_XMAC)
 7845		nw64_mac(XMAC_CONFIG, val);
 7846	else
 7847		nw64_mac(BMAC_XIF_CONFIG, val);
 7848}
 7849
 7850static void niu_force_led(struct niu *np, int on)
 7851{
 7852	u64 val, reg, bit;
 7853
 7854	if (np->flags & NIU_FLAGS_XMAC) {
 7855		reg = XMAC_CONFIG;
 7856		bit = XMAC_CONFIG_FORCE_LED_ON;
 7857	} else {
 7858		reg = BMAC_XIF_CONFIG;
 7859		bit = BMAC_XIF_CONFIG_LINK_LED;
 7860	}
 7861
 7862	val = nr64_mac(reg);
 7863	if (on)
 7864		val |= bit;
 7865	else
 7866		val &= ~bit;
 7867	nw64_mac(reg, val);
 7868}
 7869
 7870static int niu_set_phys_id(struct net_device *dev,
 7871			   enum ethtool_phys_id_state state)
 7873	{
 7874	struct niu *np = netdev_priv(dev);
 7875
 7876	if (!netif_running(dev))
 7877		return -EAGAIN;
 7878
 7879	switch (state) {
 7880	case ETHTOOL_ID_ACTIVE:
 7881		np->orig_led_state = niu_led_state_save(np);
 7882		return 1;	/* cycle on/off once per second */
 7883
 7884	case ETHTOOL_ID_ON:
 7885		niu_force_led(np, 1);
 7886		break;
 7887
 7888	case ETHTOOL_ID_OFF:
 7889		niu_force_led(np, 0);
 7890		break;
 7891
 7892	case ETHTOOL_ID_INACTIVE:
 7893		niu_led_state_restore(np, np->orig_led_state);
 7894	}
 7895
 7896	return 0;
 7897}
 7898
 7899static const struct ethtool_ops niu_ethtool_ops = {
 7900	.get_drvinfo		= niu_get_drvinfo,
 7901	.get_link		= ethtool_op_get_link,
 7902	.get_msglevel		= niu_get_msglevel,
 7903	.set_msglevel		= niu_set_msglevel,
 7904	.nway_reset		= niu_nway_reset,
 7905	.get_eeprom_len		= niu_get_eeprom_len,
 7906	.get_eeprom		= niu_get_eeprom,
 7907	.get_settings		= niu_get_settings,
 7908	.set_settings		= niu_set_settings,
 7909	.get_strings		= niu_get_strings,
 7910	.get_sset_count		= niu_get_sset_count,
 7911	.get_ethtool_stats	= niu_get_ethtool_stats,
 7912	.set_phys_id		= niu_set_phys_id,
 7913	.get_rxnfc		= niu_get_nfc,
 7914	.set_rxnfc		= niu_set_nfc,
 7915};
 7916
 7917static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
 7918			      int ldg, int ldn)
 7919{
 7920	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
 7921		return -EINVAL;
 7922	if (ldn < 0 || ldn > LDN_MAX)
 7923		return -EINVAL;
 7924
 7925	parent->ldg_map[ldn] = ldg;
 7926
 7927	if (np->parent->plat_type == PLAT_TYPE_NIU) {
 7928		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
 7929		 * the firmware, and we're not supposed to change them.
 7930		 * Validate the mapping, because if it's wrong we probably
 7931		 * won't get any interrupts and that's painful to debug.
 7932		 */
 7933		if (nr64(LDG_NUM(ldn)) != ldg) {
 7934			dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
 7935				np->port, ldn, ldg,
 7936				(unsigned long long) nr64(LDG_NUM(ldn)));
 7937			return -EINVAL;
 7938		}
 7939	} else
 7940		nw64(LDG_NUM(ldn), ldg);
 7941
 7942	return 0;
 7943}
 7944
 7945static int niu_set_ldg_timer_res(struct niu *np, int res)
 7946{
 7947	if (res < 0 || res > LDG_TIMER_RES_VAL)
 7948		return -EINVAL;
 7949
 7951	nw64(LDG_TIMER_RES, res);
 7952
 7953	return 0;
 7954}
 7955
 7956static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
 7957{
 7958	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
 7959	    (func < 0 || func > 3) ||
 7960	    (vector < 0 || vector > 0x1f))
 7961		return -EINVAL;
 7962
 7963	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
 7964
 7965	return 0;
 7966}
 7967
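/* Read one byte of the expansion EEPROM through ESPC_PIO_STAT.  The
 * read frame is issued and polled to completion twice before the data
 * byte is sampled; each poll allows up to 64 iterations of 5us for
 * ESPC_PIO_STAT_READ_END to assert.
 */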
 7968static int niu_pci_eeprom_read(struct niu *np, u32 addr)
 7969{
 7970	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
 7971				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
 7972	int limit;
 7973
 7974	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
 7975		return -EINVAL;
 7976
 7977	frame = frame_base;
 7978	nw64(ESPC_PIO_STAT, frame);
 7979	limit = 64;
 7980	do {
 7981		udelay(5);
 7982		frame = nr64(ESPC_PIO_STAT);
 7983		if (frame & ESPC_PIO_STAT_READ_END)
 7984			break;
 7985	} while (limit--);
 7986	if (!(frame & ESPC_PIO_STAT_READ_END)) {
 7987		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
 7988			(unsigned long long) frame);
 7989		return -ENODEV;
 7990	}
 7991
 7992	frame = frame_base;
 7993	nw64(ESPC_PIO_STAT, frame);
 7994	limit = 64;
 7995	do {
 7996		udelay(5);
 7997		frame = nr64(ESPC_PIO_STAT);
 7998		if (frame & ESPC_PIO_STAT_READ_END)
 7999			break;
 8000	} while (limit--);
 8001	if (!(frame & ESPC_PIO_STAT_READ_END)) {
 8002		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
 8003			(unsigned long long) frame);
 8004		return -ENODEV;
 8005	}
 8006
 8007	frame = nr64(ESPC_PIO_STAT);
 8008	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
 8009}
 8010
 8011static int niu_pci_eeprom_read16(struct niu *np, u32 off)
 8012{
 8013	int err = niu_pci_eeprom_read(np, off);
 8014	u16 val;
 8015
 8016	if (err < 0)
 8017		return err;
 8018	val = (err << 8);
 8019	err = niu_pci_eeprom_read(np, off + 1);
 8020	if (err < 0)
 8021		return err;
 8022	val |= (err & 0xff);
 8023
 8024	return val;
 8025}
 8026
 8027static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
 8028{
 8029	int err = niu_pci_eeprom_read(np, off);
 8030	u16 val;
 8031
 8032	if (err < 0)
 8033		return err;
 8034
 8035	val = (err & 0xff);
 8036	err = niu_pci_eeprom_read(np, off + 1);
 8037	if (err < 0)
 8038		return err;
 8039
 8040	val |= (err & 0xff) << 8;
 8041
 8042	return val;
 8043}
 8044
 8045static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
 8046				    int namebuf_len)
 8047{
 8048	int i;
 8049
 8050	for (i = 0; i < namebuf_len; i++) {
 8051		int err = niu_pci_eeprom_read(np, off + i);
 8052		if (err < 0)
 8053			return err;
 8054		*namebuf++ = err;
 8055		if (!err)
 8056			break;
 8057	}
 8058	if (i >= namebuf_len)
 8059		return -EINVAL;
 8060
 8061	return i + 1;
 8062}
 8063
 8064static void niu_vpd_parse_version(struct niu *np)
 8065{
 8066	struct niu_vpd *vpd = &np->vpd;
 8067	int len = strlen(vpd->version) + 1;
 8068	const char *s = vpd->version;
 8069	int i;
 8070
 8071	for (i = 0; i < len - 5; i++) {
 8072		if (!strncmp(s + i, "FCode ", 6))
 8073			break;
 8074	}
 8075	if (i >= len - 5)
 8076		return;
 8077
 8078	s += i + 5;
 8079	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
 8080
 8081	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8082		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
 8083		     vpd->fcode_major, vpd->fcode_minor);
 8084	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
 8085	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
 8086	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
 8087		np->flags |= NIU_FLAGS_VPD_VALID;
 8088}
 8089
 8090/* ESPC_PIO_EN_ENABLE must be set */
 8091static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
 8092{
 8093	unsigned int found_mask = 0;
 8094#define FOUND_MASK_MODEL	0x00000001
 8095#define FOUND_MASK_BMODEL	0x00000002
 8096#define FOUND_MASK_VERS		0x00000004
 8097#define FOUND_MASK_MAC		0x00000008
 8098#define FOUND_MASK_NMAC		0x00000010
 8099#define FOUND_MASK_PHY		0x00000020
 8100#define FOUND_MASK_ALL		0x0000003f
 8101
 8102	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8103		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
 8104	while (start < end) {
 8105		int len, err, prop_len;
 8106		char namebuf[64];
 8107		u8 *prop_buf;
 8108		int max_len;
 8109
 8110		if (found_mask == FOUND_MASK_ALL) {
 8111			niu_vpd_parse_version(np);
 8112			return 1;
 8113		}
 8114
 8115		err = niu_pci_eeprom_read(np, start + 2);
 8116		if (err < 0)
 8117			return err;
 8118		len = err;
 8119		start += 3;
 8120
 8121		prop_len = niu_pci_eeprom_read(np, start + 4);
		if (prop_len < 0)
			return prop_len;
 8122		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
 8123		if (err < 0)
 8124			return err;
 8125
 8126		prop_buf = NULL;
 8127		max_len = 0;
 8128		if (!strcmp(namebuf, "model")) {
 8129			prop_buf = np->vpd.model;
 8130			max_len = NIU_VPD_MODEL_MAX;
 8131			found_mask |= FOUND_MASK_MODEL;
 8132		} else if (!strcmp(namebuf, "board-model")) {
 8133			prop_buf = np->vpd.board_model;
 8134			max_len = NIU_VPD_BD_MODEL_MAX;
 8135			found_mask |= FOUND_MASK_BMODEL;
 8136		} else if (!strcmp(namebuf, "version")) {
 8137			prop_buf = np->vpd.version;
 8138			max_len = NIU_VPD_VERSION_MAX;
 8139			found_mask |= FOUND_MASK_VERS;
 8140		} else if (!strcmp(namebuf, "local-mac-address")) {
 8141			prop_buf = np->vpd.local_mac;
 8142			max_len = ETH_ALEN;
 8143			found_mask |= FOUND_MASK_MAC;
 8144		} else if (!strcmp(namebuf, "num-mac-addresses")) {
 8145			prop_buf = &np->vpd.mac_num;
 8146			max_len = 1;
 8147			found_mask |= FOUND_MASK_NMAC;
 8148		} else if (!strcmp(namebuf, "phy-type")) {
 8149			prop_buf = np->vpd.phy_type;
 8150			max_len = NIU_VPD_PHY_TYPE_MAX;
 8151			found_mask |= FOUND_MASK_PHY;
 8152		}
 8153
 8154		if (max_len && prop_len > max_len) {
 8155			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
 8156			return -EINVAL;
 8157		}
 8158
 8159		if (prop_buf) {
 8160			u32 off = start + 5 + err;
 8161			int i;
 8162
 8163			netif_printk(np, probe, KERN_DEBUG, np->dev,
 8164				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
 8165				     namebuf, prop_len);
			for (i = 0; i < prop_len; i++) {
				err = niu_pci_eeprom_read(np, off + i);
				if (err < 0)
					return err;
				*prop_buf++ = err;
			}
 8168		}
 8169
 8170		start += len;
 8171	}
 8172
 8173	return 0;
 8174}
 8175
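/* Walk the VPD blocks that follow the identifier string.  Each block
 * starts with a 0x90 tag byte plus a byte-swapped 16-bit length; hand
 * the payload to niu_pci_vpd_scan_props() and stop once it reports
 * success (1), an error, or the tag stream runs past the EEPROM end.
 */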
 8176/* ESPC_PIO_EN_ENABLE must be set */
 8177static void niu_pci_vpd_fetch(struct niu *np, u32 start)
 8178{
 8179	u32 offset;
 8180	int err;
 8181
 8182	err = niu_pci_eeprom_read16_swp(np, start + 1);
 8183	if (err < 0)
 8184		return;
 8185
 8186	offset = err + 3;
 8187
 8188	while (start + offset < ESPC_EEPROM_SIZE) {
 8189		u32 here = start + offset;
 8190		u32 end;
 8191
 8192		err = niu_pci_eeprom_read(np, here);
 8193		if (err != 0x90)
 8194			return;
 8195
 8196		err = niu_pci_eeprom_read16_swp(np, here + 1);
 8197		if (err < 0)
 8198			return;
 8199
 8200		here = start + offset + 3;
 8201		end = start + offset + err;
 8202
 8203		offset += err;
 8204
 8205		err = niu_pci_vpd_scan_props(np, here, end);
 8206		if (err < 0 || err == 1)
 8207			return;
 8208	}
 8209}
 8210
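/* Locate the VPD identifier string inside the expansion ROM.  Roughly:
 * check the 0x55aa ROM signature, follow the pointer to the "PCIR"
 * data structure, and skip non-OBP images (code type != 0x01) in
 * 512-byte units until we reach the image whose data begins with the
 * 0x82 identifier-string tag.  Returns 0 if nothing matches.
 */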
 8211/* ESPC_PIO_EN_ENABLE must be set */
 8212static u32 niu_pci_vpd_offset(struct niu *np)
 8213{
 8214	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
 8215	int err;
 8216
 8217	while (start < end) {
 8218		ret = start;
 8219
 8220		/* ROM header signature?  */
 8221		err = niu_pci_eeprom_read16(np, start +  0);
 8222		if (err != 0x55aa)
 8223			return 0;
 8224
 8225		/* Apply offset to PCI data structure.  */
 8226		err = niu_pci_eeprom_read16(np, start + 23);
 8227		if (err < 0)
 8228			return 0;
 8229		start += err;
 8230
 8231		/* Check for "PCIR" signature.  */
 8232		err = niu_pci_eeprom_read16(np, start +  0);
 8233		if (err != 0x5043)
 8234			return 0;
 8235		err = niu_pci_eeprom_read16(np, start +  2);
 8236		if (err != 0x4952)
 8237			return 0;
 8238
 8239		/* Check for OBP image type.  */
 8240		err = niu_pci_eeprom_read(np, start + 20);
 8241		if (err < 0)
 8242			return 0;
 8243		if (err != 0x01) {
 8244			err = niu_pci_eeprom_read(np, ret + 2);
 8245			if (err < 0)
 8246				return 0;
 8247
 8248			start = ret + (err * 512);
 8249			continue;
 8250		}
 8251
 8252		err = niu_pci_eeprom_read16_swp(np, start + 8);
 8253		if (err < 0)
			return 0;	/* u32 return: 0 means "not found" */
 8255		ret += err;
 8256
 8257		err = niu_pci_eeprom_read(np, ret + 0);
 8258		if (err != 0x82)
 8259			return 0;
 8260
 8261		return ret;
 8262	}
 8263
 8264	return 0;
 8265}
 8266
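/* Map the OpenBoot "phy-type" property onto link flags:
 * "mif" = 1G copper/MII, "pcs" = 1G fiber/PCS, "xgf" = 10G fiber/XPCS,
 * "xgc" = 10G copper/XPCS, "xgsd"/"gsd" = SERDES (assume 10G).
 */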
 8267static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
 8268{
 8269	if (!strcmp(phy_prop, "mif")) {
 8270		/* 1G copper, MII */
 8271		np->flags &= ~(NIU_FLAGS_FIBER |
 8272			       NIU_FLAGS_10G);
 8273		np->mac_xcvr = MAC_XCVR_MII;
 8274	} else if (!strcmp(phy_prop, "xgf")) {
 8275		/* 10G fiber, XPCS */
 8276		np->flags |= (NIU_FLAGS_10G |
 8277			      NIU_FLAGS_FIBER);
 8278		np->mac_xcvr = MAC_XCVR_XPCS;
 8279	} else if (!strcmp(phy_prop, "pcs")) {
 8280		/* 1G fiber, PCS */
 8281		np->flags &= ~NIU_FLAGS_10G;
 8282		np->flags |= NIU_FLAGS_FIBER;
 8283		np->mac_xcvr = MAC_XCVR_PCS;
 8284	} else if (!strcmp(phy_prop, "xgc")) {
 8285		/* 10G copper, XPCS */
 8286		np->flags |= NIU_FLAGS_10G;
 8287		np->flags &= ~NIU_FLAGS_FIBER;
 8288		np->mac_xcvr = MAC_XCVR_XPCS;
 8289	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
 8290		/* 10G Serdes or 1G Serdes, default to 10G */
 8291		np->flags |= NIU_FLAGS_10G;
 8292		np->flags &= ~NIU_FLAGS_FIBER;
 8293		np->flags |= NIU_FLAGS_XCVR_SERDES;
 8294		np->mac_xcvr = MAC_XCVR_XPCS;
 8295	} else {
 8296		return -EINVAL;
 8297	}
 8298	return 0;
 8299}
 8300
 8301static int niu_pci_vpd_get_nports(struct niu *np)
 8302{
 8303	int ports = 0;
 8304
 8305	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
 8306	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
 8307	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
 8308	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
 8309	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
 8310		ports = 4;
 8311	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
 8312		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
 8313		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
 8314		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
 8315		ports = 2;
 8316	}
 8317
 8318	return ports;
 8319}
 8320
 8321static void niu_pci_vpd_validate(struct niu *np)
 8322{
 8323	struct net_device *dev = np->dev;
 8324	struct niu_vpd *vpd = &np->vpd;
 8325	u8 val8;
 8326
 8327	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
 8328		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
 8329
 8330		np->flags &= ~NIU_FLAGS_VPD_VALID;
 8331		return;
 8332	}
 8333
 8334	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
 8335	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 8336		np->flags |= NIU_FLAGS_10G;
 8337		np->flags &= ~NIU_FLAGS_FIBER;
 8338		np->flags |= NIU_FLAGS_XCVR_SERDES;
 8339		np->mac_xcvr = MAC_XCVR_PCS;
 8340		if (np->port > 1) {
 8341			np->flags |= NIU_FLAGS_FIBER;
 8342			np->flags &= ~NIU_FLAGS_10G;
 8343		}
 8344		if (np->flags & NIU_FLAGS_10G)
 8345			np->mac_xcvr = MAC_XCVR_XPCS;
 8346	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 8347		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 8348			      NIU_FLAGS_HOTPLUG_PHY);
 8349	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
 8350		dev_err(np->device, "Illegal phy string [%s]\n",
 8351			np->vpd.phy_type);
 8352		dev_err(np->device, "Falling back to SPROM\n");
 8353		np->flags &= ~NIU_FLAGS_VPD_VALID;
 8354		return;
 8355	}
 8356
 8357	memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
 8358
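	/* Each port uses the base MAC address plus its port number;
	 * ripple the carry into byte 4, e.g. ...:00:ff on port 1
	 * becomes ...:01:00.
	 */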
 8359	val8 = dev->dev_addr[5];
 8360	dev->dev_addr[5] += np->port;
 8361	if (dev->dev_addr[5] < val8)
 8362		dev->dev_addr[4]++;
 8363}
 8364
 8365static int niu_pci_probe_sprom(struct niu *np)
 8366{
 8367	struct net_device *dev = np->dev;
 8368	int len, i;
 8369	u64 val, sum;
 8370	u8 val8;
 8371
 8372	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
 8373	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
 8374	len = val / 4;
 8375
 8376	np->eeprom_len = len;
 8377
 8378	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8379		     "SPROM: Image size %llu\n", (unsigned long long)val);
 8380
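	/* The checksum is simply every byte of the image summed modulo
	 * 256, which must come out to the fixed value 0xab.
	 */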
 8381	sum = 0;
 8382	for (i = 0; i < len; i++) {
 8383		val = nr64(ESPC_NCR(i));
 8384		sum += (val >>  0) & 0xff;
 8385		sum += (val >>  8) & 0xff;
 8386		sum += (val >> 16) & 0xff;
 8387		sum += (val >> 24) & 0xff;
 8388	}
 8389	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8390		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
 8391	if ((sum & 0xff) != 0xab) {
 8392		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
 8393		return -EINVAL;
 8394	}
 8395
 8396	val = nr64(ESPC_PHY_TYPE);
 8397	switch (np->port) {
 8398	case 0:
 8399		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
 8400			ESPC_PHY_TYPE_PORT0_SHIFT;
 8401		break;
 8402	case 1:
 8403		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
 8404			ESPC_PHY_TYPE_PORT1_SHIFT;
 8405		break;
 8406	case 2:
 8407		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
 8408			ESPC_PHY_TYPE_PORT2_SHIFT;
 8409		break;
 8410	case 3:
 8411		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
 8412			ESPC_PHY_TYPE_PORT3_SHIFT;
 8413		break;
 8414	default:
 8415		dev_err(np->device, "Bogus port number %u\n",
 8416			np->port);
 8417		return -EINVAL;
 8418	}
 8419	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8420		     "SPROM: PHY type %x\n", val8);
 8421
 8422	switch (val8) {
 8423	case ESPC_PHY_TYPE_1G_COPPER:
 8424		/* 1G copper, MII */
 8425		np->flags &= ~(NIU_FLAGS_FIBER |
 8426			       NIU_FLAGS_10G);
 8427		np->mac_xcvr = MAC_XCVR_MII;
 8428		break;
 8429
 8430	case ESPC_PHY_TYPE_1G_FIBER:
 8431		/* 1G fiber, PCS */
 8432		np->flags &= ~NIU_FLAGS_10G;
 8433		np->flags |= NIU_FLAGS_FIBER;
 8434		np->mac_xcvr = MAC_XCVR_PCS;
 8435		break;
 8436
 8437	case ESPC_PHY_TYPE_10G_COPPER:
 8438		/* 10G copper, XPCS */
 8439		np->flags |= NIU_FLAGS_10G;
 8440		np->flags &= ~NIU_FLAGS_FIBER;
 8441		np->mac_xcvr = MAC_XCVR_XPCS;
 8442		break;
 8443
 8444	case ESPC_PHY_TYPE_10G_FIBER:
 8445		/* 10G fiber, XPCS */
 8446		np->flags |= (NIU_FLAGS_10G |
 8447			      NIU_FLAGS_FIBER);
 8448		np->mac_xcvr = MAC_XCVR_XPCS;
 8449		break;
 8450
 8451	default:
 8452		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
 8453		return -EINVAL;
 8454	}
 8455
 8456	val = nr64(ESPC_MAC_ADDR0);
 8457	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8458		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
 8459	dev->dev_addr[0] = (val >>  0) & 0xff;
 8460	dev->dev_addr[1] = (val >>  8) & 0xff;
 8461	dev->dev_addr[2] = (val >> 16) & 0xff;
 8462	dev->dev_addr[3] = (val >> 24) & 0xff;
 8463
 8464	val = nr64(ESPC_MAC_ADDR1);
 8465	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8466		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
 8467	dev->dev_addr[4] = (val >>  0) & 0xff;
 8468	dev->dev_addr[5] = (val >>  8) & 0xff;
 8469
 8470	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
 8471		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
 8472			dev->dev_addr);
 8473		return -EINVAL;
 8474	}
 8475
 8476	val8 = dev->dev_addr[5];
 8477	dev->dev_addr[5] += np->port;
 8478	if (dev->dev_addr[5] < val8)
 8479		dev->dev_addr[4]++;
 8480
 8481	val = nr64(ESPC_MOD_STR_LEN);
 8482	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8483		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
 8484	if (val >= 8 * 4)
 8485		return -EINVAL;
 8486
 8487	for (i = 0; i < val; i += 4) {
 8488		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
 8489
 8490		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
 8491		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
 8492		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
 8493		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
 8494	}
 8495	np->vpd.model[val] = '\0';
 8496
 8497	val = nr64(ESPC_BD_MOD_STR_LEN);
 8498	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8499		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
 8500	if (val >= 4 * 4)
 8501		return -EINVAL;
 8502
 8503	for (i = 0; i < val; i += 4) {
 8504		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
 8505
 8506		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
 8507		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
 8508		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
 8509		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
 8510	}
 8511	np->vpd.board_model[val] = '\0';
 8512
 8513	np->vpd.mac_num =
 8514		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
 8515	netif_printk(np, probe, KERN_DEBUG, np->dev,
 8516		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
 8517
 8518	return 0;
 8519}
 8520
 8521static int niu_get_and_validate_port(struct niu *np)
 8522{
 8523	struct niu_parent *parent = np->parent;
 8524
 8525	if (np->port <= 1)
 8526		np->flags |= NIU_FLAGS_XMAC;
 8527
 8528	if (!parent->num_ports) {
 8529		if (parent->plat_type == PLAT_TYPE_NIU) {
 8530			parent->num_ports = 2;
 8531		} else {
 8532			parent->num_ports = niu_pci_vpd_get_nports(np);
 8533			if (!parent->num_ports) {
 8534				/* Fall back to SPROM as last resort.
 8535				 * This will fail on most cards.
 8536				 */
 8537				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
 8538					ESPC_NUM_PORTS_MACS_VAL;
 8539
 8540				/* All of the current probing methods fail on
 8541				 * Maramba on-board parts.
 8542				 */
 8543				if (!parent->num_ports)
 8544					parent->num_ports = 4;
 8545			}
 8546		}
 8547	}
 8548
 8549	if (np->port >= parent->num_ports)
 8550		return -ENODEV;
 8551
 8552	return 0;
 8553}
 8554
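/* Record one discovered PHY.  The 32-bit ID is PHYSID1 in the upper
 * half and PHYSID2 in the lower half; only the Broadcom 8704/8706 and
 * Marvell 88X2011 (10G) and Broadcom 5464R (1G) parts are recognized.
 */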
 8555static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
 8556		      int dev_id_1, int dev_id_2, u8 phy_port, int type)
 8557{
 8558	u32 id = (dev_id_1 << 16) | dev_id_2;
 8559	u8 idx;
 8560
 8561	if (dev_id_1 < 0 || dev_id_2 < 0)
 8562		return 0;
 8563	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
 8564		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
 8565		 * test covers the 8706 as well.
 8566		 */
 8567		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
 8568		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
 8569			return 0;
 8570	} else {
 8571		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
 8572			return 0;
 8573	}
 8574
 8575	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
 8576		parent->index, id,
 8577		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
 8578		type == PHY_TYPE_PCS ? "PCS" : "MII",
 8579		phy_port);
 8580
 8581	if (p->cur[type] >= NIU_MAX_PORTS) {
 8582		pr_err("Too many PHY ports\n");
 8583		return -EINVAL;
 8584	}
 8585	idx = p->cur[type];
 8586	p->phy_id[type][idx] = id;
 8587	p->phy_port[type][idx] = phy_port;
 8588	p->cur[type] = idx + 1;
 8589	return 0;
 8590}
 8591
 8592static int port_has_10g(struct phy_probe_info *p, int port)
 8593{
 8594	int i;
 8595
 8596	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
 8597		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
 8598			return 1;
 8599	}
 8600	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
 8601		if (p->phy_port[PHY_TYPE_PCS][i] == port)
 8602			return 1;
 8603	}
 8604
 8605	return 0;
 8606}
 8607
 8608static int count_10g_ports(struct phy_probe_info *p, int *lowest)
 8609{
 8610	int port, cnt;
 8611
 8612	cnt = 0;
 8613	*lowest = 32;
 8614	for (port = 8; port < 32; port++) {
 8615		if (port_has_10g(p, port)) {
 8616			if (!cnt)
 8617				*lowest = port;
 8618			cnt++;
 8619		}
 8620	}
 8621
 8622	return cnt;
 8623}
 8624
 8625static int count_1g_ports(struct phy_probe_info *p, int *lowest)
 8626{
 8627	*lowest = 32;
 8628	if (p->cur[PHY_TYPE_MII])
 8629		*lowest = p->phy_port[PHY_TYPE_MII][0];
 8630
 8631	return p->cur[PHY_TYPE_MII];
 8632}
 8633
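/* On the N2 NIU every port gets an equal 16/num_ports share of both
 * the RX and TX DMA channels, e.g. two ports get 8 RX + 8 TX each.
 */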
 8634static void niu_n2_divide_channels(struct niu_parent *parent)
 8635{
 8636	int num_ports = parent->num_ports;
 8637	int i;
 8638
 8639	for (i = 0; i < num_ports; i++) {
 8640		parent->rxchan_per_port[i] = (16 / num_ports);
 8641		parent->txchan_per_port[i] = (16 / num_ports);
 8642
 8643		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
 8644			parent->index, i,
 8645			parent->rxchan_per_port[i],
 8646			parent->txchan_per_port[i]);
 8647	}
 8648}
 8649
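/* Mixed configurations favour the 10G ports: each 1G port gets a fixed
 * NIU_NUM_RXCHAN/8 RX and NIU_NUM_TXCHAN/6 TX share, and whatever is
 * left over is split evenly among the 10G ports.  With the usual
 * 16 RX / 24 TX channel counts, 2x10G + 2x1G works out to 6 RX + 8 TX
 * per 10G port and 2 RX + 4 TX per 1G port.
 */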
 8650static void niu_divide_channels(struct niu_parent *parent,
 8651				int num_10g, int num_1g)
 8652{
 8653	int num_ports = parent->num_ports;
 8654	int rx_chans_per_10g, rx_chans_per_1g;
 8655	int tx_chans_per_10g, tx_chans_per_1g;
 8656	int i, tot_rx, tot_tx;
 8657
 8658	if (!num_10g || !num_1g) {
 8659		rx_chans_per_10g = rx_chans_per_1g =
 8660			(NIU_NUM_RXCHAN / num_ports);
 8661		tx_chans_per_10g = tx_chans_per_1g =
 8662			(NIU_NUM_TXCHAN / num_ports);
 8663	} else {
 8664		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
 8665		rx_chans_per_10g = (NIU_NUM_RXCHAN -
 8666				    (rx_chans_per_1g * num_1g)) /
 8667			num_10g;
 8668
 8669		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
 8670		tx_chans_per_10g = (NIU_NUM_TXCHAN -
 8671				    (tx_chans_per_1g * num_1g)) /
 8672			num_10g;
 8673	}
 8674
 8675	tot_rx = tot_tx = 0;
 8676	for (i = 0; i < num_ports; i++) {
 8677		int type = phy_decode(parent->port_phy, i);
 8678
 8679		if (type == PORT_TYPE_10G) {
 8680			parent->rxchan_per_port[i] = rx_chans_per_10g;
 8681			parent->txchan_per_port[i] = tx_chans_per_10g;
 8682		} else {
 8683			parent->rxchan_per_port[i] = rx_chans_per_1g;
 8684			parent->txchan_per_port[i] = tx_chans_per_1g;
 8685		}
 8686		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
 8687			parent->index, i,
 8688			parent->rxchan_per_port[i],
 8689			parent->txchan_per_port[i]);
 8690		tot_rx += parent->rxchan_per_port[i];
 8691		tot_tx += parent->txchan_per_port[i];
 8692	}
 8693
 8694	if (tot_rx > NIU_NUM_RXCHAN) {
 8695		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
 8696		       parent->index, tot_rx);
 8697		for (i = 0; i < num_ports; i++)
 8698			parent->rxchan_per_port[i] = 1;
 8699	}
 8700	if (tot_tx > NIU_NUM_TXCHAN) {
 8701		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
 8702		       parent->index, tot_tx);
 8703		for (i = 0; i < num_ports; i++)
 8704			parent->txchan_per_port[i] = 1;
 8705	}
 8706	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
 8707		pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
 8708			parent->index, tot_rx, tot_tx);
 8709	}
 8710}
 8711
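/* Carve the RDC tables up evenly between the ports and fill each
 * table's slots by round-robining over that port's RX DMA channels,
 * so classified traffic spreads across all of the port's channels.
 * The first channel of each port doubles as its default RDC.
 */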
 8712static void niu_divide_rdc_groups(struct niu_parent *parent,
 8713				  int num_10g, int num_1g)
 8714{
 8715	int i, num_ports = parent->num_ports;
 8716	int rdc_group, rdc_groups_per_port;
 8717	int rdc_channel_base;
 8718
 8719	rdc_group = 0;
 8720	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
 8721
 8722	rdc_channel_base = 0;
 8723
 8724	for (i = 0; i < num_ports; i++) {
 8725		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
 8726		int grp, num_channels = parent->rxchan_per_port[i];
 8727		int this_channel_offset;
 8728
 8729		tp->first_table_num = rdc_group;
 8730		tp->num_tables = rdc_groups_per_port;
 8731		this_channel_offset = 0;
 8732		for (grp = 0; grp < tp->num_tables; grp++) {
 8733			struct rdc_table *rt = &tp->tables[grp];
 8734			int slot;
 8735
 8736			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
 8737				parent->index, i, tp->first_table_num + grp);
 8738			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
 8739				rt->rxdma_channel[slot] =
 8740					rdc_channel_base + this_channel_offset;
 8741
 8742				pr_cont("%d ", rt->rxdma_channel[slot]);
 8743
 8744				if (++this_channel_offset == num_channels)
 8745					this_channel_offset = 0;
 8746			}
 8747			pr_cont("]\n");
 8748		}
 8749
 8750		parent->rdc_default[i] = rdc_channel_base;
 8751
 8752		rdc_channel_base += num_channels;
 8753		rdc_group += rdc_groups_per_port;
 8754	}
 8755}
 8756
 8757static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
 8758			       struct phy_probe_info *info)
 8759{
 8760	unsigned long flags;
 8761	int port, err;
 8762
 8763	memset(info, 0, sizeof(*info));
 8764
 8765	/* Port 0 to 7 are reserved for onboard Serdes, probe the rest.  */
 8766	niu_lock_parent(np, flags);
 8767	err = 0;
 8768	for (port = 8; port < 32; port++) {
 8769		int dev_id_1, dev_id_2;
 8770
 8771		dev_id_1 = mdio_read(np, port,
 8772				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
 8773		dev_id_2 = mdio_read(np, port,
 8774				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
 8775		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8776				 PHY_TYPE_PMA_PMD);
 8777		if (err)
 8778			break;
 8779		dev_id_1 = mdio_read(np, port,
 8780				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
 8781		dev_id_2 = mdio_read(np, port,
 8782				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
 8783		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8784				 PHY_TYPE_PCS);
 8785		if (err)
 8786			break;
 8787		dev_id_1 = mii_read(np, port, MII_PHYSID1);
 8788		dev_id_2 = mii_read(np, port, MII_PHYSID2);
 8789		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
 8790				 PHY_TYPE_MII);
 8791		if (err)
 8792			break;
 8793	}
 8794	niu_unlock_parent(np, flags);
 8795
 8796	return err;
 8797}
 8798
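/* Work out the port layout.  A few boards are keyed off the VPD model
 * string; everything else is probed over MDIO and dispatched on the
 * (num_10g << 4) | num_1g count, e.g. 0x24 means two 10G plus four 1G
 * PHYs were found.
 */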
 8799static int walk_phys(struct niu *np, struct niu_parent *parent)
 8800{
 8801	struct phy_probe_info *info = &parent->phy_probe_info;
 8802	int lowest_10g, lowest_1g;
 8803	int num_10g, num_1g;
 8804	u32 val;
 8805	int err;
 8806
 8807	num_10g = num_1g = 0;
 8808
 8809	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
 8810	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 8811		num_10g = 0;
 8812		num_1g = 2;
 8813		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
 8814		parent->num_ports = 4;
 8815		val = (phy_encode(PORT_TYPE_1G, 0) |
 8816		       phy_encode(PORT_TYPE_1G, 1) |
 8817		       phy_encode(PORT_TYPE_1G, 2) |
 8818		       phy_encode(PORT_TYPE_1G, 3));
 8819	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 8820		num_10g = 2;
 8821		num_1g = 0;
 8822		parent->num_ports = 2;
 8823		val = (phy_encode(PORT_TYPE_10G, 0) |
 8824		       phy_encode(PORT_TYPE_10G, 1));
 8825	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
 8826		   (parent->plat_type == PLAT_TYPE_NIU)) {
 8827		/* this is the Monza case */
 8828		if (np->flags & NIU_FLAGS_10G) {
 8829			val = (phy_encode(PORT_TYPE_10G, 0) |
 8830			       phy_encode(PORT_TYPE_10G, 1));
 8831		} else {
 8832			val = (phy_encode(PORT_TYPE_1G, 0) |
 8833			       phy_encode(PORT_TYPE_1G, 1));
 8834		}
 8835	} else {
 8836		err = fill_phy_probe_info(np, parent, info);
 8837		if (err)
 8838			return err;
 8839
 8840		num_10g = count_10g_ports(info, &lowest_10g);
 8841		num_1g = count_1g_ports(info, &lowest_1g);
 8842
 8843		switch ((num_10g << 4) | num_1g) {
 8844		case 0x24:
 8845			if (lowest_1g == 10)
 8846				parent->plat_type = PLAT_TYPE_VF_P0;
 8847			else if (lowest_1g == 26)
 8848				parent->plat_type = PLAT_TYPE_VF_P1;
 8849			else
 8850				goto unknown_vg_1g_port;
 8851
 8852			/* fallthru */
 8853		case 0x22:
 8854			val = (phy_encode(PORT_TYPE_10G, 0) |
 8855			       phy_encode(PORT_TYPE_10G, 1) |
 8856			       phy_encode(PORT_TYPE_1G, 2) |
 8857			       phy_encode(PORT_TYPE_1G, 3));
 8858			break;
 8859
 8860		case 0x20:
 8861			val = (phy_encode(PORT_TYPE_10G, 0) |
 8862			       phy_encode(PORT_TYPE_10G, 1));
 8863			break;
 8864
 8865		case 0x10:
 8866			val = phy_encode(PORT_TYPE_10G, np->port);
 8867			break;
 8868
 8869		case 0x14:
 8870			if (lowest_1g == 10)
 8871				parent->plat_type = PLAT_TYPE_VF_P0;
 8872			else if (lowest_1g == 26)
 8873				parent->plat_type = PLAT_TYPE_VF_P1;
 8874			else
 8875				goto unknown_vg_1g_port;
 8876
 8877			/* fallthru */
 8878		case 0x13:
 8879			if ((lowest_10g & 0x7) == 0)
 8880				val = (phy_encode(PORT_TYPE_10G, 0) |
 8881				       phy_encode(PORT_TYPE_1G, 1) |
 8882				       phy_encode(PORT_TYPE_1G, 2) |
 8883				       phy_encode(PORT_TYPE_1G, 3));
 8884			else
 8885				val = (phy_encode(PORT_TYPE_1G, 0) |
 8886				       phy_encode(PORT_TYPE_10G, 1) |
 8887				       phy_encode(PORT_TYPE_1G, 2) |
 8888				       phy_encode(PORT_TYPE_1G, 3));
 8889			break;
 8890
 8891		case 0x04:
 8892			if (lowest_1g == 10)
 8893				parent->plat_type = PLAT_TYPE_VF_P0;
 8894			else if (lowest_1g == 26)
 8895				parent->plat_type = PLAT_TYPE_VF_P1;
 8896			else
 8897				goto unknown_vg_1g_port;
 8898
 8899			val = (phy_encode(PORT_TYPE_1G, 0) |
 8900			       phy_encode(PORT_TYPE_1G, 1) |
 8901			       phy_encode(PORT_TYPE_1G, 2) |
 8902			       phy_encode(PORT_TYPE_1G, 3));
 8903			break;
 8904
 8905		default:
 8906			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
 8907			       num_10g, num_1g);
 8908			return -EINVAL;
 8909		}
 8910	}
 8911
 8912	parent->port_phy = val;
 8913
 8914	if (parent->plat_type == PLAT_TYPE_NIU)
 8915		niu_n2_divide_channels(parent);
 8916	else
 8917		niu_divide_channels(parent, num_10g, num_1g);
 8918
 8919	niu_divide_rdc_groups(parent, num_10g, num_1g);
 8920
 8921	return 0;
 8922
 8923unknown_vg_1g_port:
 8924	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
 8925	return -EINVAL;
 8926}
 8927
 8928static int niu_probe_ports(struct niu *np)
 8929{
 8930	struct niu_parent *parent = np->parent;
 8931	int err, i;
 8932
 8933	if (parent->port_phy == PORT_PHY_UNKNOWN) {
 8934		err = walk_phys(np, parent);
 8935		if (err)
 8936			return err;
 8937
 8938		niu_set_ldg_timer_res(np, 2);
 8939		for (i = 0; i <= LDN_MAX; i++)
 8940			niu_ldn_irq_enable(np, i, 0);
 8941	}
 8942
 8943	if (parent->port_phy == PORT_PHY_INVALID)
 8944		return -EINVAL;
 8945
 8946	return 0;
 8947}
 8948
 8949static int niu_classifier_swstate_init(struct niu *np)
 8950{
 8951	struct niu_classifier *cp = &np->clas;
 8952
 8953	cp->tcam_top = (u16) np->port;
 8954	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
 8955	cp->h1_init = 0xffffffff;
 8956	cp->h2_init = 0xffff;
 8957
 8958	return fflp_early_init(np);
 8959}
 8960
 8961static void niu_link_config_init(struct niu *np)
 8962{
 8963	struct niu_link_config *lp = &np->link_config;
 8964
 8965	lp->advertising = (ADVERTISED_10baseT_Half |
 8966			   ADVERTISED_10baseT_Full |
 8967			   ADVERTISED_100baseT_Half |
 8968			   ADVERTISED_100baseT_Full |
 8969			   ADVERTISED_1000baseT_Half |
 8970			   ADVERTISED_1000baseT_Full |
 8971			   ADVERTISED_10000baseT_Full |
 8972			   ADVERTISED_Autoneg);
 8973	lp->speed = lp->active_speed = SPEED_INVALID;
 8974	lp->duplex = DUPLEX_FULL;
 8975	lp->active_duplex = DUPLEX_INVALID;
 8976	lp->autoneg = 1;
 8977#if 0
 8978	lp->loopback_mode = LOOPBACK_MAC;
 8979	lp->active_speed = SPEED_10000;
 8980	lp->active_duplex = DUPLEX_FULL;
 8981#else
 8982	lp->loopback_mode = LOOPBACK_DISABLED;
 8983#endif
 8984}
 8985
 8986static int niu_init_mac_ipp_pcs_base(struct niu *np)
 8987{
 8988	switch (np->port) {
 8989	case 0:
 8990		np->mac_regs = np->regs + XMAC_PORT0_OFF;
 8991		np->ipp_off  = 0x00000;
 8992		np->pcs_off  = 0x04000;
 8993		np->xpcs_off = 0x02000;
 8994		break;
 8995
 8996	case 1:
 8997		np->mac_regs = np->regs + XMAC_PORT1_OFF;
 8998		np->ipp_off  = 0x08000;
 8999		np->pcs_off  = 0x0a000;
 9000		np->xpcs_off = 0x08000;
 9001		break;
 9002
 9003	case 2:
 9004		np->mac_regs = np->regs + BMAC_PORT2_OFF;
 9005		np->ipp_off  = 0x04000;
 9006		np->pcs_off  = 0x0e000;
 9007		np->xpcs_off = ~0UL;
 9008		break;
 9009
 9010	case 3:
 9011		np->mac_regs = np->regs + BMAC_PORT3_OFF;
 9012		np->ipp_off  = 0x0c000;
 9013		np->pcs_off  = 0x12000;
 9014		np->xpcs_off = ~0UL;
 9015		break;
 9016
 9017	default:
 9018		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
 9019		return -EINVAL;
 9020	}
 9021
 9022	return 0;
 9023}
 9024
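/* Carve NIU_NUM_LDG evenly into per-port logical device groups and ask
 * for one MSI-X vector per group actually needed: one per RX channel,
 * one per TX channel, plus MAC (and MIF + SYSERR on port 0).  Fall
 * back to the single INTx vector if MSI-X cannot be enabled.
 */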
 9025static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
 9026{
 9027	struct msix_entry msi_vec[NIU_NUM_LDG];
 9028	struct niu_parent *parent = np->parent;
 9029	struct pci_dev *pdev = np->pdev;
 9030	int i, num_irqs;
 9031	u8 first_ldg;
 9032
 9033	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
 9034	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
 9035		ldg_num_map[i] = first_ldg + i;
 9036
 9037	num_irqs = (parent->rxchan_per_port[np->port] +
 9038		    parent->txchan_per_port[np->port] +
 9039		    (np->port == 0 ? 3 : 1));
 9040	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
 9041
 9042	for (i = 0; i < num_irqs; i++) {
 9043		msi_vec[i].vector = 0;
 9044		msi_vec[i].entry = i;
 9045	}
 9046
 9047	num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
 9048	if (num_irqs < 0) {
 9049		np->flags &= ~NIU_FLAGS_MSIX;
 9050		return;
 9051	}
 9052
 9053	np->flags |= NIU_FLAGS_MSIX;
 9054	for (i = 0; i < num_irqs; i++)
 9055		np->ldg[i].irq = msi_vec[i].vector;
 9056	np->num_ldg = num_irqs;
 9057}
 9058
 9059static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
 9060{
 9061#ifdef CONFIG_SPARC64
 9062	struct platform_device *op = np->op;
 9063	const u32 *int_prop;
 9064	int i;
 9065
 9066	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
 9067	if (!int_prop)
 9068		return -ENODEV;
 9069
 9070	for (i = 0; i < op->archdata.num_irqs; i++) {
 9071		ldg_num_map[i] = int_prop[i];
 9072		np->ldg[i].irq = op->archdata.irqs[i];
 9073	}
 9074
 9075	np->num_ldg = op->archdata.num_irqs;
 9076
 9077	return 0;
 9078#else
 9079	return -EINVAL;
 9080#endif
 9081}
 9082
 9083static int niu_ldg_init(struct niu *np)
 9084{
 9085	struct niu_parent *parent = np->parent;
 9086	u8 ldg_num_map[NIU_NUM_LDG];
 9087	int first_chan, num_chan;
 9088	int i, err, ldg_rotor;
 9089	u8 port;
 9090
 9091	np->num_ldg = 1;
 9092	np->ldg[0].irq = np->dev->irq;
 9093	if (parent->plat_type == PLAT_TYPE_NIU) {
 9094		err = niu_n2_irq_init(np, ldg_num_map);
 9095		if (err)
 9096			return err;
	} else {
		niu_try_msix(np, ldg_num_map);
	}
 9099
 9100	port = np->port;
 9101	for (i = 0; i < np->num_ldg; i++) {
 9102		struct niu_ldg *lp = &np->ldg[i];
 9103
 9104		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
 9105
 9106		lp->np = np;
 9107		lp->ldg_num = ldg_num_map[i];
 9108		lp->timer = 2; /* XXX */
 9109
 9110		/* On N2 NIU the firmware has setup the SID mappings so they go
 9111		 * to the correct values that will route the LDG to the proper
 9112		 * interrupt in the NCU interrupt table.
 9113		 */
 9114		if (np->parent->plat_type != PLAT_TYPE_NIU) {
 9115			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
 9116			if (err)
 9117				return err;
 9118		}
 9119	}
 9120
 9121	/* We adopt the LDG assignment ordering used by the N2 NIU
 9122	 * 'interrupt' properties because that simplifies a lot of
 9123	 * things.  This ordering is:
 9124	 *
 9125	 *	MAC
 9126	 *	MIF	(if port zero)
 9127	 *	SYSERR	(if port zero)
 9128	 *	RX channels
 9129	 *	TX channels
 9130	 */
 9131
 9132	ldg_rotor = 0;
 9133
 9134	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
 9135				  LDN_MAC(port));
 9136	if (err)
 9137		return err;
 9138
 9139	ldg_rotor++;
 9140	if (ldg_rotor == np->num_ldg)
 9141		ldg_rotor = 0;
 9142
 9143	if (port == 0) {
 9144		err = niu_ldg_assign_ldn(np, parent,
 9145					 ldg_num_map[ldg_rotor],
 9146					 LDN_MIF);
 9147		if (err)
 9148			return err;
 9149
 9150		ldg_rotor++;
 9151		if (ldg_rotor == np->num_ldg)
 9152			ldg_rotor = 0;
 9153
 9154		err = niu_ldg_assign_ldn(np, parent,
 9155					 ldg_num_map[ldg_rotor],
 9156					 LDN_DEVICE_ERROR);
 9157		if (err)
 9158			return err;
 9159
 9160		ldg_rotor++;
 9161		if (ldg_rotor == np->num_ldg)
 9162			ldg_rotor = 0;
 9163
 9164	}
 9165
 9166	first_chan = 0;
 9167	for (i = 0; i < port; i++)
 9168		first_chan += parent->rxchan_per_port[i];
 9169	num_chan = parent->rxchan_per_port[port];
 9170
 9171	for (i = first_chan; i < (first_chan + num_chan); i++) {
 9172		err = niu_ldg_assign_ldn(np, parent,
 9173					 ldg_num_map[ldg_rotor],
 9174					 LDN_RXDMA(i));
 9175		if (err)
 9176			return err;
 9177		ldg_rotor++;
 9178		if (ldg_rotor == np->num_ldg)
 9179			ldg_rotor = 0;
 9180	}
 9181
 9182	first_chan = 0;
 9183	for (i = 0; i < port; i++)
 9184		first_chan += parent->txchan_per_port[i];
 9185	num_chan = parent->txchan_per_port[port];
 9186	for (i = first_chan; i < (first_chan + num_chan); i++) {
 9187		err = niu_ldg_assign_ldn(np, parent,
 9188					 ldg_num_map[ldg_rotor],
 9189					 LDN_TXDMA(i));
 9190		if (err)
 9191			return err;
 9192		ldg_rotor++;
 9193		if (ldg_rotor == np->num_ldg)
 9194			ldg_rotor = 0;
 9195	}
 9196
 9197	return 0;
 9198}
 9199
 9200static void niu_ldg_free(struct niu *np)
 9201{
 9202	if (np->flags & NIU_FLAGS_MSIX)
 9203		pci_disable_msix(np->pdev);
 9204}
 9205
 9206static int niu_get_of_props(struct niu *np)
 9207{
 9208#ifdef CONFIG_SPARC64
 9209	struct net_device *dev = np->dev;
 9210	struct device_node *dp;
 9211	const char *phy_type;
 9212	const u8 *mac_addr;
 9213	const char *model;
 9214	int prop_len;
 9215
 9216	if (np->parent->plat_type == PLAT_TYPE_NIU)
 9217		dp = np->op->dev.of_node;
 9218	else
 9219		dp = pci_device_to_OF_node(np->pdev);
 9220
 9221	phy_type = of_get_property(dp, "phy-type", &prop_len);
 9222	if (!phy_type) {
 9223		netdev_err(dev, "%s: OF node lacks phy-type property\n",
 9224			   dp->full_name);
 9225		return -EINVAL;
 9226	}
 9227
 9228	if (!strcmp(phy_type, "none"))
 9229		return -ENODEV;
 9230
 9231	strcpy(np->vpd.phy_type, phy_type);
 9232
 9233	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
 9234		netdev_err(dev, "%s: Illegal phy string [%s]\n",
 9235			   dp->full_name, np->vpd.phy_type);
 9236		return -EINVAL;
 9237	}
 9238
 9239	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
 9240	if (!mac_addr) {
 9241		netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
 9242			   dp->full_name);
 9243		return -EINVAL;
 9244	}
 9245	if (prop_len != dev->addr_len) {
 9246		netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
 9247			   dp->full_name, prop_len);
 9248	}
 9249	memcpy(dev->dev_addr, mac_addr, dev->addr_len);
 9250	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
 9251		netdev_err(dev, "%s: OF MAC address is invalid\n",
 9252			   dp->full_name);
 9253		netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr);
 9254		return -EINVAL;
 9255	}
 9256
 9257	model = of_get_property(dp, "model", &prop_len);
 9258
 9259	if (model)
 9260		strcpy(np->vpd.model, model);
 9261
 9262	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
 9263		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 9264			NIU_FLAGS_HOTPLUG_PHY);
 9265	}
 9266
 9267	return 0;
 9268#else
 9269	return -EINVAL;
 9270#endif
 9271}
 9272
 9273static int niu_get_invariants(struct niu *np)
 9274{
 9275	int err, have_props;
 9276	u32 offset;
 9277
 9278	err = niu_get_of_props(np);
 9279	if (err == -ENODEV)
 9280		return err;
 9281
 9282	have_props = !err;
 9283
 9284	err = niu_init_mac_ipp_pcs_base(np);
 9285	if (err)
 9286		return err;
 9287
 9288	if (have_props) {
 9289		err = niu_get_and_validate_port(np);
 9290		if (err)
 9291			return err;
 9292
	} else {
 9294		if (np->parent->plat_type == PLAT_TYPE_NIU)
 9295			return -EINVAL;
 9296
 9297		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
 9298		offset = niu_pci_vpd_offset(np);
 9299		netif_printk(np, probe, KERN_DEBUG, np->dev,
 9300			     "%s() VPD offset [%08x]\n", __func__, offset);
 9301		if (offset)
 9302			niu_pci_vpd_fetch(np, offset);
 9303		nw64(ESPC_PIO_EN, 0);
 9304
 9305		if (np->flags & NIU_FLAGS_VPD_VALID) {
 9306			niu_pci_vpd_validate(np);
 9307			err = niu_get_and_validate_port(np);
 9308			if (err)
 9309				return err;
 9310		}
 9311
 9312		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
 9313			err = niu_get_and_validate_port(np);
 9314			if (err)
 9315				return err;
 9316			err = niu_pci_probe_sprom(np);
 9317			if (err)
 9318				return err;
 9319		}
 9320	}
 9321
 9322	err = niu_probe_ports(np);
 9323	if (err)
 9324		return err;
 9325
 9326	niu_ldg_init(np);
 9327
 9328	niu_classifier_swstate_init(np);
 9329	niu_link_config_init(np);
 9330
 9331	err = niu_determine_phy_disposition(np);
 9332	if (!err)
 9333		err = niu_init_link(np);
 9334
 9335	return err;
 9336}
 9337
 9338static LIST_HEAD(niu_parent_list);
 9339static DEFINE_MUTEX(niu_parent_lock);
 9340static int niu_parent_index;
 9341
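/* Each shared "niu-board" platform device exposes the probed board
 * configuration through read-only sysfs attributes: port_phy,
 * plat_type, rxchan_per_port, txchan_per_port and num_ports.
 */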
 9342static ssize_t show_port_phy(struct device *dev,
 9343			     struct device_attribute *attr, char *buf)
 9344{
 9345	struct platform_device *plat_dev = to_platform_device(dev);
 9346	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9347	u32 port_phy = p->port_phy;
 9348	char *orig_buf = buf;
 9349	int i;
 9350
 9351	if (port_phy == PORT_PHY_UNKNOWN ||
 9352	    port_phy == PORT_PHY_INVALID)
 9353		return 0;
 9354
 9355	for (i = 0; i < p->num_ports; i++) {
 9356		const char *type_str;
 9357		int type;
 9358
 9359		type = phy_decode(port_phy, i);
 9360		if (type == PORT_TYPE_10G)
 9361			type_str = "10G";
 9362		else
 9363			type_str = "1G";
 9364		buf += sprintf(buf,
 9365			       (i == 0) ? "%s" : " %s",
 9366			       type_str);
 9367	}
 9368	buf += sprintf(buf, "\n");
 9369	return buf - orig_buf;
 9370}
 9371
 9372static ssize_t show_plat_type(struct device *dev,
 9373			      struct device_attribute *attr, char *buf)
 9374{
 9375	struct platform_device *plat_dev = to_platform_device(dev);
 9376	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9377	const char *type_str;
 9378
 9379	switch (p->plat_type) {
 9380	case PLAT_TYPE_ATLAS:
 9381		type_str = "atlas";
 9382		break;
 9383	case PLAT_TYPE_NIU:
 9384		type_str = "niu";
 9385		break;
 9386	case PLAT_TYPE_VF_P0:
 9387		type_str = "vf_p0";
 9388		break;
 9389	case PLAT_TYPE_VF_P1:
 9390		type_str = "vf_p1";
 9391		break;
 9392	default:
 9393		type_str = "unknown";
 9394		break;
 9395	}
 9396
 9397	return sprintf(buf, "%s\n", type_str);
 9398}
 9399
 9400static ssize_t __show_chan_per_port(struct device *dev,
 9401				    struct device_attribute *attr, char *buf,
 9402				    int rx)
 9403{
 9404	struct platform_device *plat_dev = to_platform_device(dev);
 9405	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9406	char *orig_buf = buf;
 9407	u8 *arr;
 9408	int i;
 9409
 9410	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
 9411
 9412	for (i = 0; i < p->num_ports; i++) {
 9413		buf += sprintf(buf,
 9414			       (i == 0) ? "%d" : " %d",
 9415			       arr[i]);
 9416	}
 9417	buf += sprintf(buf, "\n");
 9418
 9419	return buf - orig_buf;
 9420}
 9421
 9422static ssize_t show_rxchan_per_port(struct device *dev,
 9423				    struct device_attribute *attr, char *buf)
 9424{
 9425	return __show_chan_per_port(dev, attr, buf, 1);
 9426}
 9427
 9428static ssize_t show_txchan_per_port(struct device *dev,
 9429				    struct device_attribute *attr, char *buf)
 9430{
	return __show_chan_per_port(dev, attr, buf, 0);
 9432}
 9433
 9434static ssize_t show_num_ports(struct device *dev,
 9435			      struct device_attribute *attr, char *buf)
 9436{
 9437	struct platform_device *plat_dev = to_platform_device(dev);
 9438	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 9439
 9440	return sprintf(buf, "%d\n", p->num_ports);
 9441}
 9442
 9443static struct device_attribute niu_parent_attributes[] = {
 9444	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
 9445	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
 9446	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
 9447	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
 9448	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
 9449	{}
 9450};
 9451
 9452static struct niu_parent *niu_new_parent(struct niu *np,
 9453					 union niu_parent_id *id, u8 ptype)
 9454{
 9455	struct platform_device *plat_dev;
 9456	struct niu_parent *p;
 9457	int i;
 9458
 9459	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
 9460						   NULL, 0);
 9461	if (IS_ERR(plat_dev))
 9462		return NULL;
 9463
 9464	for (i = 0; niu_parent_attributes[i].attr.name; i++) {
 9465		int err = device_create_file(&plat_dev->dev,
 9466					     &niu_parent_attributes[i]);
 9467		if (err)
 9468			goto fail_unregister;
 9469	}
 9470
 9471	p = kzalloc(sizeof(*p), GFP_KERNEL);
 9472	if (!p)
 9473		goto fail_unregister;
 9474
 9475	p->index = niu_parent_index++;
 9476
 9477	plat_dev->dev.platform_data = p;
 9478	p->plat_dev = plat_dev;
 9479
 9480	memcpy(&p->id, id, sizeof(*id));
 9481	p->plat_type = ptype;
 9482	INIT_LIST_HEAD(&p->list);
 9483	atomic_set(&p->refcnt, 0);
 9484	list_add(&p->list, &niu_parent_list);
 9485	spin_lock_init(&p->lock);
 9486
 9487	p->rxdma_clock_divider = 7500;
 9488
 9489	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
 9490	if (p->plat_type == PLAT_TYPE_NIU)
 9491		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
 9492
 9493	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
 9494		int index = i - CLASS_CODE_USER_PROG1;
 9495
 9496		p->tcam_key[index] = TCAM_KEY_TSEL;
 9497		p->flow_key[index] = (FLOW_KEY_IPSA |
 9498				      FLOW_KEY_IPDA |
 9499				      FLOW_KEY_PROTO |
 9500				      (FLOW_KEY_L4_BYTE12 <<
 9501				       FLOW_KEY_L4_0_SHIFT) |
 9502				      (FLOW_KEY_L4_BYTE12 <<
 9503				       FLOW_KEY_L4_1_SHIFT));
 9504	}
 9505
 9506	for (i = 0; i < LDN_MAX + 1; i++)
 9507		p->ldg_map[i] = LDG_INVALID;
 9508
 9509	return p;
 9510
 9511fail_unregister:
 9512	platform_device_unregister(plat_dev);
 9513	return NULL;
 9514}
 9515
 9516static struct niu_parent *niu_get_parent(struct niu *np,
 9517					 union niu_parent_id *id, u8 ptype)
 9518{
 9519	struct niu_parent *p, *tmp;
 9520	int port = np->port;
 9521
 9522	mutex_lock(&niu_parent_lock);
 9523	p = NULL;
 9524	list_for_each_entry(tmp, &niu_parent_list, list) {
 9525		if (!memcmp(id, &tmp->id, sizeof(*id))) {
 9526			p = tmp;
 9527			break;
 9528		}
 9529	}
 9530	if (!p)
 9531		p = niu_new_parent(np, id, ptype);
 9532
 9533	if (p) {
		char port_name[8];
 9535		int err;
 9536
 9537		sprintf(port_name, "port%d", port);
 9538		err = sysfs_create_link(&p->plat_dev->dev.kobj,
 9539					&np->device->kobj,
 9540					port_name);
 9541		if (!err) {
 9542			p->ports[port] = np;
 9543			atomic_inc(&p->refcnt);
 9544		}
 9545	}
 9546	mutex_unlock(&niu_parent_lock);
 9547
 9548	return p;
 9549}
 9550
 9551static void niu_put_parent(struct niu *np)
 9552{
 9553	struct niu_parent *p = np->parent;
 9554	u8 port = np->port;
	char port_name[8];
 9556
 9557	BUG_ON(!p || p->ports[port] != np);
 9558
 9559	netif_printk(np, probe, KERN_DEBUG, np->dev,
 9560		     "%s() port[%u]\n", __func__, port);
 9561
 9562	sprintf(port_name, "port%d", port);
 9563
 9564	mutex_lock(&niu_parent_lock);
 9565
 9566	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
 9567
 9568	p->ports[port] = NULL;
 9569	np->parent = NULL;
 9570
 9571	if (atomic_dec_and_test(&p->refcnt)) {
 9572		list_del(&p->list);
 9573		platform_device_unregister(p->plat_dev);
 9574	}
 9575
 9576	mutex_unlock(&niu_parent_lock);
 9577}
 9578
 9579static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
 9580				    u64 *handle, gfp_t flag)
 9581{
 9582	dma_addr_t dh;
 9583	void *ret;
 9584
 9585	ret = dma_alloc_coherent(dev, size, &dh, flag);
 9586	if (ret)
 9587		*handle = dh;
 9588	return ret;
 9589}
 9590
 9591static void niu_pci_free_coherent(struct device *dev, size_t size,
 9592				  void *cpu_addr, u64 handle)
 9593{
 9594	dma_free_coherent(dev, size, cpu_addr, handle);
 9595}
 9596
 9597static u64 niu_pci_map_page(struct device *dev, struct page *page,
 9598			    unsigned long offset, size_t size,
 9599			    enum dma_data_direction direction)
 9600{
 9601	return dma_map_page(dev, page, offset, size, direction);
 9602}
 9603
 9604static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
 9605			       size_t size, enum dma_data_direction direction)
 9606{
 9607	dma_unmap_page(dev, dma_address, size, direction);
 9608}
 9609
 9610static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
 9611			      size_t size,
 9612			      enum dma_data_direction direction)
 9613{
 9614	return dma_map_single(dev, cpu_addr, size, direction);
 9615}
 9616
 9617static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
 9618				 size_t size,
 9619				 enum dma_data_direction direction)
 9620{
 9621	dma_unmap_single(dev, dma_address, size, direction);
 9622}
 9623
 9624static const struct niu_ops niu_pci_ops = {
 9625	.alloc_coherent	= niu_pci_alloc_coherent,
 9626	.free_coherent	= niu_pci_free_coherent,
 9627	.map_page	= niu_pci_map_page,
 9628	.unmap_page	= niu_pci_unmap_page,
 9629	.map_single	= niu_pci_map_single,
 9630	.unmap_single	= niu_pci_unmap_single,
 9631};
 9632
 9633static void niu_driver_version(void)
 9634{
 9635	static int niu_version_printed;
 9636
 9637	if (niu_version_printed++ == 0)
 9638		pr_info("%s", version);
 9639}
 9640
 9641static struct net_device *niu_alloc_and_init(struct device *gen_dev,
 9642					     struct pci_dev *pdev,
 9643					     struct platform_device *op,
 9644					     const struct niu_ops *ops, u8 port)
 9645{
 9646	struct net_device *dev;
 9647	struct niu *np;
 9648
 9649	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
 9650	if (!dev)
 9651		return NULL;
 9652
 9653	SET_NETDEV_DEV(dev, gen_dev);
 9654
 9655	np = netdev_priv(dev);
 9656	np->dev = dev;
 9657	np->pdev = pdev;
 9658	np->op = op;
 9659	np->device = gen_dev;
 9660	np->ops = ops;
 9661
 9662	np->msg_enable = niu_debug;
 9663
 9664	spin_lock_init(&np->lock);
 9665	INIT_WORK(&np->reset_task, niu_reset_task);
 9666
 9667	np->port = port;
 9668
 9669	return dev;
 9670}
 9671
 9672static const struct net_device_ops niu_netdev_ops = {
 9673	.ndo_open		= niu_open,
 9674	.ndo_stop		= niu_close,
 9675	.ndo_start_xmit		= niu_start_xmit,
 9676	.ndo_get_stats64	= niu_get_stats,
 9677	.ndo_set_rx_mode	= niu_set_rx_mode,
 9678	.ndo_validate_addr	= eth_validate_addr,
 9679	.ndo_set_mac_address	= niu_set_mac_addr,
 9680	.ndo_do_ioctl		= niu_ioctl,
 9681	.ndo_tx_timeout		= niu_tx_timeout,
 9682	.ndo_change_mtu		= niu_change_mtu,
 9683};
 9684
 9685static void niu_assign_netdev_ops(struct net_device *dev)
 9686{
 9687	dev->netdev_ops = &niu_netdev_ops;
 9688	dev->ethtool_ops = &niu_ethtool_ops;
 9689	dev->watchdog_timeo = NIU_TX_TIMEOUT;
 9690}
 9691
 9692static void niu_device_announce(struct niu *np)
 9693{
 9694	struct net_device *dev = np->dev;
 9695
 9696	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
 9697
 9698	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
 9699		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
 9700				dev->name,
 9701				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
 9702				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
 9703				(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
 9704				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
 9705				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
 9706				np->vpd.phy_type);
 9707	} else {
 9708		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
 9709				dev->name,
 9710				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
 9711				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
 9712				(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
 9713				 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
 9714				  "COPPER")),
 9715				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
 9716				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
 9717				np->vpd.phy_type);
 9718	}
 9719}
 9720
 9721static void niu_set_basic_features(struct net_device *dev)
 9722{
 9723	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
 9724	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
 9725}
 9726
 9727static int niu_pci_init_one(struct pci_dev *pdev,
 9728			    const struct pci_device_id *ent)
 9729{
 9730	union niu_parent_id parent_id;
 9731	struct net_device *dev;
 9732	struct niu *np;
 9733	int err;
 9734	u64 dma_mask;
 9735
 9736	niu_driver_version();
 9737
 9738	err = pci_enable_device(pdev);
 9739	if (err) {
 9740		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 9741		return err;
 9742	}
 9743
 9744	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
 9745	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 9746		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
 9747		err = -ENODEV;
 9748		goto err_out_disable_pdev;
 9749	}
 9750
 9751	err = pci_request_regions(pdev, DRV_MODULE_NAME);
 9752	if (err) {
 9753		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
 9754		goto err_out_disable_pdev;
 9755	}
 9756
 9757	if (!pci_is_pcie(pdev)) {
 9758		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
 9759		err = -ENODEV;
 9760		goto err_out_free_res;
 9761	}
 9762
 9763	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
 9764				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
 9765	if (!dev) {
 9766		err = -ENOMEM;
 9767		goto err_out_free_res;
 9768	}
 9769	np = netdev_priv(dev);
 9770
 9771	memset(&parent_id, 0, sizeof(parent_id));
 9772	parent_id.pci.domain = pci_domain_nr(pdev->bus);
 9773	parent_id.pci.bus = pdev->bus->number;
 9774	parent_id.pci.device = PCI_SLOT(pdev->devfn);
 9775
 9776	np->parent = niu_get_parent(np, &parent_id,
 9777				    PLAT_TYPE_ATLAS);
 9778	if (!np->parent) {
 9779		err = -ENOMEM;
 9780		goto err_out_free_dev;
 9781	}
 9782
 9783	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
 9784		PCI_EXP_DEVCTL_NOSNOOP_EN,
 9785		PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
 9786		PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE |
 9787		PCI_EXP_DEVCTL_RELAX_EN);
 9788
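	/* Neptune can generate 44-bit DMA addresses; advertise HIGHDMA
	 * when that works and fall back to a 32-bit mask otherwise.
	 */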
 9789	dma_mask = DMA_BIT_MASK(44);
 9790	err = pci_set_dma_mask(pdev, dma_mask);
 9791	if (!err) {
 9792		dev->features |= NETIF_F_HIGHDMA;
 9793		err = pci_set_consistent_dma_mask(pdev, dma_mask);
 9794		if (err) {
 9795			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
 9796			goto err_out_release_parent;
 9797		}
 9798	}
 9799	if (err) {
 9800		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 9801		if (err) {
 9802			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
 9803			goto err_out_release_parent;
 9804		}
 9805	}
 9806
 9807	niu_set_basic_features(dev);
 9808
 9809	dev->priv_flags |= IFF_UNICAST_FLT;
 9810
 9811	np->regs = pci_ioremap_bar(pdev, 0);
 9812	if (!np->regs) {
 9813		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
 9814		err = -ENOMEM;
 9815		goto err_out_release_parent;
 9816	}
 9817
 9818	pci_set_master(pdev);
 9819	pci_save_state(pdev);
 9820
 9821	dev->irq = pdev->irq;
 9822
 9823	/* MTU range: 68 - 9216 */
 9824	dev->min_mtu = ETH_MIN_MTU;
 9825	dev->max_mtu = NIU_MAX_MTU;
 9826
 9827	niu_assign_netdev_ops(dev);
 9828
 9829	err = niu_get_invariants(np);
 9830	if (err) {
 9831		if (err != -ENODEV)
 9832			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
 9833		goto err_out_iounmap;
 9834	}
 9835
 9836	err = register_netdev(dev);
 9837	if (err) {
 9838		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
 9839		goto err_out_iounmap;
 9840	}
 9841
 9842	pci_set_drvdata(pdev, dev);
 9843
 9844	niu_device_announce(np);
 9845
 9846	return 0;
 9847
 9848err_out_iounmap:
 9849	if (np->regs) {
 9850		iounmap(np->regs);
 9851		np->regs = NULL;
 9852	}
 9853
 9854err_out_release_parent:
 9855	niu_put_parent(np);
 9856
 9857err_out_free_dev:
 9858	free_netdev(dev);
 9859
 9860err_out_free_res:
 9861	pci_release_regions(pdev);
 9862
 9863err_out_disable_pdev:
 9864	pci_disable_device(pdev);
 9865
 9866	return err;
 9867}
 9868
 9869static void niu_pci_remove_one(struct pci_dev *pdev)
 9870{
 9871	struct net_device *dev = pci_get_drvdata(pdev);
 9872
 9873	if (dev) {
 9874		struct niu *np = netdev_priv(dev);
 9875
 9876		unregister_netdev(dev);
 9877		if (np->regs) {
 9878			iounmap(np->regs);
 9879			np->regs = NULL;
 9880		}
 9881
 9882		niu_ldg_free(np);
 9883
 9884		niu_put_parent(np);
 9885
 9886		free_netdev(dev);
 9887		pci_release_regions(pdev);
 9888		pci_disable_device(pdev);
 9889	}
 9890}
 9891
 9892static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
 9893{
 9894	struct net_device *dev = pci_get_drvdata(pdev);
 9895	struct niu *np = netdev_priv(dev);
 9896	unsigned long flags;
 9897
 9898	if (!netif_running(dev))
 9899		return 0;
 9900
 9901	flush_work(&np->reset_task);
 9902	niu_netif_stop(np);
 9903
 9904	del_timer_sync(&np->timer);
 9905
 9906	spin_lock_irqsave(&np->lock, flags);
 9907	niu_enable_interrupts(np, 0);
 9908	spin_unlock_irqrestore(&np->lock, flags);
 9909
 9910	netif_device_detach(dev);
 9911
 9912	spin_lock_irqsave(&np->lock, flags);
 9913	niu_stop_hw(np);
 9914	spin_unlock_irqrestore(&np->lock, flags);
 9915
 9916	pci_save_state(pdev);
 9917
 9918	return 0;
 9919}
 9920
 9921static int niu_resume(struct pci_dev *pdev)
 9922{
 9923	struct net_device *dev = pci_get_drvdata(pdev);
 9924	struct niu *np = netdev_priv(dev);
 9925	unsigned long flags;
 9926	int err;
 9927
 9928	if (!netif_running(dev))
 9929		return 0;
 9930
 9931	pci_restore_state(pdev);
 9932
 9933	netif_device_attach(dev);
 9934
 9935	spin_lock_irqsave(&np->lock, flags);
 9936
 9937	err = niu_init_hw(np);
 9938	if (!err) {
 9939		np->timer.expires = jiffies + HZ;
 9940		add_timer(&np->timer);
 9941		niu_netif_start(np);
 9942	}
 9943
 9944	spin_unlock_irqrestore(&np->lock, flags);
 9945
 9946	return err;
 9947}
 9948
 9949static struct pci_driver niu_pci_driver = {
 9950	.name		= DRV_MODULE_NAME,
 9951	.id_table	= niu_pci_tbl,
 9952	.probe		= niu_pci_init_one,
 9953	.remove		= niu_pci_remove_one,
 9954	.suspend	= niu_suspend,
 9955	.resume		= niu_resume,
 9956};
 9957
 9958#ifdef CONFIG_SPARC64
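/* On the integrated Niagara-2 NIU the device DMAs with raw physical
 * addresses, so these ops are little more than __pa()/page_to_phys()
 * identity mappings with no unmap work to do.
 */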
 9959static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
 9960				     u64 *dma_addr, gfp_t flag)
 9961{
 9962	unsigned long order = get_order(size);
 9963	unsigned long page = __get_free_pages(flag, order);
 9964
 9965	if (page == 0UL)
 9966		return NULL;
 9967	memset((char *)page, 0, PAGE_SIZE << order);
 9968	*dma_addr = __pa(page);
 9969
 9970	return (void *) page;
 9971}
 9972
 9973static void niu_phys_free_coherent(struct device *dev, size_t size,
 9974				   void *cpu_addr, u64 handle)
 9975{
 9976	unsigned long order = get_order(size);
 9977
 9978	free_pages((unsigned long) cpu_addr, order);
 9979}
 9980
 9981static u64 niu_phys_map_page(struct device *dev, struct page *page,
 9982			     unsigned long offset, size_t size,
 9983			     enum dma_data_direction direction)
 9984{
 9985	return page_to_phys(page) + offset;
 9986}
 9987
 9988static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
 9989				size_t size, enum dma_data_direction direction)
 9990{
 9991	/* Nothing to do.  */
 9992}
 9993
 9994static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
 9995			       size_t size,
 9996			       enum dma_data_direction direction)
 9997{
 9998	return __pa(cpu_addr);
 9999}
10000
10001static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
10002				  size_t size,
10003				  enum dma_data_direction direction)
10004{
10005	/* Nothing to do.  */
10006}
10007
10008static const struct niu_ops niu_phys_ops = {
10009	.alloc_coherent	= niu_phys_alloc_coherent,
10010	.free_coherent	= niu_phys_free_coherent,
10011	.map_page	= niu_phys_map_page,
10012	.unmap_page	= niu_phys_unmap_page,
10013	.map_single	= niu_phys_map_single,
10014	.unmap_single	= niu_phys_unmap_single,
10015};
10016
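/* OF probe path (N2 NIU).  resource[1] holds the main register block,
 * while resources [2] and [3] map the two auxiliary "vregs" register
 * ranges; the low bit of the first "reg" cell selects the port.
 */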
10017static int niu_of_probe(struct platform_device *op)
10018{
10019	union niu_parent_id parent_id;
10020	struct net_device *dev;
10021	struct niu *np;
10022	const u32 *reg;
10023	int err;
10024
10025	niu_driver_version();
10026
10027	reg = of_get_property(op->dev.of_node, "reg", NULL);
10028	if (!reg) {
10029		dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
10030			op->dev.of_node->full_name);
10031		return -ENODEV;
10032	}
10033
10034	dev = niu_alloc_and_init(&op->dev, NULL, op,
10035				 &niu_phys_ops, reg[0] & 0x1);
10036	if (!dev) {
10037		err = -ENOMEM;
10038		goto err_out;
10039	}
10040	np = netdev_priv(dev);
10041
10042	memset(&parent_id, 0, sizeof(parent_id));
10043	parent_id.of = of_get_parent(op->dev.of_node);
10044
10045	np->parent = niu_get_parent(np, &parent_id,
10046				    PLAT_TYPE_NIU);
10047	if (!np->parent) {
10048		err = -ENOMEM;
10049		goto err_out_free_dev;
10050	}
10051
10052	niu_set_basic_features(dev);
10053
10054	np->regs = of_ioremap(&op->resource[1], 0,
10055			      resource_size(&op->resource[1]),
10056			      "niu regs");
10057	if (!np->regs) {
10058		dev_err(&op->dev, "Cannot map device registers, aborting\n");
10059		err = -ENOMEM;
10060		goto err_out_release_parent;
10061	}
10062
10063	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
10064				    resource_size(&op->resource[2]),
10065				    "niu vregs-1");
10066	if (!np->vir_regs_1) {
10067		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
10068		err = -ENOMEM;
10069		goto err_out_iounmap;
10070	}
10071
10072	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
10073				    resource_size(&op->resource[3]),
10074				    "niu vregs-2");
10075	if (!np->vir_regs_2) {
10076		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
10077		err = -ENOMEM;
10078		goto err_out_iounmap;
10079	}
10080
10081	niu_assign_netdev_ops(dev);
10082
10083	err = niu_get_invariants(np);
10084	if (err) {
10085		if (err != -ENODEV)
10086			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
10087		goto err_out_iounmap;
10088	}
10089
10090	err = register_netdev(dev);
10091	if (err) {
10092		dev_err(&op->dev, "Cannot register net device, aborting\n");
10093		goto err_out_iounmap;
10094	}
10095
10096	platform_set_drvdata(op, dev);
10097
10098	niu_device_announce(np);
10099
10100	return 0;
10101
10102err_out_iounmap:
10103	if (np->vir_regs_1) {
10104		of_iounmap(&op->resource[2], np->vir_regs_1,
10105			   resource_size(&op->resource[2]));
10106		np->vir_regs_1 = NULL;
10107	}
10108
10109	if (np->vir_regs_2) {
10110		of_iounmap(&op->resource[3], np->vir_regs_2,
10111			   resource_size(&op->resource[3]));
10112		np->vir_regs_2 = NULL;
10113	}
10114
10115	if (np->regs) {
10116		of_iounmap(&op->resource[1], np->regs,
10117			   resource_size(&op->resource[1]));
10118		np->regs = NULL;
10119	}
10120
10121err_out_release_parent:
10122	niu_put_parent(np);
10123
10124err_out_free_dev:
10125	free_netdev(dev);
10126
10127err_out:
10128	return err;
10129}
10130
10131static int niu_of_remove(struct platform_device *op)
10132{
10133	struct net_device *dev = platform_get_drvdata(op);
10134
10135	if (dev) {
10136		struct niu *np = netdev_priv(dev);
10137
10138		unregister_netdev(dev);
10139
10140		if (np->vir_regs_1) {
10141			of_iounmap(&op->resource[2], np->vir_regs_1,
10142				   resource_size(&op->resource[2]));
10143			np->vir_regs_1 = NULL;
10144		}
10145
10146		if (np->vir_regs_2) {
10147			of_iounmap(&op->resource[3], np->vir_regs_2,
10148				   resource_size(&op->resource[3]));
10149			np->vir_regs_2 = NULL;
10150		}
10151
10152		if (np->regs) {
10153			of_iounmap(&op->resource[1], np->regs,
10154				   resource_size(&op->resource[1]));
10155			np->regs = NULL;
10156		}
10157
10158		niu_ldg_free(np);
10159
10160		niu_put_parent(np);
10161
10162		free_netdev(dev);
10163	}
10164	return 0;
10165}
10166
10167static const struct of_device_id niu_match[] = {
10168	{
10169		.name = "network",
10170		.compatible = "SUNW,niusl",
10171	},
10172	{},
10173};
10174MODULE_DEVICE_TABLE(of, niu_match);
10175
10176static struct platform_driver niu_of_driver = {
10177	.driver = {
10178		.name = "niu",
10179		.of_match_table = niu_match,
10180	},
10181	.probe		= niu_of_probe,
10182	.remove		= niu_of_remove,
10183};
10184
10185#endif /* CONFIG_SPARC64 */
10186
10187static int __init niu_init(void)
10188{
10189	int err = 0;
10190
10191	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
10192
10193	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
10194
10195#ifdef CONFIG_SPARC64
10196	err = platform_driver_register(&niu_of_driver);
10197#endif
10198
10199	if (!err) {
10200		err = pci_register_driver(&niu_pci_driver);
10201#ifdef CONFIG_SPARC64
10202		if (err)
10203			platform_driver_unregister(&niu_of_driver);
10204#endif
10205	}
10206
10207	return err;
10208}
10209
10210static void __exit niu_exit(void)
10211{
10212	pci_unregister_driver(&niu_pci_driver);
10213#ifdef CONFIG_SPARC64
10214	platform_driver_unregister(&niu_of_driver);
10215#endif
10216}
10217
10218module_init(niu_init);
10219module_exit(niu_exit);