   1/*
   2 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
   3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
   4 *
   5 * Based on the 64360 driver from:
   6 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
   7 *		      Rabeeh Khoury <rabeeh@marvell.com>
   8 *
   9 * Copyright (C) 2003 PMC-Sierra, Inc.,
  10 *	written by Manish Lachwani
  11 *
  12 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
  13 *
  14 * Copyright (C) 2004-2006 MontaVista Software, Inc.
  15 *			   Dale Farnsworth <dale@farnsworth.org>
  16 *
  17 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
  18 *				     <sjhill@realitydiluted.com>
  19 *
  20 * Copyright (C) 2007-2008 Marvell Semiconductor
  21 *			   Lennert Buytenhek <buytenh@marvell.com>
  22 *
  23 * This program is free software; you can redistribute it and/or
  24 * modify it under the terms of the GNU General Public License
  25 * as published by the Free Software Foundation; either version 2
  26 * of the License, or (at your option) any later version.
  27 *
  28 * This program is distributed in the hope that it will be useful,
  29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  31 * GNU General Public License for more details.
  32 *
  33 * You should have received a copy of the GNU General Public License
  34 * along with this program; if not, write to the Free Software
  35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  36 */
  37
  38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  39
  40#include <linux/init.h>
  41#include <linux/dma-mapping.h>
  42#include <linux/in.h>
  43#include <linux/ip.h>
  44#include <linux/tcp.h>
  45#include <linux/udp.h>
  46#include <linux/etherdevice.h>
  47#include <linux/delay.h>
  48#include <linux/ethtool.h>
  49#include <linux/platform_device.h>
  50#include <linux/module.h>
  51#include <linux/kernel.h>
  52#include <linux/spinlock.h>
  53#include <linux/workqueue.h>
  54#include <linux/phy.h>
  55#include <linux/mv643xx_eth.h>
  56#include <linux/io.h>
  57#include <linux/types.h>
  58#include <linux/inet_lro.h>
  59#include <linux/slab.h>
  60#include <asm/system.h>
  61
  62static char mv643xx_eth_driver_name[] = "mv643xx_eth";
  63static char mv643xx_eth_driver_version[] = "1.4";
  64
  65
  66/*
  67 * Registers shared between all ports.
  68 */
  69#define PHY_ADDR			0x0000
  70#define SMI_REG				0x0004
  71#define  SMI_BUSY			0x10000000
  72#define  SMI_READ_VALID			0x08000000
  73#define  SMI_OPCODE_READ		0x04000000
  74#define  SMI_OPCODE_WRITE		0x00000000
  75#define ERR_INT_CAUSE			0x0080
  76#define  ERR_INT_SMI_DONE		0x00000010
  77#define ERR_INT_MASK			0x0084
  78#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
  79#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
  80#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
  81#define WINDOW_BAR_ENABLE		0x0290
  82#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
  83
  84/*
  85 * Main per-port registers.  These live at offset 0x0400 for
  86 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
  87 */
  88#define PORT_CONFIG			0x0000
  89#define  UNICAST_PROMISCUOUS_MODE	0x00000001
  90#define PORT_CONFIG_EXT			0x0004
  91#define MAC_ADDR_LOW			0x0014
  92#define MAC_ADDR_HIGH			0x0018
  93#define SDMA_CONFIG			0x001c
  94#define  TX_BURST_SIZE_16_64BIT		0x01000000
  95#define  TX_BURST_SIZE_4_64BIT		0x00800000
  96#define  BLM_TX_NO_SWAP			0x00000020
  97#define  BLM_RX_NO_SWAP			0x00000010
  98#define  RX_BURST_SIZE_16_64BIT		0x00000008
  99#define  RX_BURST_SIZE_4_64BIT		0x00000004
 100#define PORT_SERIAL_CONTROL		0x003c
 101#define  SET_MII_SPEED_TO_100		0x01000000
 102#define  SET_GMII_SPEED_TO_1000		0x00800000
 103#define  SET_FULL_DUPLEX_MODE		0x00200000
 104#define  MAX_RX_PACKET_9700BYTE		0x000a0000
 105#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
 106#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
 107#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
 108#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
 109#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
 110#define  FORCE_LINK_PASS		0x00000002
 111#define  SERIAL_PORT_ENABLE		0x00000001
 112#define PORT_STATUS			0x0044
 113#define  TX_FIFO_EMPTY			0x00000400
 114#define  TX_IN_PROGRESS			0x00000080
 115#define  PORT_SPEED_MASK		0x00000030
 116#define  PORT_SPEED_1000		0x00000010
 117#define  PORT_SPEED_100			0x00000020
 118#define  PORT_SPEED_10			0x00000000
 119#define  FLOW_CONTROL_ENABLED		0x00000008
 120#define  FULL_DUPLEX			0x00000004
 121#define  LINK_UP			0x00000002
 122#define TXQ_COMMAND			0x0048
 123#define TXQ_FIX_PRIO_CONF		0x004c
 124#define TX_BW_RATE			0x0050
 125#define TX_BW_MTU			0x0058
 126#define TX_BW_BURST			0x005c
 127#define INT_CAUSE			0x0060
 128#define  INT_TX_END			0x07f80000
 129#define  INT_TX_END_0			0x00080000
 130#define  INT_RX				0x000003fc
 131#define  INT_RX_0			0x00000004
 132#define  INT_EXT			0x00000002
 133#define INT_CAUSE_EXT			0x0064
 134#define  INT_EXT_LINK_PHY		0x00110000
 135#define  INT_EXT_TX			0x000000ff
 136#define INT_MASK			0x0068
 137#define INT_MASK_EXT			0x006c
 138#define TX_FIFO_URGENT_THRESHOLD	0x0074
 139#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
 140#define TX_BW_RATE_MOVED		0x00e0
 141#define TX_BW_MTU_MOVED			0x00e8
 142#define TX_BW_BURST_MOVED		0x00ec
 143#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
 144#define RXQ_COMMAND			0x0280
 145#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
 146#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
 147#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
 148#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))
 149
 150/*
 151 * Misc per-port registers.
 152 */
 153#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
 154#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
 155#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
 156#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
 157
 158
 159/*
 160 * SDMA configuration register default value.
 161 */
 162#if defined(__BIG_ENDIAN)
 163#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
 164		(RX_BURST_SIZE_4_64BIT	|	\
 165		 TX_BURST_SIZE_4_64BIT)
 166#elif defined(__LITTLE_ENDIAN)
 167#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
 168		(RX_BURST_SIZE_4_64BIT	|	\
 169		 BLM_RX_NO_SWAP		|	\
 170		 BLM_TX_NO_SWAP		|	\
 171		 TX_BURST_SIZE_4_64BIT)
 172#else
 173#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
 174#endif
 175
 176
 177/*
 178 * Misc definitions.
 179 */
 180#define DEFAULT_RX_QUEUE_SIZE	128
 181#define DEFAULT_TX_QUEUE_SIZE	256
 182#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
 183
 184
 185/*
 186 * RX/TX descriptors.
 187 */
 188#if defined(__BIG_ENDIAN)
 189struct rx_desc {
 190	u16 byte_cnt;		/* Descriptor buffer byte count		*/
 191	u16 buf_size;		/* Buffer size				*/
 192	u32 cmd_sts;		/* Descriptor command status		*/
 193	u32 next_desc_ptr;	/* Next descriptor pointer		*/
 194	u32 buf_ptr;		/* Descriptor buffer pointer		*/
 195};
 196
 197struct tx_desc {
 198	u16 byte_cnt;		/* buffer byte count			*/
 199	u16 l4i_chk;		/* CPU provided TCP checksum		*/
 200	u32 cmd_sts;		/* Command/status field			*/
 201	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
 202	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
 203};
 204#elif defined(__LITTLE_ENDIAN)
 205struct rx_desc {
 206	u32 cmd_sts;		/* Descriptor command status		*/
 207	u16 buf_size;		/* Buffer size				*/
 208	u16 byte_cnt;		/* Descriptor buffer byte count		*/
 209	u32 buf_ptr;		/* Descriptor buffer pointer		*/
 210	u32 next_desc_ptr;	/* Next descriptor pointer		*/
 211};
 212
 213struct tx_desc {
 214	u32 cmd_sts;		/* Command/status field			*/
 215	u16 l4i_chk;		/* CPU provided TCP checksum		*/
 216	u16 byte_cnt;		/* buffer byte count			*/
 217	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
 218	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
 219};
 220#else
 221#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
 222#endif
 223
 224/* RX & TX descriptor command */
 225#define BUFFER_OWNED_BY_DMA		0x80000000
 226
 227/* RX & TX descriptor status */
 228#define ERROR_SUMMARY			0x00000001
 229
 230/* RX descriptor status */
 231#define LAYER_4_CHECKSUM_OK		0x40000000
 232#define RX_ENABLE_INTERRUPT		0x20000000
 233#define RX_FIRST_DESC			0x08000000
 234#define RX_LAST_DESC			0x04000000
 235#define RX_IP_HDR_OK			0x02000000
 236#define RX_PKT_IS_IPV4			0x01000000
 237#define RX_PKT_IS_ETHERNETV2		0x00800000
 238#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
 239#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
 240#define RX_PKT_IS_VLAN_TAGGED		0x00080000
 241
 242/* TX descriptor command */
 243#define TX_ENABLE_INTERRUPT		0x00800000
 244#define GEN_CRC				0x00400000
 245#define TX_FIRST_DESC			0x00200000
 246#define TX_LAST_DESC			0x00100000
 247#define ZERO_PADDING			0x00080000
 248#define GEN_IP_V4_CHECKSUM		0x00040000
 249#define GEN_TCP_UDP_CHECKSUM		0x00020000
 250#define UDP_FRAME			0x00010000
 251#define MAC_HDR_EXTRA_4_BYTES		0x00008000
 252#define MAC_HDR_EXTRA_8_BYTES		0x00000200
 253
 254#define TX_IHL_SHIFT			11
 255
 256
 257/* global *******************************************************************/
 258struct mv643xx_eth_shared_private {
 259	/*
 260	 * Ethernet controller base address.
 261	 */
 262	void __iomem *base;
 263
 264	/*
 265	 * Points at the right SMI instance to use.
 266	 */
 267	struct mv643xx_eth_shared_private *smi;
 268
 269	/*
 270	 * Provides access to local SMI interface.
 271	 */
 272	struct mii_bus *smi_bus;
 273
 274	/*
 275	 * If we have access to the error interrupt pin (which is
 276	 * somewhat misnamed as it not only reflects internal errors
 277	 * but also reflects SMI completion), use that to wait for
 278	 * SMI access completion instead of polling the SMI busy bit.
 279	 */
 280	int err_interrupt;
 281	wait_queue_head_t smi_busy_wait;
 282
 283	/*
 284	 * Per-port MBUS window access register value.
 285	 */
 286	u32 win_protect;
 287
 288	/*
 289	 * Hardware-specific parameters.
 290	 */
 291	unsigned int t_clk;
 292	int extended_rx_coal_limit;
 293	int tx_bw_control;
 294	int tx_csum_limit;
 295};
 296
 297#define TX_BW_CONTROL_ABSENT		0
 298#define TX_BW_CONTROL_OLD_LAYOUT	1
 299#define TX_BW_CONTROL_NEW_LAYOUT	2
 300
 301static int mv643xx_eth_open(struct net_device *dev);
 302static int mv643xx_eth_stop(struct net_device *dev);
 303
 304
 305/* per-port *****************************************************************/
 306struct mib_counters {
 307	u64 good_octets_received;
 308	u32 bad_octets_received;
 309	u32 internal_mac_transmit_err;
 310	u32 good_frames_received;
 311	u32 bad_frames_received;
 312	u32 broadcast_frames_received;
 313	u32 multicast_frames_received;
 314	u32 frames_64_octets;
 315	u32 frames_65_to_127_octets;
 316	u32 frames_128_to_255_octets;
 317	u32 frames_256_to_511_octets;
 318	u32 frames_512_to_1023_octets;
 319	u32 frames_1024_to_max_octets;
 320	u64 good_octets_sent;
 321	u32 good_frames_sent;
 322	u32 excessive_collision;
 323	u32 multicast_frames_sent;
 324	u32 broadcast_frames_sent;
 325	u32 unrec_mac_control_received;
 326	u32 fc_sent;
 327	u32 good_fc_received;
 328	u32 bad_fc_received;
 329	u32 undersize_received;
 330	u32 fragments_received;
 331	u32 oversize_received;
 332	u32 jabber_received;
 333	u32 mac_receive_error;
 334	u32 bad_crc_event;
 335	u32 collision;
 336	u32 late_collision;
 337};
 338
 339struct lro_counters {
 340	u32 lro_aggregated;
 341	u32 lro_flushed;
 342	u32 lro_no_desc;
 343};
 344
 345struct rx_queue {
 346	int index;
 347
 348	int rx_ring_size;
 349
 350	int rx_desc_count;
 351	int rx_curr_desc;
 352	int rx_used_desc;
 353
 354	struct rx_desc *rx_desc_area;
 355	dma_addr_t rx_desc_dma;
 356	int rx_desc_area_size;
 357	struct sk_buff **rx_skb;
 358
 359	struct net_lro_mgr lro_mgr;
 360	struct net_lro_desc lro_arr[8];
 361};
 362
 363struct tx_queue {
 364	int index;
 365
 366	int tx_ring_size;
 367
 368	int tx_desc_count;
 369	int tx_curr_desc;
 370	int tx_used_desc;
 371
 372	struct tx_desc *tx_desc_area;
 373	dma_addr_t tx_desc_dma;
 374	int tx_desc_area_size;
 375
 376	struct sk_buff_head tx_skb;
 377
 378	unsigned long tx_packets;
 379	unsigned long tx_bytes;
 380	unsigned long tx_dropped;
 381};
 382
 383struct mv643xx_eth_private {
 384	struct mv643xx_eth_shared_private *shared;
 385	void __iomem *base;
 386	int port_num;
 387
 388	struct net_device *dev;
 389
 390	struct phy_device *phy;
 391
 392	struct timer_list mib_counters_timer;
 393	spinlock_t mib_counters_lock;
 394	struct mib_counters mib_counters;
 395
 396	struct lro_counters lro_counters;
 397
 398	struct work_struct tx_timeout_task;
 399
 400	struct napi_struct napi;
 401	u32 int_mask;
 402	u8 oom;
 403	u8 work_link;
 404	u8 work_tx;
 405	u8 work_tx_end;
 406	u8 work_rx;
 407	u8 work_rx_refill;
 408
 409	int skb_size;
 410	struct sk_buff_head rx_recycle;
 411
 412	/*
 413	 * RX state.
 414	 */
 415	int rx_ring_size;
 416	unsigned long rx_desc_sram_addr;
 417	int rx_desc_sram_size;
 418	int rxq_count;
 419	struct timer_list rx_oom;
 420	struct rx_queue rxq[8];
 421
 422	/*
 423	 * TX state.
 424	 */
 425	int tx_ring_size;
 426	unsigned long tx_desc_sram_addr;
 427	int tx_desc_sram_size;
 428	int txq_count;
 429	struct tx_queue txq[8];
 430};
 431
 432
 433/* port register accessors **************************************************/
 434static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
 435{
 436	return readl(mp->shared->base + offset);
 437}
 438
 439static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
 440{
 441	return readl(mp->base + offset);
 442}
 443
 444static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
 445{
 446	writel(data, mp->shared->base + offset);
 447}
 448
 449static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
 450{
 451	writel(data, mp->base + offset);
 452}
 453
 454
 455/* rxq/txq helper functions *************************************************/
 456static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
 457{
 458	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
 459}
 460
 461static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
 462{
 463	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
 464}
 465
 466static void rxq_enable(struct rx_queue *rxq)
 467{
 468	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 469	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
 470}
 471
 472static void rxq_disable(struct rx_queue *rxq)
 473{
 474	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 475	u8 mask = 1 << rxq->index;
 476
 477	wrlp(mp, RXQ_COMMAND, mask << 8);
 478	while (rdlp(mp, RXQ_COMMAND) & mask)
 479		udelay(10);
 480}
 481
 482static void txq_reset_hw_ptr(struct tx_queue *txq)
 483{
 484	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 485	u32 addr;
 486
 487	addr = (u32)txq->tx_desc_dma;
 488	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
 489	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
 490}
 491
 492static void txq_enable(struct tx_queue *txq)
 493{
 494	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 495	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
 496}
 497
 498static void txq_disable(struct tx_queue *txq)
 499{
 500	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 501	u8 mask = 1 << txq->index;
 502
 503	wrlp(mp, TXQ_COMMAND, mask << 8);
 504	while (rdlp(mp, TXQ_COMMAND) & mask)
 505		udelay(10);
 506}
 507
 508static void txq_maybe_wake(struct tx_queue *txq)
 509{
 510	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 511	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 512
 513	if (netif_tx_queue_stopped(nq)) {
 514		__netif_tx_lock(nq, smp_processor_id());
 515		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
 516			netif_tx_wake_queue(nq);
 517		__netif_tx_unlock(nq);
 518	}
 519}
 520
 521
 522/* rx napi ******************************************************************/
 523static int
 524mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
 525		       u64 *hdr_flags, void *priv)
 526{
 527	unsigned long cmd_sts = (unsigned long)priv;
 528
 529	/*
 530	 * Make sure that this packet is Ethernet II, is not VLAN
 531	 * tagged, is IPv4, has a valid IP header, and is TCP.
 532	 */
 533	if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
 534		       RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
 535		       RX_PKT_IS_VLAN_TAGGED)) !=
 536	    (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
 537	     RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
 538		return -1;
 539
 540	skb_reset_network_header(skb);
 541	skb_set_transport_header(skb, ip_hdrlen(skb));
 542	*iphdr = ip_hdr(skb);
 543	*tcph = tcp_hdr(skb);
 544	*hdr_flags = LRO_IPV4 | LRO_TCP;
 545
 546	return 0;
 547}
 548
 549static int rxq_process(struct rx_queue *rxq, int budget)
 550{
 551	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 552	struct net_device_stats *stats = &mp->dev->stats;
 553	int lro_flush_needed;
 554	int rx;
 555
 556	lro_flush_needed = 0;
 557	rx = 0;
 558	while (rx < budget && rxq->rx_desc_count) {
 559		struct rx_desc *rx_desc;
 560		unsigned int cmd_sts;
 561		struct sk_buff *skb;
 562		u16 byte_cnt;
 563
 564		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
 565
 566		cmd_sts = rx_desc->cmd_sts;
 567		if (cmd_sts & BUFFER_OWNED_BY_DMA)
 568			break;
 569		rmb();
 570
 571		skb = rxq->rx_skb[rxq->rx_curr_desc];
 572		rxq->rx_skb[rxq->rx_curr_desc] = NULL;
 573
 574		rxq->rx_curr_desc++;
 575		if (rxq->rx_curr_desc == rxq->rx_ring_size)
 576			rxq->rx_curr_desc = 0;
 577
 578		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
 579				 rx_desc->buf_size, DMA_FROM_DEVICE);
 580		rxq->rx_desc_count--;
 581		rx++;
 582
 583		mp->work_rx_refill |= 1 << rxq->index;
 584
 585		byte_cnt = rx_desc->byte_cnt;
 586
 587		/*
 588		 * Update statistics.
 589		 *
 590		 * Note that the descriptor byte count includes 2 dummy
 591		 * bytes automatically inserted by the hardware at the
 592		 * start of the packet (which we don't count), and a 4
 593		 * byte CRC at the end of the packet (which we do count).
 594		 */
 595		stats->rx_packets++;
 596		stats->rx_bytes += byte_cnt - 2;
 597
 598		/*
 599		 * In case we received a packet without first / last bits
 600		 * on, or the error summary bit is set, the packet needs
 601		 * to be dropped.
 602		 */
 603		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
 604			!= (RX_FIRST_DESC | RX_LAST_DESC))
 605			goto err;
 606
 607		/*
 608		 * The -4 is for the CRC in the trailer of the
 609		 * received packet
 610		 */
 611		skb_put(skb, byte_cnt - 2 - 4);
 612
 613		if (cmd_sts & LAYER_4_CHECKSUM_OK)
 614			skb->ip_summed = CHECKSUM_UNNECESSARY;
 615		skb->protocol = eth_type_trans(skb, mp->dev);
 616
 617		if (skb->dev->features & NETIF_F_LRO &&
 618		    skb->ip_summed == CHECKSUM_UNNECESSARY) {
 619			lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
 620			lro_flush_needed = 1;
 621		} else
 622			netif_receive_skb(skb);
 623
 624		continue;
 625
 626err:
 627		stats->rx_dropped++;
 628
 629		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
 630			(RX_FIRST_DESC | RX_LAST_DESC)) {
 631			if (net_ratelimit())
 632				netdev_err(mp->dev,
 633					   "received packet spanning multiple descriptors\n");
 634		}
 635
 636		if (cmd_sts & ERROR_SUMMARY)
 637			stats->rx_errors++;
 638
 639		dev_kfree_skb(skb);
 640	}
 641
 642	if (lro_flush_needed)
 643		lro_flush_all(&rxq->lro_mgr);
 644
 645	if (rx < budget)
 646		mp->work_rx &= ~(1 << rxq->index);
 647
 648	return rx;
 649}
 650
 651static int rxq_refill(struct rx_queue *rxq, int budget)
 652{
 653	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 654	int refilled;
 655
 656	refilled = 0;
 657	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
 658		struct sk_buff *skb;
 659		int rx;
 660		struct rx_desc *rx_desc;
 661		int size;
 662
 663		skb = __skb_dequeue(&mp->rx_recycle);
 664		if (skb == NULL)
 665			skb = dev_alloc_skb(mp->skb_size);
 666
 667		if (skb == NULL) {
 668			mp->oom = 1;
 669			goto oom;
 670		}
 671
 672		if (SKB_DMA_REALIGN)
 673			skb_reserve(skb, SKB_DMA_REALIGN);
 674
 675		refilled++;
 676		rxq->rx_desc_count++;
 677
 678		rx = rxq->rx_used_desc++;
 679		if (rxq->rx_used_desc == rxq->rx_ring_size)
 680			rxq->rx_used_desc = 0;
 681
 682		rx_desc = rxq->rx_desc_area + rx;
 683
 684		size = skb->end - skb->data;
 685		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
 686						  skb->data, size,
 687						  DMA_FROM_DEVICE);
 688		rx_desc->buf_size = size;
 689		rxq->rx_skb[rx] = skb;
 690		wmb();
 691		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
 692		wmb();
 693
 694		/*
 695		 * The hardware automatically prepends 2 bytes of
 696		 * dummy data to each received packet, so that the
 697		 * IP header ends up 16-byte aligned.
 698		 */
 699		skb_reserve(skb, 2);
 700	}
 701
 702	if (refilled < budget)
 703		mp->work_rx_refill &= ~(1 << rxq->index);
 704
 705oom:
 706	return refilled;
 707}
 708
 709
 710/* tx ***********************************************************************/
 711static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
 712{
 713	int frag;
 714
 715	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 716		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
 717		if (fragp->size <= 8 && fragp->page_offset & 7)
 718			return 1;
 719	}
 720
 721	return 0;
 722}
 723
 724static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 725{
 726	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 727	int nr_frags = skb_shinfo(skb)->nr_frags;
 728	int frag;
 729
 730	for (frag = 0; frag < nr_frags; frag++) {
 731		skb_frag_t *this_frag;
 732		int tx_index;
 733		struct tx_desc *desc;
 734
 735		this_frag = &skb_shinfo(skb)->frags[frag];
 736		tx_index = txq->tx_curr_desc++;
 737		if (txq->tx_curr_desc == txq->tx_ring_size)
 738			txq->tx_curr_desc = 0;
 739		desc = &txq->tx_desc_area[tx_index];
 740
 741		/*
 742		 * The last fragment will generate an interrupt
 743		 * which will free the skb on TX completion.
 744		 */
 745		if (frag == nr_frags - 1) {
 746			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
 747					ZERO_PADDING | TX_LAST_DESC |
 748					TX_ENABLE_INTERRUPT;
 749		} else {
 750			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
 751		}
 752
 753		desc->l4i_chk = 0;
 754		desc->byte_cnt = this_frag->size;
 755		desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
 756					     this_frag->page,
 757					     this_frag->page_offset,
 758					     this_frag->size, DMA_TO_DEVICE);
 759	}
 760}
 761
 762static inline __be16 sum16_as_be(__sum16 sum)
 763{
 764	return (__force __be16)sum;
 765}
 766
 767static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 768{
 769	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 770	int nr_frags = skb_shinfo(skb)->nr_frags;
 771	int tx_index;
 772	struct tx_desc *desc;
 773	u32 cmd_sts;
 774	u16 l4i_chk;
 775	int length;
 776
 777	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
 778	l4i_chk = 0;
 779
 780	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 781		int hdr_len;
 782		int tag_bytes;
 783
 784		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
 785		       skb->protocol != htons(ETH_P_8021Q));
 786
 787		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
 788		tag_bytes = hdr_len - ETH_HLEN;
 789		if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
 790		    unlikely(tag_bytes & ~12)) {
 791			if (skb_checksum_help(skb) == 0)
 792				goto no_csum;
 793			kfree_skb(skb);
 794			return 1;
 795		}
 796
 797		if (tag_bytes & 4)
 798			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
 799		if (tag_bytes & 8)
 800			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
 801
 802		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
 803			   GEN_IP_V4_CHECKSUM   |
 804			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
 805
 806		switch (ip_hdr(skb)->protocol) {
 807		case IPPROTO_UDP:
 808			cmd_sts |= UDP_FRAME;
 809			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
 810			break;
 811		case IPPROTO_TCP:
 812			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
 813			break;
 814		default:
 815			BUG();
 816		}
 817	} else {
 818no_csum:
 819		/* Errata BTS #50, IHL must be 5 if no HW checksum */
 820		cmd_sts |= 5 << TX_IHL_SHIFT;
 821	}
 822
 823	tx_index = txq->tx_curr_desc++;
 824	if (txq->tx_curr_desc == txq->tx_ring_size)
 825		txq->tx_curr_desc = 0;
 826	desc = &txq->tx_desc_area[tx_index];
 827
 828	if (nr_frags) {
 829		txq_submit_frag_skb(txq, skb);
 830		length = skb_headlen(skb);
 831	} else {
 832		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
 833		length = skb->len;
 834	}
 835
 836	desc->l4i_chk = l4i_chk;
 837	desc->byte_cnt = length;
 838	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
 839				       length, DMA_TO_DEVICE);
 840
 841	__skb_queue_tail(&txq->tx_skb, skb);
 842
 843	skb_tx_timestamp(skb);
 844
 845	/* ensure all other descriptors are written before first cmd_sts */
 846	wmb();
 847	desc->cmd_sts = cmd_sts;
 848
 849	/* clear TX_END status */
 850	mp->work_tx_end &= ~(1 << txq->index);
 851
 852	/* ensure all descriptors are written before poking hardware */
 853	wmb();
 854	txq_enable(txq);
 855
 856	txq->tx_desc_count += nr_frags + 1;
 857
 858	return 0;
 859}
 860
 861static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 862{
 863	struct mv643xx_eth_private *mp = netdev_priv(dev);
 864	int length, queue;
 865	struct tx_queue *txq;
 866	struct netdev_queue *nq;
 867
 868	queue = skb_get_queue_mapping(skb);
 869	txq = mp->txq + queue;
 870	nq = netdev_get_tx_queue(dev, queue);
 871
 872	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
 873		txq->tx_dropped++;
 874		netdev_printk(KERN_DEBUG, dev,
 875			      "failed to linearize skb with tiny unaligned fragment\n");
 876		return NETDEV_TX_BUSY;
 877	}
 878
 879	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
 880		if (net_ratelimit())
 881			netdev_err(dev, "tx queue full?!\n");
 882		kfree_skb(skb);
 883		return NETDEV_TX_OK;
 884	}
 885
 886	length = skb->len;
 887
 888	if (!txq_submit_skb(txq, skb)) {
 889		int entries_left;
 890
 891		txq->tx_bytes += length;
 892		txq->tx_packets++;
 893
 894		entries_left = txq->tx_ring_size - txq->tx_desc_count;
 895		if (entries_left < MAX_SKB_FRAGS + 1)
 896			netif_tx_stop_queue(nq);
 897	}
 898
 899	return NETDEV_TX_OK;
 900}
 901
 902
 903/* tx napi ******************************************************************/
 904static void txq_kick(struct tx_queue *txq)
 905{
 906	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 907	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 908	u32 hw_desc_ptr;
 909	u32 expected_ptr;
 910
 911	__netif_tx_lock(nq, smp_processor_id());
 912
 913	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
 914		goto out;
 915
 916	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
 917	expected_ptr = (u32)txq->tx_desc_dma +
 918				txq->tx_curr_desc * sizeof(struct tx_desc);
 919
 920	if (hw_desc_ptr != expected_ptr)
 921		txq_enable(txq);
 922
 923out:
 924	__netif_tx_unlock(nq);
 925
 926	mp->work_tx_end &= ~(1 << txq->index);
 927}
 928
 929static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 930{
 931	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 932	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 933	int reclaimed;
 934
 935	__netif_tx_lock(nq, smp_processor_id());
 936
 937	reclaimed = 0;
 938	while (reclaimed < budget && txq->tx_desc_count > 0) {
 939		int tx_index;
 940		struct tx_desc *desc;
 941		u32 cmd_sts;
 942		struct sk_buff *skb;
 943
 944		tx_index = txq->tx_used_desc;
 945		desc = &txq->tx_desc_area[tx_index];
 946		cmd_sts = desc->cmd_sts;
 947
 948		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
 949			if (!force)
 950				break;
 951			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
 952		}
 953
 954		txq->tx_used_desc = tx_index + 1;
 955		if (txq->tx_used_desc == txq->tx_ring_size)
 956			txq->tx_used_desc = 0;
 957
 958		reclaimed++;
 959		txq->tx_desc_count--;
 960
 961		skb = NULL;
 962		if (cmd_sts & TX_LAST_DESC)
 963			skb = __skb_dequeue(&txq->tx_skb);
 964
 965		if (cmd_sts & ERROR_SUMMARY) {
 966			netdev_info(mp->dev, "tx error\n");
 967			mp->dev->stats.tx_errors++;
 968		}
 969
 970		if (cmd_sts & TX_FIRST_DESC) {
 971			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
 972					 desc->byte_cnt, DMA_TO_DEVICE);
 973		} else {
 974			dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
 975				       desc->byte_cnt, DMA_TO_DEVICE);
 976		}
 977
 978		if (skb != NULL) {
 979			if (skb_queue_len(&mp->rx_recycle) <
 980					mp->rx_ring_size &&
 981			    skb_recycle_check(skb, mp->skb_size))
 982				__skb_queue_head(&mp->rx_recycle, skb);
 983			else
 984				dev_kfree_skb(skb);
 985		}
 986	}
 987
 988	__netif_tx_unlock(nq);
 989
 990	if (reclaimed < budget)
 991		mp->work_tx &= ~(1 << txq->index);
 992
 993	return reclaimed;
 994}
 995
 996
 997/* tx rate control **********************************************************/
 998/*
 999 * Set total maximum TX rate (shared by all TX queues for this port)
1000 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
1001 */
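/*
 * Illustrative example (editor's note, assuming a hypothetical t_clk of
 * 133 MHz, which is not taken from this file): rate = 100000000
 * (100 Mbit/s) gives
 *	token_rate = ((100000000 / 1000) * 64) / (133000000 / 1000) ~= 48,
 * well below the 1023 cap applied in tx_set_rate() below.
 */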
1002static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
1003{
1004	int token_rate;
1005	int mtu;
1006	int bucket_size;
1007
1008	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
1009	if (token_rate > 1023)
1010		token_rate = 1023;
1011
1012	mtu = (mp->dev->mtu + 255) >> 8;
1013	if (mtu > 63)
1014		mtu = 63;
1015
1016	bucket_size = (burst + 255) >> 8;
1017	if (bucket_size > 65535)
1018		bucket_size = 65535;
1019
1020	switch (mp->shared->tx_bw_control) {
1021	case TX_BW_CONTROL_OLD_LAYOUT:
1022		wrlp(mp, TX_BW_RATE, token_rate);
1023		wrlp(mp, TX_BW_MTU, mtu);
1024		wrlp(mp, TX_BW_BURST, bucket_size);
1025		break;
1026	case TX_BW_CONTROL_NEW_LAYOUT:
1027		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
1028		wrlp(mp, TX_BW_MTU_MOVED, mtu);
1029		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
1030		break;
1031	}
1032}
1033
1034static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
1035{
1036	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1037	int token_rate;
1038	int bucket_size;
1039
1040	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
1041	if (token_rate > 1023)
1042		token_rate = 1023;
1043
1044	bucket_size = (burst + 255) >> 8;
1045	if (bucket_size > 65535)
1046		bucket_size = 65535;
1047
1048	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
1049	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
1050}
1051
1052static void txq_set_fixed_prio_mode(struct tx_queue *txq)
1053{
1054	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1055	int off;
1056	u32 val;
1057
1058	/*
1059	 * Turn on fixed priority mode.
1060	 */
1061	off = 0;
1062	switch (mp->shared->tx_bw_control) {
1063	case TX_BW_CONTROL_OLD_LAYOUT:
1064		off = TXQ_FIX_PRIO_CONF;
1065		break;
1066	case TX_BW_CONTROL_NEW_LAYOUT:
1067		off = TXQ_FIX_PRIO_CONF_MOVED;
1068		break;
1069	}
1070
1071	if (off) {
1072		val = rdlp(mp, off);
1073		val |= 1 << txq->index;
1074		wrlp(mp, off, val);
1075	}
1076}
1077
1078
1079/* mii management interface *************************************************/
1080static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
1081{
1082	struct mv643xx_eth_shared_private *msp = dev_id;
1083
1084	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
1085		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
1086		wake_up(&msp->smi_busy_wait);
1087		return IRQ_HANDLED;
1088	}
1089
1090	return IRQ_NONE;
1091}
1092
1093static int smi_is_done(struct mv643xx_eth_shared_private *msp)
1094{
1095	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
1096}
1097
1098static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
1099{
1100	if (msp->err_interrupt == NO_IRQ) {
1101		int i;
1102
1103		for (i = 0; !smi_is_done(msp); i++) {
1104			if (i == 10)
1105				return -ETIMEDOUT;
1106			msleep(10);
1107		}
1108
1109		return 0;
1110	}
1111
1112	if (!smi_is_done(msp)) {
1113		wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
1114				   msecs_to_jiffies(100));
1115		if (!smi_is_done(msp))
1116			return -ETIMEDOUT;
1117	}
1118
1119	return 0;
1120}
1121
1122static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
1123{
1124	struct mv643xx_eth_shared_private *msp = bus->priv;
1125	void __iomem *smi_reg = msp->base + SMI_REG;
1126	int ret;
1127
1128	if (smi_wait_ready(msp)) {
1129		pr_warn("SMI bus busy timeout\n");
1130		return -ETIMEDOUT;
1131	}
1132
1133	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
1134
1135	if (smi_wait_ready(msp)) {
1136		pr_warn("SMI bus busy timeout\n");
1137		return -ETIMEDOUT;
1138	}
1139
1140	ret = readl(smi_reg);
1141	if (!(ret & SMI_READ_VALID)) {
1142		pr_warn("SMI bus read not valid\n");
1143		return -ENODEV;
1144	}
1145
1146	return ret & 0xffff;
1147}
1148
1149static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1150{
1151	struct mv643xx_eth_shared_private *msp = bus->priv;
1152	void __iomem *smi_reg = msp->base + SMI_REG;
1153
1154	if (smi_wait_ready(msp)) {
1155		pr_warn("SMI bus busy timeout\n");
1156		return -ETIMEDOUT;
1157	}
1158
1159	writel(SMI_OPCODE_WRITE | (reg << 21) |
1160		(addr << 16) | (val & 0xffff), smi_reg);
1161
1162	if (smi_wait_ready(msp)) {
1163		pr_warn("SMI bus busy timeout\n");
1164		return -ETIMEDOUT;
1165	}
1166
1167	return 0;
1168}
1169
1170
1171/* statistics ***************************************************************/
1172static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1173{
1174	struct mv643xx_eth_private *mp = netdev_priv(dev);
1175	struct net_device_stats *stats = &dev->stats;
1176	unsigned long tx_packets = 0;
1177	unsigned long tx_bytes = 0;
1178	unsigned long tx_dropped = 0;
1179	int i;
1180
1181	for (i = 0; i < mp->txq_count; i++) {
1182		struct tx_queue *txq = mp->txq + i;
1183
1184		tx_packets += txq->tx_packets;
1185		tx_bytes += txq->tx_bytes;
1186		tx_dropped += txq->tx_dropped;
1187	}
1188
1189	stats->tx_packets = tx_packets;
1190	stats->tx_bytes = tx_bytes;
1191	stats->tx_dropped = tx_dropped;
1192
1193	return stats;
1194}
1195
1196static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
1197{
1198	u32 lro_aggregated = 0;
1199	u32 lro_flushed = 0;
1200	u32 lro_no_desc = 0;
1201	int i;
1202
1203	for (i = 0; i < mp->rxq_count; i++) {
1204		struct rx_queue *rxq = mp->rxq + i;
1205
1206		lro_aggregated += rxq->lro_mgr.stats.aggregated;
1207		lro_flushed += rxq->lro_mgr.stats.flushed;
1208		lro_no_desc += rxq->lro_mgr.stats.no_desc;
1209	}
1210
1211	mp->lro_counters.lro_aggregated = lro_aggregated;
1212	mp->lro_counters.lro_flushed = lro_flushed;
1213	mp->lro_counters.lro_no_desc = lro_no_desc;
1214}
1215
1216static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
1217{
1218	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
1219}
1220
1221static void mib_counters_clear(struct mv643xx_eth_private *mp)
1222{
1223	int i;
1224
1225	for (i = 0; i < 0x80; i += 4)
1226		mib_read(mp, i);
1227}
1228
1229static void mib_counters_update(struct mv643xx_eth_private *mp)
1230{
1231	struct mib_counters *p = &mp->mib_counters;
1232
1233	spin_lock_bh(&mp->mib_counters_lock);
1234	p->good_octets_received += mib_read(mp, 0x00);
1235	p->bad_octets_received += mib_read(mp, 0x08);
1236	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
1237	p->good_frames_received += mib_read(mp, 0x10);
1238	p->bad_frames_received += mib_read(mp, 0x14);
1239	p->broadcast_frames_received += mib_read(mp, 0x18);
1240	p->multicast_frames_received += mib_read(mp, 0x1c);
1241	p->frames_64_octets += mib_read(mp, 0x20);
1242	p->frames_65_to_127_octets += mib_read(mp, 0x24);
1243	p->frames_128_to_255_octets += mib_read(mp, 0x28);
1244	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
1245	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
1246	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
1247	p->good_octets_sent += mib_read(mp, 0x38);
1248	p->good_frames_sent += mib_read(mp, 0x40);
1249	p->excessive_collision += mib_read(mp, 0x44);
1250	p->multicast_frames_sent += mib_read(mp, 0x48);
1251	p->broadcast_frames_sent += mib_read(mp, 0x4c);
1252	p->unrec_mac_control_received += mib_read(mp, 0x50);
1253	p->fc_sent += mib_read(mp, 0x54);
1254	p->good_fc_received += mib_read(mp, 0x58);
1255	p->bad_fc_received += mib_read(mp, 0x5c);
1256	p->undersize_received += mib_read(mp, 0x60);
1257	p->fragments_received += mib_read(mp, 0x64);
1258	p->oversize_received += mib_read(mp, 0x68);
1259	p->jabber_received += mib_read(mp, 0x6c);
1260	p->mac_receive_error += mib_read(mp, 0x70);
1261	p->bad_crc_event += mib_read(mp, 0x74);
1262	p->collision += mib_read(mp, 0x78);
1263	p->late_collision += mib_read(mp, 0x7c);
1264	spin_unlock_bh(&mp->mib_counters_lock);
1265
1266	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
1267}
1268
1269static void mib_counters_timer_wrapper(unsigned long _mp)
1270{
1271	struct mv643xx_eth_private *mp = (void *)_mp;
1272
1273	mib_counters_update(mp);
1274}
1275
1276
1277/* interrupt coalescing *****************************************************/
1278/*
1279 * Hardware coalescing parameters are set in units of 64 t_clk
1280 * cycles.  I.e.:
1281 *
1282 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
1283 *
1284 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
1285 *
1286 * In the ->set*() methods, we round the computed register value
1287 * to the nearest integer.
1288 */
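/*
 * Worked example (editor's note, illustrative only; assumes a
 * hypothetical t_clk of 133 MHz, not taken from this file): requesting
 * 250 usec of coalescing gives
 *	register_value = 250 * 133000000 / 64000000 ~= 520
 * (the "+ 31999999" in the ->set*() helpers below implements the
 * round-to-nearest), and reading that value back yields
 *	64000000 * 520 / 133000000 ~= 250.2 usec.
 */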
1289static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
1290{
1291	u32 val = rdlp(mp, SDMA_CONFIG);
1292	u64 temp;
1293
1294	if (mp->shared->extended_rx_coal_limit)
1295		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
1296	else
1297		temp = (val & 0x003fff00) >> 8;
1298
1299	temp *= 64000000;
1300	do_div(temp, mp->shared->t_clk);
1301
1302	return (unsigned int)temp;
1303}
1304
1305static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1306{
1307	u64 temp;
1308	u32 val;
1309
1310	temp = (u64)usec * mp->shared->t_clk;
1311	temp += 31999999;
1312	do_div(temp, 64000000);
1313
1314	val = rdlp(mp, SDMA_CONFIG);
1315	if (mp->shared->extended_rx_coal_limit) {
1316		if (temp > 0xffff)
1317			temp = 0xffff;
1318		val &= ~0x023fff80;
1319		val |= (temp & 0x8000) << 10;
1320		val |= (temp & 0x7fff) << 7;
1321	} else {
1322		if (temp > 0x3fff)
1323			temp = 0x3fff;
1324		val &= ~0x003fff00;
1325		val |= (temp & 0x3fff) << 8;
1326	}
1327	wrlp(mp, SDMA_CONFIG, val);
1328}
1329
1330static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
1331{
1332	u64 temp;
1333
1334	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
1335	temp *= 64000000;
1336	do_div(temp, mp->shared->t_clk);
1337
1338	return (unsigned int)temp;
1339}
1340
1341static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1342{
1343	u64 temp;
1344
1345	temp = (u64)usec * mp->shared->t_clk;
1346	temp += 31999999;
1347	do_div(temp, 64000000);
1348
1349	if (temp > 0x3fff)
1350		temp = 0x3fff;
1351
1352	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
1353}
1354
1355
1356/* ethtool ******************************************************************/
1357struct mv643xx_eth_stats {
1358	char stat_string[ETH_GSTRING_LEN];
1359	int sizeof_stat;
1360	int netdev_off;
1361	int mp_off;
1362};
1363
1364#define SSTAT(m)						\
1365	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
1366	  offsetof(struct net_device, stats.m), -1 }
1367
1368#define MIBSTAT(m)						\
1369	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
1370	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
1371
1372#define LROSTAT(m)						\
1373	{ #m, FIELD_SIZEOF(struct lro_counters, m),		\
1374	  -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }
1375
1376static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
1377	SSTAT(rx_packets),
1378	SSTAT(tx_packets),
1379	SSTAT(rx_bytes),
1380	SSTAT(tx_bytes),
1381	SSTAT(rx_errors),
1382	SSTAT(tx_errors),
1383	SSTAT(rx_dropped),
1384	SSTAT(tx_dropped),
1385	MIBSTAT(good_octets_received),
1386	MIBSTAT(bad_octets_received),
1387	MIBSTAT(internal_mac_transmit_err),
1388	MIBSTAT(good_frames_received),
1389	MIBSTAT(bad_frames_received),
1390	MIBSTAT(broadcast_frames_received),
1391	MIBSTAT(multicast_frames_received),
1392	MIBSTAT(frames_64_octets),
1393	MIBSTAT(frames_65_to_127_octets),
1394	MIBSTAT(frames_128_to_255_octets),
1395	MIBSTAT(frames_256_to_511_octets),
1396	MIBSTAT(frames_512_to_1023_octets),
1397	MIBSTAT(frames_1024_to_max_octets),
1398	MIBSTAT(good_octets_sent),
1399	MIBSTAT(good_frames_sent),
1400	MIBSTAT(excessive_collision),
1401	MIBSTAT(multicast_frames_sent),
1402	MIBSTAT(broadcast_frames_sent),
1403	MIBSTAT(unrec_mac_control_received),
1404	MIBSTAT(fc_sent),
1405	MIBSTAT(good_fc_received),
1406	MIBSTAT(bad_fc_received),
1407	MIBSTAT(undersize_received),
1408	MIBSTAT(fragments_received),
1409	MIBSTAT(oversize_received),
1410	MIBSTAT(jabber_received),
1411	MIBSTAT(mac_receive_error),
1412	MIBSTAT(bad_crc_event),
1413	MIBSTAT(collision),
1414	MIBSTAT(late_collision),
1415	LROSTAT(lro_aggregated),
1416	LROSTAT(lro_flushed),
1417	LROSTAT(lro_no_desc),
1418};
1419
1420static int
1421mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
1422			     struct ethtool_cmd *cmd)
1423{
1424	int err;
1425
1426	err = phy_read_status(mp->phy);
1427	if (err == 0)
1428		err = phy_ethtool_gset(mp->phy, cmd);
1429
1430	/*
1431	 * The MAC does not support 1000baseT_Half.
1432	 */
1433	cmd->supported &= ~SUPPORTED_1000baseT_Half;
1434	cmd->advertising &= ~ADVERTISED_1000baseT_Half;
1435
1436	return err;
1437}
1438
1439static int
1440mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
1441				 struct ethtool_cmd *cmd)
1442{
1443	u32 port_status;
1444
1445	port_status = rdlp(mp, PORT_STATUS);
1446
1447	cmd->supported = SUPPORTED_MII;
1448	cmd->advertising = ADVERTISED_MII;
1449	switch (port_status & PORT_SPEED_MASK) {
1450	case PORT_SPEED_10:
1451		ethtool_cmd_speed_set(cmd, SPEED_10);
1452		break;
1453	case PORT_SPEED_100:
1454		ethtool_cmd_speed_set(cmd, SPEED_100);
1455		break;
1456	case PORT_SPEED_1000:
1457		ethtool_cmd_speed_set(cmd, SPEED_1000);
1458		break;
1459	default:
1460		cmd->speed = -1;
1461		break;
1462	}
1463	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
1464	cmd->port = PORT_MII;
1465	cmd->phy_address = 0;
1466	cmd->transceiver = XCVR_INTERNAL;
1467	cmd->autoneg = AUTONEG_DISABLE;
1468	cmd->maxtxpkt = 1;
1469	cmd->maxrxpkt = 1;
1470
1471	return 0;
1472}
1473
1474static int
1475mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1476{
1477	struct mv643xx_eth_private *mp = netdev_priv(dev);
1478
1479	if (mp->phy != NULL)
1480		return mv643xx_eth_get_settings_phy(mp, cmd);
1481	else
1482		return mv643xx_eth_get_settings_phyless(mp, cmd);
1483}
1484
1485static int
1486mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1487{
1488	struct mv643xx_eth_private *mp = netdev_priv(dev);
1489
1490	if (mp->phy == NULL)
1491		return -EINVAL;
1492
1493	/*
1494	 * The MAC does not support 1000baseT_Half.
1495	 */
1496	cmd->advertising &= ~ADVERTISED_1000baseT_Half;
1497
1498	return phy_ethtool_sset(mp->phy, cmd);
1499}
1500
1501static void mv643xx_eth_get_drvinfo(struct net_device *dev,
1502				    struct ethtool_drvinfo *drvinfo)
1503{
1504	strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
1505	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
1506	strncpy(drvinfo->fw_version, "N/A", 32);
1507	strncpy(drvinfo->bus_info, "platform", 32);
1508	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
1509}
1510
1511static int mv643xx_eth_nway_reset(struct net_device *dev)
1512{
1513	struct mv643xx_eth_private *mp = netdev_priv(dev);
1514
1515	if (mp->phy == NULL)
1516		return -EINVAL;
1517
1518	return genphy_restart_aneg(mp->phy);
1519}
1520
1521static int
1522mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1523{
1524	struct mv643xx_eth_private *mp = netdev_priv(dev);
1525
1526	ec->rx_coalesce_usecs = get_rx_coal(mp);
1527	ec->tx_coalesce_usecs = get_tx_coal(mp);
1528
1529	return 0;
1530}
1531
1532static int
1533mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1534{
1535	struct mv643xx_eth_private *mp = netdev_priv(dev);
1536
1537	set_rx_coal(mp, ec->rx_coalesce_usecs);
1538	set_tx_coal(mp, ec->tx_coalesce_usecs);
1539
1540	return 0;
1541}
1542
1543static void
1544mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1545{
1546	struct mv643xx_eth_private *mp = netdev_priv(dev);
1547
1548	er->rx_max_pending = 4096;
1549	er->tx_max_pending = 4096;
1550	er->rx_mini_max_pending = 0;
1551	er->rx_jumbo_max_pending = 0;
1552
1553	er->rx_pending = mp->rx_ring_size;
1554	er->tx_pending = mp->tx_ring_size;
1555	er->rx_mini_pending = 0;
1556	er->rx_jumbo_pending = 0;
1557}
1558
1559static int
1560mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1561{
1562	struct mv643xx_eth_private *mp = netdev_priv(dev);
1563
1564	if (er->rx_mini_pending || er->rx_jumbo_pending)
1565		return -EINVAL;
1566
1567	mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
1568	mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;
1569
1570	if (netif_running(dev)) {
1571		mv643xx_eth_stop(dev);
1572		if (mv643xx_eth_open(dev)) {
1573			netdev_err(dev,
1574				   "fatal error on re-opening device after ring param change\n");
1575			return -ENOMEM;
1576		}
1577	}
1578
1579	return 0;
1580}
1581
1582
1583static int
1584mv643xx_eth_set_features(struct net_device *dev, u32 features)
1585{
1586	struct mv643xx_eth_private *mp = netdev_priv(dev);
1587	u32 rx_csum = features & NETIF_F_RXCSUM;
1588
1589	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
1590
1591	return 0;
1592}
1593
1594static void mv643xx_eth_get_strings(struct net_device *dev,
1595				    uint32_t stringset, uint8_t *data)
1596{
1597	int i;
1598
1599	if (stringset == ETH_SS_STATS) {
1600		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
1601			memcpy(data + i * ETH_GSTRING_LEN,
1602				mv643xx_eth_stats[i].stat_string,
1603				ETH_GSTRING_LEN);
1604		}
1605	}
1606}
1607
1608static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
1609					  struct ethtool_stats *stats,
1610					  uint64_t *data)
1611{
1612	struct mv643xx_eth_private *mp = netdev_priv(dev);
1613	int i;
1614
1615	mv643xx_eth_get_stats(dev);
1616	mib_counters_update(mp);
1617	mv643xx_eth_grab_lro_stats(mp);
1618
1619	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
1620		const struct mv643xx_eth_stats *stat;
1621		void *p;
1622
1623		stat = mv643xx_eth_stats + i;
1624
1625		if (stat->netdev_off >= 0)
1626			p = ((void *)mp->dev) + stat->netdev_off;
1627		else
1628			p = ((void *)mp) + stat->mp_off;
1629
1630		data[i] = (stat->sizeof_stat == 8) ?
1631				*(uint64_t *)p : *(uint32_t *)p;
1632	}
1633}
1634
1635static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
1636{
1637	if (sset == ETH_SS_STATS)
1638		return ARRAY_SIZE(mv643xx_eth_stats);
1639
1640	return -EOPNOTSUPP;
1641}
1642
1643static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
1644	.get_settings		= mv643xx_eth_get_settings,
1645	.set_settings		= mv643xx_eth_set_settings,
1646	.get_drvinfo		= mv643xx_eth_get_drvinfo,
1647	.nway_reset		= mv643xx_eth_nway_reset,
1648	.get_link		= ethtool_op_get_link,
1649	.get_coalesce		= mv643xx_eth_get_coalesce,
1650	.set_coalesce		= mv643xx_eth_set_coalesce,
1651	.get_ringparam		= mv643xx_eth_get_ringparam,
1652	.set_ringparam		= mv643xx_eth_set_ringparam,
1653	.get_strings		= mv643xx_eth_get_strings,
1654	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
1655	.get_sset_count		= mv643xx_eth_get_sset_count,
1656};
1657
1658
1659/* address handling *********************************************************/
1660static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1661{
1662	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
1663	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
1664
1665	addr[0] = (mac_h >> 24) & 0xff;
1666	addr[1] = (mac_h >> 16) & 0xff;
1667	addr[2] = (mac_h >> 8) & 0xff;
1668	addr[3] = mac_h & 0xff;
1669	addr[4] = (mac_l >> 8) & 0xff;
1670	addr[5] = mac_l & 0xff;
1671}
1672
1673static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
1674{
1675	wrlp(mp, MAC_ADDR_HIGH,
1676		(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
1677	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
1678}
1679
1680static u32 uc_addr_filter_mask(struct net_device *dev)
1681{
1682	struct netdev_hw_addr *ha;
1683	u32 nibbles;
1684
1685	if (dev->flags & IFF_PROMISC)
1686		return 0;
1687
1688	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
1689	netdev_for_each_uc_addr(ha, dev) {
1690		if (memcmp(dev->dev_addr, ha->addr, 5))
1691			return 0;
1692		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
1693			return 0;
1694
1695		nibbles |= 1 << (ha->addr[5] & 0x0f);
1696	}
1697
1698	return nibbles;
1699}
1700
1701static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
1702{
1703	struct mv643xx_eth_private *mp = netdev_priv(dev);
1704	u32 port_config;
1705	u32 nibbles;
1706	int i;
1707
1708	uc_addr_set(mp, dev->dev_addr);
1709
1710	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
1711
1712	nibbles = uc_addr_filter_mask(dev);
1713	if (!nibbles) {
1714		port_config |= UNICAST_PROMISCUOUS_MODE;
1715		nibbles = 0xffff;
1716	}
1717
1718	for (i = 0; i < 16; i += 4) {
1719		int off = UNICAST_TABLE(mp->port_num) + i;
1720		u32 v;
1721
1722		v = 0;
1723		if (nibbles & 1)
1724			v |= 0x00000001;
1725		if (nibbles & 2)
1726			v |= 0x00000100;
1727		if (nibbles & 4)
1728			v |= 0x00010000;
1729		if (nibbles & 8)
1730			v |= 0x01000000;
1731		nibbles >>= 4;
1732
1733		wrl(mp, off, v);
1734	}
1735
1736	wrlp(mp, PORT_CONFIG, port_config);
1737}
1738
1739static int addr_crc(unsigned char *addr)
1740{
1741	int crc = 0;
1742	int i;
1743
1744	for (i = 0; i < 6; i++) {
1745		int j;
1746
1747		crc = (crc ^ addr[i]) << 8;
1748		for (j = 7; j >= 0; j--) {
1749			if (crc & (0x100 << j))
1750				crc ^= 0x107 << j;
1751		}
1752	}
1753
1754	return crc;
1755}
1756
1757static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
1758{
1759	struct mv643xx_eth_private *mp = netdev_priv(dev);
1760	u32 *mc_spec;
1761	u32 *mc_other;
1762	struct netdev_hw_addr *ha;
1763	int i;
1764
1765	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1766		int port_num;
1767		u32 accept;
1768
1769oom:
1770		port_num = mp->port_num;
1771		accept = 0x01010101;
1772		for (i = 0; i < 0x100; i += 4) {
1773			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
1774			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
1775		}
1776		return;
1777	}
1778
1779	mc_spec = kmalloc(0x200, GFP_ATOMIC);
1780	if (mc_spec == NULL)
1781		goto oom;
1782	mc_other = mc_spec + (0x100 >> 2);
1783
1784	memset(mc_spec, 0, 0x100);
1785	memset(mc_other, 0, 0x100);
1786
1787	netdev_for_each_mc_addr(ha, dev) {
1788		u8 *a = ha->addr;
1789		u32 *table;
1790		int entry;
1791
1792		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
1793			table = mc_spec;
1794			entry = a[5];
1795		} else {
1796			table = mc_other;
1797			entry = addr_crc(a);
1798		}
1799
1800		table[entry >> 2] |= 1 << (8 * (entry & 3));
1801	}
1802
1803	for (i = 0; i < 0x100; i += 4) {
1804		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
1805		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
1806	}
1807
1808	kfree(mc_spec);
1809}
1810
1811static void mv643xx_eth_set_rx_mode(struct net_device *dev)
1812{
1813	mv643xx_eth_program_unicast_filter(dev);
1814	mv643xx_eth_program_multicast_filter(dev);
1815}
1816
1817static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
1818{
1819	struct sockaddr *sa = addr;
1820
1821	if (!is_valid_ether_addr(sa->sa_data))
1822		return -EINVAL;
1823
1824	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
1825
1826	netif_addr_lock_bh(dev);
1827	mv643xx_eth_program_unicast_filter(dev);
1828	netif_addr_unlock_bh(dev);
1829
1830	return 0;
1831}
1832
1833
1834/* rx/tx queue initialisation ***********************************************/
1835static int rxq_init(struct mv643xx_eth_private *mp, int index)
1836{
1837	struct rx_queue *rxq = mp->rxq + index;
1838	struct rx_desc *rx_desc;
1839	int size;
1840	int i;
1841
1842	rxq->index = index;
1843
1844	rxq->rx_ring_size = mp->rx_ring_size;
1845
1846	rxq->rx_desc_count = 0;
1847	rxq->rx_curr_desc = 0;
1848	rxq->rx_used_desc = 0;
1849
1850	size = rxq->rx_ring_size * sizeof(struct rx_desc);
1851
1852	if (index == 0 && size <= mp->rx_desc_sram_size) {
1853		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
1854						mp->rx_desc_sram_size);
1855		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
1856	} else {
1857		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1858						       size, &rxq->rx_desc_dma,
1859						       GFP_KERNEL);
1860	}
1861
1862	if (rxq->rx_desc_area == NULL) {
1863		netdev_err(mp->dev,
1864			   "can't allocate rx ring (%d bytes)\n", size);
1865		goto out;
1866	}
1867	memset(rxq->rx_desc_area, 0, size);
1868
1869	rxq->rx_desc_area_size = size;
1870	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
1871								GFP_KERNEL);
1872	if (rxq->rx_skb == NULL) {
1873		netdev_err(mp->dev, "can't allocate rx skb ring\n");
1874		goto out_free;
1875	}
1876
1877	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
1878	for (i = 0; i < rxq->rx_ring_size; i++) {
1879		int nexti;
1880
1881		nexti = i + 1;
1882		if (nexti == rxq->rx_ring_size)
1883			nexti = 0;
1884
1885		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
1886					nexti * sizeof(struct rx_desc);
1887	}
1888
1889	rxq->lro_mgr.dev = mp->dev;
1890	memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
1891	rxq->lro_mgr.features = LRO_F_NAPI;
1892	rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1893	rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1894	rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
1895	rxq->lro_mgr.max_aggr = 32;
1896	rxq->lro_mgr.frag_align_pad = 0;
1897	rxq->lro_mgr.lro_arr = rxq->lro_arr;
1898	rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;
1899
1900	memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));
1901
1902	return 0;
1903
1904
1905out_free:
1906	if (index == 0 && size <= mp->rx_desc_sram_size)
1907		iounmap(rxq->rx_desc_area);
1908	else
1909		dma_free_coherent(mp->dev->dev.parent, size,
1910				  rxq->rx_desc_area,
1911				  rxq->rx_desc_dma);
1912
1913out:
1914	return -ENOMEM;
1915}
1916
1917static void rxq_deinit(struct rx_queue *rxq)
1918{
1919	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
1920	int i;
1921
1922	rxq_disable(rxq);
1923
1924	for (i = 0; i < rxq->rx_ring_size; i++) {
1925		if (rxq->rx_skb[i]) {
1926			dev_kfree_skb(rxq->rx_skb[i]);
1927			rxq->rx_desc_count--;
1928		}
1929	}
1930
1931	if (rxq->rx_desc_count) {
1932		netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
1933			   rxq->rx_desc_count);
1934	}
1935
1936	if (rxq->index == 0 &&
1937	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
1938		iounmap(rxq->rx_desc_area);
1939	else
1940		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
1941				  rxq->rx_desc_area, rxq->rx_desc_dma);
1942
1943	kfree(rxq->rx_skb);
1944}
1945
1946static int txq_init(struct mv643xx_eth_private *mp, int index)
1947{
1948	struct tx_queue *txq = mp->txq + index;
1949	struct tx_desc *tx_desc;
1950	int size;
1951	int i;
1952
1953	txq->index = index;
1954
1955	txq->tx_ring_size = mp->tx_ring_size;
1956
1957	txq->tx_desc_count = 0;
1958	txq->tx_curr_desc = 0;
1959	txq->tx_used_desc = 0;
1960
1961	size = txq->tx_ring_size * sizeof(struct tx_desc);
1962
1963	if (index == 0 && size <= mp->tx_desc_sram_size) {
1964		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
1965						mp->tx_desc_sram_size);
1966		txq->tx_desc_dma = mp->tx_desc_sram_addr;
1967	} else {
1968		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1969						       size, &txq->tx_desc_dma,
1970						       GFP_KERNEL);
1971	}
1972
1973	if (txq->tx_desc_area == NULL) {
1974		netdev_err(mp->dev,
1975			   "can't allocate tx ring (%d bytes)\n", size);
1976		return -ENOMEM;
1977	}
1978	memset(txq->tx_desc_area, 0, size);
1979
1980	txq->tx_desc_area_size = size;
1981
1982	tx_desc = (struct tx_desc *)txq->tx_desc_area;
1983	for (i = 0; i < txq->tx_ring_size; i++) {
1984		struct tx_desc *txd = tx_desc + i;
1985		int nexti;
1986
1987		nexti = i + 1;
1988		if (nexti == txq->tx_ring_size)
1989			nexti = 0;
1990
1991		txd->cmd_sts = 0;
1992		txd->next_desc_ptr = txq->tx_desc_dma +
1993					nexti * sizeof(struct tx_desc);
1994	}
1995
1996	skb_queue_head_init(&txq->tx_skb);
1997
1998	return 0;
1999}
2000
2001static void txq_deinit(struct tx_queue *txq)
2002{
2003	struct mv643xx_eth_private *mp = txq_to_mp(txq);
2004
2005	txq_disable(txq);
2006	txq_reclaim(txq, txq->tx_ring_size, 1);
2007
2008	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
2009
2010	if (txq->index == 0 &&
2011	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
2012		iounmap(txq->tx_desc_area);
2013	else
2014		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2015				  txq->tx_desc_area, txq->tx_desc_dma);
2016}
2017
2018
2019/* netdev ops and related ***************************************************/
2020static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
2021{
2022	u32 int_cause;
2023	u32 int_cause_ext;
2024
2025	int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
2026	if (int_cause == 0)
2027		return 0;
2028
2029	int_cause_ext = 0;
2030	if (int_cause & INT_EXT) {
2031		int_cause &= ~INT_EXT;
2032		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
2033	}
2034
2035	if (int_cause) {
2036		wrlp(mp, INT_CAUSE, ~int_cause);
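		/*
		 * Per-queue RX cause bits start at bit 2 and TX-end
		 * cause bits at bit 19 (cf. INT_RX_0 / INT_TX_END_0),
		 * hence the shifts; TX-end work is only noted for
		 * queues the hardware reports as stopped (TXQ_COMMAND
		 * enable bit clear).
		 */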
2037		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
2038				~(rdlp(mp, TXQ_COMMAND) & 0xff);
2039		mp->work_rx |= (int_cause & INT_RX) >> 2;
2040	}
2041
2042	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
2043	if (int_cause_ext) {
2044		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
2045		if (int_cause_ext & INT_EXT_LINK_PHY)
2046			mp->work_link = 1;
2047		mp->work_tx |= int_cause_ext & INT_EXT_TX;
2048	}
2049
2050	return 1;
2051}
2052
2053static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
2054{
2055	struct net_device *dev = (struct net_device *)dev_id;
2056	struct mv643xx_eth_private *mp = netdev_priv(dev);
2057
2058	if (unlikely(!mv643xx_eth_collect_events(mp)))
2059		return IRQ_NONE;
2060
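	/*
	 * Mask all port interrupts; mv643xx_eth_poll() re-enables
	 * them once it has completed within its NAPI budget.
	 */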
2061	wrlp(mp, INT_MASK, 0);
2062	napi_schedule(&mp->napi);
2063
2064	return IRQ_HANDLED;
2065}
2066
2067static void handle_link_event(struct mv643xx_eth_private *mp)
2068{
2069	struct net_device *dev = mp->dev;
2070	u32 port_status;
2071	int speed;
2072	int duplex;
2073	int fc;
2074
2075	port_status = rdlp(mp, PORT_STATUS);
2076	if (!(port_status & LINK_UP)) {
2077		if (netif_carrier_ok(dev)) {
2078			int i;
2079
2080			netdev_info(dev, "link down\n");
2081
2082			netif_carrier_off(dev);
2083
2084			for (i = 0; i < mp->txq_count; i++) {
2085				struct tx_queue *txq = mp->txq + i;
2086
2087				txq_reclaim(txq, txq->tx_ring_size, 1);
2088				txq_reset_hw_ptr(txq);
2089			}
2090		}
2091		return;
2092	}
2093
2094	switch (port_status & PORT_SPEED_MASK) {
2095	case PORT_SPEED_10:
2096		speed = 10;
2097		break;
2098	case PORT_SPEED_100:
2099		speed = 100;
2100		break;
2101	case PORT_SPEED_1000:
2102		speed = 1000;
2103		break;
2104	default:
2105		speed = -1;
2106		break;
2107	}
2108	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
2109	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
2110
2111	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
2112		    speed, duplex ? "full" : "half", fc ? "en" : "dis");
2113
2114	if (!netif_carrier_ok(dev))
2115		netif_carrier_on(dev);
2116}
2117
2118static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
2119{
2120	struct mv643xx_eth_private *mp;
2121	int work_done;
2122
2123	mp = container_of(napi, struct mv643xx_eth_private, napi);
2124
2125	if (unlikely(mp->oom)) {
2126		mp->oom = 0;
2127		del_timer(&mp->rx_oom);
2128	}
2129
2130	work_done = 0;
2131	while (work_done < budget) {
2132		u8 queue_mask;
2133		int queue;
2134		int work_tbd;
2135
2136		if (mp->work_link) {
2137			mp->work_link = 0;
2138			handle_link_event(mp);
2139			work_done++;
2140			continue;
2141		}
2142
2143		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
2144		if (likely(!mp->oom))
2145			queue_mask |= mp->work_rx_refill;
2146
2147		if (!queue_mask) {
2148			if (mv643xx_eth_collect_events(mp))
2149				continue;
2150			break;
2151		}
2152
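		/*
		 * Pick the highest-numbered queue with pending work and
		 * handle at most 16 units from it per pass, so a single
		 * busy queue cannot monopolise the whole budget.
		 */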
2153		queue = fls(queue_mask) - 1;
2154		queue_mask = 1 << queue;
2155
2156		work_tbd = budget - work_done;
2157		if (work_tbd > 16)
2158			work_tbd = 16;
2159
2160		if (mp->work_tx_end & queue_mask) {
2161			txq_kick(mp->txq + queue);
2162		} else if (mp->work_tx & queue_mask) {
2163			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
2164			txq_maybe_wake(mp->txq + queue);
2165		} else if (mp->work_rx & queue_mask) {
2166			work_done += rxq_process(mp->rxq + queue, work_tbd);
2167		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
2168			work_done += rxq_refill(mp->rxq + queue, work_tbd);
2169		} else {
2170			BUG();
2171		}
2172	}
2173
2174	if (work_done < budget) {
2175		if (mp->oom)
2176			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
2177		napi_complete(napi);
2178		wrlp(mp, INT_MASK, mp->int_mask);
2179	}
2180
2181	return work_done;
2182}
2183
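/*
 * RX-refill out-of-memory back-off: when skb allocation has failed
 * (mp->oom), this timer simply reschedules NAPI so that the refill
 * can be retried (roughly 100 ms later).
 */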
2184static inline void oom_timer_wrapper(unsigned long data)
2185{
2186	struct mv643xx_eth_private *mp = (void *)data;
2187
2188	napi_schedule(&mp->napi);
2189}
2190
2191static void phy_reset(struct mv643xx_eth_private *mp)
2192{
2193	int data;
2194
2195	data = phy_read(mp->phy, MII_BMCR);
2196	if (data < 0)
2197		return;
2198
2199	data |= BMCR_RESET;
2200	if (phy_write(mp->phy, MII_BMCR, data) < 0)
2201		return;
2202
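	/* BMCR_RESET is self-clearing; poll until the PHY drops it. */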
2203	do {
2204		data = phy_read(mp->phy, MII_BMCR);
2205	} while (data >= 0 && data & BMCR_RESET);
2206}
2207
2208static void port_start(struct mv643xx_eth_private *mp)
2209{
2210	u32 pscr;
2211	int i;
2212
2213	/*
2214	 * Perform PHY reset, if there is a PHY.
2215	 */
2216	if (mp->phy != NULL) {
2217		struct ethtool_cmd cmd;
2218
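		/*
		 * Resetting the PHY discards its current configuration,
		 * so save the settings first and restore them afterwards.
		 */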
2219		mv643xx_eth_get_settings(mp->dev, &cmd);
2220		phy_reset(mp);
2221		mv643xx_eth_set_settings(mp->dev, &cmd);
2222	}
2223
2224	/*
2225	 * Configure basic link parameters.
2226	 */
2227	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2228
2229	pscr |= SERIAL_PORT_ENABLE;
2230	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2231
2232	pscr |= DO_NOT_FORCE_LINK_FAIL;
2233	if (mp->phy == NULL)
2234		pscr |= FORCE_LINK_PASS;
2235	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2236
2237	/*
2238	 * Configure TX path and queues.
2239	 */
2240	tx_set_rate(mp, 1000000000, 16777216);
2241	for (i = 0; i < mp->txq_count; i++) {
2242		struct tx_queue *txq = mp->txq + i;
2243
2244		txq_reset_hw_ptr(txq);
2245		txq_set_rate(txq, 1000000000, 16777216);
2246		txq_set_fixed_prio_mode(txq);
2247	}
2248
2249	/*
2250	 * Steer all unmatched unicast, TCP, UDP, BPDU and broadcast
2251	 * frames to RX queue #0, and include the pseudo-header when
2252	 * calculating receive checksums.
2253	 */
2254	mv643xx_eth_set_features(mp->dev, mp->dev->features);
2255
2256	/*
2257	 * Treat BPDUs as normal multicasts, and disable partition mode.
2258	 */
2259	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
2260
2261	/*
2262	 * Add configured unicast addresses to address filter table.
2263	 */
2264	mv643xx_eth_program_unicast_filter(mp->dev);
2265
2266	/*
2267	 * Enable the receive queues.
2268	 */
2269	for (i = 0; i < mp->rxq_count; i++) {
2270		struct rx_queue *rxq = mp->rxq + i;
2271		u32 addr;
2272
2273		addr = (u32)rxq->rx_desc_dma;
2274		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
2275		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
2276
2277		rxq_enable(rxq);
2278	}
2279}
2280
2281static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
2282{
2283	int skb_size;
2284
2285	/*
2286	 * Reserve 2+14 bytes for an ethernet header (the hardware
2287	 * automatically prepends 2 bytes of dummy data to each
2288	 * received packet), 16 bytes for up to four VLAN tags, and
2289	 * 4 bytes for the trailing FCS -- 36 bytes total.
2290	 */
2291	skb_size = mp->dev->mtu + 36;
2292
2293	/*
2294	 * Make sure that the skb size is a multiple of 8 bytes, as
2295	 * the lower three bits of the receive descriptor's buffer
2296	 * size field are ignored by the hardware.
2297	 */
2298	mp->skb_size = (skb_size + 7) & ~7;
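	/*
	 * e.g. the default MTU of 1500 gives 1536 bytes here, which
	 * is already a multiple of 8.
	 */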
2299
2300	/*
2301	 * If NET_SKB_PAD is smaller than a cache line,
2302	 * netdev_alloc_skb() will cause skb->data to be misaligned
2303	 * to a cache line boundary.  If this is the case, include
2304	 * some extra space to allow re-aligning the data area.
2305	 */
2306	mp->skb_size += SKB_DMA_REALIGN;
2307}
2308
2309static int mv643xx_eth_open(struct net_device *dev)
2310{
2311	struct mv643xx_eth_private *mp = netdev_priv(dev);
2312	int err;
2313	int i;
2314
2315	wrlp(mp, INT_CAUSE, 0);
2316	wrlp(mp, INT_CAUSE_EXT, 0);
2317	rdlp(mp, INT_CAUSE_EXT);
2318
2319	err = request_irq(dev->irq, mv643xx_eth_irq,
2320			  IRQF_SHARED, dev->name, dev);
2321	if (err) {
2322		netdev_err(dev, "can't assign irq\n");
2323		return -EAGAIN;
2324	}
2325
2326	mv643xx_eth_recalc_skb_size(mp);
2327
2328	napi_enable(&mp->napi);
2329
2330	skb_queue_head_init(&mp->rx_recycle);
2331
2332	mp->int_mask = INT_EXT;
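	/*
	 * Start with the extended-cause summary bit; per-queue RX and
	 * TX-end bits are OR'ed in below as each queue is brought up.
	 */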
2333
2334	for (i = 0; i < mp->rxq_count; i++) {
2335		err = rxq_init(mp, i);
2336		if (err) {
2337			while (--i >= 0)
2338				rxq_deinit(mp->rxq + i);
2339			goto out;
2340		}
2341
2342		rxq_refill(mp->rxq + i, INT_MAX);
2343		mp->int_mask |= INT_RX_0 << i;
2344	}
2345
2346	if (mp->oom) {
2347		mp->rx_oom.expires = jiffies + (HZ / 10);
2348		add_timer(&mp->rx_oom);
2349	}
2350
2351	for (i = 0; i < mp->txq_count; i++) {
2352		err = txq_init(mp, i);
2353		if (err) {
2354			while (--i >= 0)
2355				txq_deinit(mp->txq + i);
2356			goto out_free;
2357		}
2358		mp->int_mask |= INT_TX_END_0 << i;
2359	}
2360
2361	port_start(mp);
2362
2363	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
2364	wrlp(mp, INT_MASK, mp->int_mask);
2365
2366	return 0;
2367
2368
2369out_free:
2370	for (i = 0; i < mp->rxq_count; i++)
2371		rxq_deinit(mp->rxq + i);
2372out:
2373	free_irq(dev->irq, dev);
2374
2375	return err;
2376}
2377
2378static void port_reset(struct mv643xx_eth_private *mp)
2379{
2380	unsigned int data;
2381	int i;
2382
2383	for (i = 0; i < mp->rxq_count; i++)
2384		rxq_disable(mp->rxq + i);
2385	for (i = 0; i < mp->txq_count; i++)
2386		txq_disable(mp->txq + i);
2387
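	/*
	 * Wait for the TX path to drain: the port is quiescent once
	 * TX is no longer in progress and the TX FIFO reports empty.
	 */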
2388	while (1) {
2389		u32 ps = rdlp(mp, PORT_STATUS);
2390
2391		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
2392			break;
2393		udelay(10);
2394	}
2395
2396	/* Clear the enable and forced-link bits in the port serial control register */
2397	data = rdlp(mp, PORT_SERIAL_CONTROL);
2398	data &= ~(SERIAL_PORT_ENABLE		|
2399		  DO_NOT_FORCE_LINK_FAIL	|
2400		  FORCE_LINK_PASS);
2401	wrlp(mp, PORT_SERIAL_CONTROL, data);
2402}
2403
2404static int mv643xx_eth_stop(struct net_device *dev)
2405{
2406	struct mv643xx_eth_private *mp = netdev_priv(dev);
2407	int i;
2408
2409	wrlp(mp, INT_MASK_EXT, 0x00000000);
2410	wrlp(mp, INT_MASK, 0x00000000);
2411	rdlp(mp, INT_MASK);
2412
2413	napi_disable(&mp->napi);
2414
2415	del_timer_sync(&mp->rx_oom);
2416
2417	netif_carrier_off(dev);
2418
2419	free_irq(dev->irq, dev);
2420
2421	port_reset(mp);
2422	mv643xx_eth_get_stats(dev);
2423	mib_counters_update(mp);
2424	del_timer_sync(&mp->mib_counters_timer);
2425
2426	skb_queue_purge(&mp->rx_recycle);
2427
2428	for (i = 0; i < mp->rxq_count; i++)
2429		rxq_deinit(mp->rxq + i);
2430	for (i = 0; i < mp->txq_count; i++)
2431		txq_deinit(mp->txq + i);
2432
2433	return 0;
2434}
2435
2436static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2437{
2438	struct mv643xx_eth_private *mp = netdev_priv(dev);
2439
2440	if (mp->phy != NULL)
2441		return phy_mii_ioctl(mp->phy, ifr, cmd);
2442
2443	return -EOPNOTSUPP;
2444}
2445
2446static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
2447{
2448	struct mv643xx_eth_private *mp = netdev_priv(dev);
2449
2450	if (new_mtu < 64 || new_mtu > 9500)
2451		return -EINVAL;
2452
2453	dev->mtu = new_mtu;
2454	mv643xx_eth_recalc_skb_size(mp);
2455	tx_set_rate(mp, 1000000000, 16777216);
2456
2457	if (!netif_running(dev))
2458		return 0;
2459
2460	/*
2461	 * Stop and then re-open the interface so that RX skbs of the
2462	 * new MTU size are allocated.
2463	 * Note that the re-open may fail if the system has run out of
2464	 * memory.
2465	 */
2466	mv643xx_eth_stop(dev);
2467	if (mv643xx_eth_open(dev)) {
2468		netdev_err(dev,
2469			   "fatal error on re-opening device after MTU change\n");
2470	}
2471
2472	return 0;
2473}
2474
2475static void tx_timeout_task(struct work_struct *ugly)
2476{
2477	struct mv643xx_eth_private *mp;
2478
2479	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
2480	if (netif_running(mp->dev)) {
2481		netif_tx_stop_all_queues(mp->dev);
2482		port_reset(mp);
2483		port_start(mp);
2484		netif_tx_wake_all_queues(mp->dev);
2485	}
2486}
2487
2488static void mv643xx_eth_tx_timeout(struct net_device *dev)
2489{
2490	struct mv643xx_eth_private *mp = netdev_priv(dev);
2491
2492	netdev_info(dev, "tx timeout\n");
2493
2494	schedule_work(&mp->tx_timeout_task);
2495}
2496
2497#ifdef CONFIG_NET_POLL_CONTROLLER
2498static void mv643xx_eth_netpoll(struct net_device *dev)
2499{
2500	struct mv643xx_eth_private *mp = netdev_priv(dev);
2501
2502	wrlp(mp, INT_MASK, 0x00000000);
2503	rdlp(mp, INT_MASK);
2504
2505	mv643xx_eth_irq(dev->irq, dev);
2506
2507	wrlp(mp, INT_MASK, mp->int_mask);
2508}
2509#endif
2510
2511
2512/* platform glue ************************************************************/
2513static void
2514mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
2515			      struct mbus_dram_target_info *dram)
2516{
2517	void __iomem *base = msp->base;
2518	u32 win_enable;
2519	u32 win_protect;
2520	int i;
2521
2522	for (i = 0; i < 6; i++) {
2523		writel(0, base + WINDOW_BASE(i));
2524		writel(0, base + WINDOW_SIZE(i));
2525		if (i < 4)
2526			writel(0, base + WINDOW_REMAP_HIGH(i));
2527	}
2528
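	/*
	 * win_enable appears to be active-low, so start with all six
	 * windows disabled and clear the bit for each window that is
	 * programmed below; win_protect collects two full-access bits
	 * per window, later written to each port's WINDOW_PROTECT.
	 */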
2529	win_enable = 0x3f;
2530	win_protect = 0;
2531
2532	for (i = 0; i < dram->num_cs; i++) {
2533		struct mbus_dram_window *cs = dram->cs + i;
2534
2535		writel((cs->base & 0xffff0000) |
2536			(cs->mbus_attr << 8) |
2537			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
2538		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
2539
2540		win_enable &= ~(1 << i);
2541		win_protect |= 3 << (2 * i);
2542	}
2543
2544	writel(win_enable, base + WINDOW_BAR_ENABLE);
2545	msp->win_protect = win_protect;
2546}
2547
2548static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2549{
2550	/*
2551	 * Check whether we have a 14-bit coal limit field in bits
2552	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
2553	 * SDMA config register.
2554	 */
2555	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
2556	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
2557		msp->extended_rx_coal_limit = 1;
2558	else
2559		msp->extended_rx_coal_limit = 0;
2560
2561	/*
2562	 * Check whether the MAC supports TX rate control, and if
2563	 * yes, whether its associated registers are in the old or
2564	 * the new place.
2565	 */
2566	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
2567	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
2568		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
2569	} else {
2570		writel(7, msp->base + 0x0400 + TX_BW_RATE);
2571		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
2572			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
2573		else
2574			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
2575	}
2576}
2577
2578static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2579{
2580	static int mv643xx_eth_version_printed;
2581	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2582	struct mv643xx_eth_shared_private *msp;
2583	struct resource *res;
2584	int ret;
2585
2586	if (!mv643xx_eth_version_printed++)
2587		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
2588			  mv643xx_eth_driver_version);
2589
2590	ret = -EINVAL;
2591	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2592	if (res == NULL)
2593		goto out;
2594
2595	ret = -ENOMEM;
2596	msp = kzalloc(sizeof(*msp), GFP_KERNEL);
2597	if (msp == NULL)
2598		goto out;
2599
2600	msp->base = ioremap(res->start, resource_size(res));
2601	if (msp->base == NULL)
2602		goto out_free;
2603
2604	/*
2605	 * Set up and register SMI bus.
2606	 */
2607	if (pd == NULL || pd->shared_smi == NULL) {
2608		msp->smi_bus = mdiobus_alloc();
2609		if (msp->smi_bus == NULL)
2610			goto out_unmap;
2611
2612		msp->smi_bus->priv = msp;
2613		msp->smi_bus->name = "mv643xx_eth smi";
2614		msp->smi_bus->read = smi_bus_read;
2615		msp->smi_bus->write = smi_bus_write;
2616		snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
2617		msp->smi_bus->parent = &pdev->dev;
2618		msp->smi_bus->phy_mask = 0xffffffff;
2619		if (mdiobus_register(msp->smi_bus) < 0)
2620			goto out_free_mii_bus;
2621		msp->smi = msp;
2622	} else {
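		/*
		 * Reuse the SMI bus registered by the other
		 * mv643xx_eth_shared instance named in the platform data.
		 */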
2623		msp->smi = platform_get_drvdata(pd->shared_smi);
2624	}
2625
2626	msp->err_interrupt = NO_IRQ;
2627	init_waitqueue_head(&msp->smi_busy_wait);
2628
2629	/*
2630	 * Check whether the error interrupt is hooked up.
2631	 */
2632	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2633	if (res != NULL) {
2634		int err;
2635
2636		err = request_irq(res->start, mv643xx_eth_err_irq,
2637				  IRQF_SHARED, "mv643xx_eth", msp);
2638		if (!err) {
2639			writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
2640			msp->err_interrupt = res->start;
2641		}
2642	}
2643
2644	/*
2645	 * (Re-)program MBUS remapping windows if we are asked to.
2646	 */
2647	if (pd != NULL && pd->dram != NULL)
2648		mv643xx_eth_conf_mbus_windows(msp, pd->dram);
2649
2650	/*
2651	 * Detect hardware parameters.
2652	 */
2653	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
2654	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
2655					pd->tx_csum_limit : 9 * 1024;
2656	infer_hw_params(msp);
2657
2658	platform_set_drvdata(pdev, msp);
2659
2660	return 0;
2661
2662out_free_mii_bus:
2663	mdiobus_free(msp->smi_bus);
2664out_unmap:
2665	iounmap(msp->base);
2666out_free:
2667	kfree(msp);
2668out:
2669	return ret;
2670}
2671
2672static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2673{
2674	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
2675	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2676
2677	if (pd == NULL || pd->shared_smi == NULL) {
2678		mdiobus_unregister(msp->smi_bus);
2679		mdiobus_free(msp->smi_bus);
2680	}
2681	if (msp->err_interrupt != NO_IRQ)
2682		free_irq(msp->err_interrupt, msp);
2683	iounmap(msp->base);
2684	kfree(msp);
2685
2686	return 0;
2687}
2688
2689static struct platform_driver mv643xx_eth_shared_driver = {
2690	.probe		= mv643xx_eth_shared_probe,
2691	.remove		= mv643xx_eth_shared_remove,
2692	.driver = {
2693		.name	= MV643XX_ETH_SHARED_NAME,
2694		.owner	= THIS_MODULE,
2695	},
2696};
2697
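/*
 * The shared PHY_ADDR register packs one 5-bit PHY address per port;
 * these helpers access this port's field.
 */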
2698static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
2699{
2700	int addr_shift = 5 * mp->port_num;
2701	u32 data;
2702
2703	data = rdl(mp, PHY_ADDR);
2704	data &= ~(0x1f << addr_shift);
2705	data |= (phy_addr & 0x1f) << addr_shift;
2706	wrl(mp, PHY_ADDR, data);
2707}
2708
2709static int phy_addr_get(struct mv643xx_eth_private *mp)
2710{
2711	unsigned int data;
2712
2713	data = rdl(mp, PHY_ADDR);
2714
2715	return (data >> (5 * mp->port_num)) & 0x1f;
2716}
2717
2718static void set_params(struct mv643xx_eth_private *mp,
2719		       struct mv643xx_eth_platform_data *pd)
2720{
2721	struct net_device *dev = mp->dev;
2722
2723	if (is_valid_ether_addr(pd->mac_addr))
2724		memcpy(dev->dev_addr, pd->mac_addr, 6);
2725	else
2726		uc_addr_get(mp, dev->dev_addr);
2727
2728	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
2729	if (pd->rx_queue_size)
2730		mp->rx_ring_size = pd->rx_queue_size;
2731	mp->rx_desc_sram_addr = pd->rx_sram_addr;
2732	mp->rx_desc_sram_size = pd->rx_sram_size;
2733
2734	mp->rxq_count = pd->rx_queue_count ? : 1;
2735
2736	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2737	if (pd->tx_queue_size)
2738		mp->tx_ring_size = pd->tx_queue_size;
2739	mp->tx_desc_sram_addr = pd->tx_sram_addr;
2740	mp->tx_desc_sram_size = pd->tx_sram_size;
2741
2742	mp->txq_count = pd->tx_queue_count ? : 1;
2743}
2744
2745static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2746				   int phy_addr)
2747{
2748	struct mii_bus *bus = mp->shared->smi->smi_bus;
2749	struct phy_device *phydev;
2750	int start;
2751	int num;
2752	int i;
2753
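	/*
	 * With the default PHY address we sweep all 32 SMI addresses,
	 * starting from the one currently latched in hardware;
	 * otherwise only the given address is probed.
	 */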
2754	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
2755		start = phy_addr_get(mp) & 0x1f;
2756		num = 32;
2757	} else {
2758		start = phy_addr & 0x1f;
2759		num = 1;
2760	}
2761
2762	phydev = NULL;
2763	for (i = 0; i < num; i++) {
2764		int addr = (start + i) & 0x1f;
2765
2766		if (bus->phy_map[addr] == NULL)
2767			mdiobus_scan(bus, addr);
2768
2769		if (phydev == NULL) {
2770			phydev = bus->phy_map[addr];
2771			if (phydev != NULL)
2772				phy_addr_set(mp, addr);
2773		}
2774	}
2775
2776	return phydev;
2777}
2778
2779static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
2780{
2781	struct phy_device *phy = mp->phy;
2782
2783	phy_reset(mp);
2784
2785	phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);
2786
2787	if (speed == 0) {
2788		phy->autoneg = AUTONEG_ENABLE;
2789		phy->speed = 0;
2790		phy->duplex = 0;
2791		phy->advertising = phy->supported | ADVERTISED_Autoneg;
2792	} else {
2793		phy->autoneg = AUTONEG_DISABLE;
2794		phy->advertising = 0;
2795		phy->speed = speed;
2796		phy->duplex = duplex;
2797	}
2798	phy_start_aneg(phy);
2799}
2800
2801static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
2802{
2803	u32 pscr;
2804
2805	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2806	if (pscr & SERIAL_PORT_ENABLE) {
2807		pscr &= ~SERIAL_PORT_ENABLE;
2808		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2809	}
2810
2811	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
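	/*
	 * Without a PHY, autonegotiation is disabled and the requested
	 * speed and duplex are forced directly on the MAC.
	 */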
2812	if (mp->phy == NULL) {
2813		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
2814		if (speed == SPEED_1000)
2815			pscr |= SET_GMII_SPEED_TO_1000;
2816		else if (speed == SPEED_100)
2817			pscr |= SET_MII_SPEED_TO_100;
2818
2819		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
2820
2821		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
2822		if (duplex == DUPLEX_FULL)
2823			pscr |= SET_FULL_DUPLEX_MODE;
2824	}
2825
2826	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2827}
2828
2829static const struct net_device_ops mv643xx_eth_netdev_ops = {
2830	.ndo_open		= mv643xx_eth_open,
2831	.ndo_stop		= mv643xx_eth_stop,
2832	.ndo_start_xmit		= mv643xx_eth_xmit,
2833	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
2834	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
2835	.ndo_validate_addr	= eth_validate_addr,
2836	.ndo_do_ioctl		= mv643xx_eth_ioctl,
2837	.ndo_change_mtu		= mv643xx_eth_change_mtu,
2838	.ndo_set_features	= mv643xx_eth_set_features,
2839	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
2840	.ndo_get_stats		= mv643xx_eth_get_stats,
2841#ifdef CONFIG_NET_POLL_CONTROLLER
2842	.ndo_poll_controller	= mv643xx_eth_netpoll,
2843#endif
2844};
2845
2846static int mv643xx_eth_probe(struct platform_device *pdev)
2847{
2848	struct mv643xx_eth_platform_data *pd;
2849	struct mv643xx_eth_private *mp;
2850	struct net_device *dev;
2851	struct resource *res;
2852	int err;
2853
2854	pd = pdev->dev.platform_data;
2855	if (pd == NULL) {
2856		dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
2857		return -ENODEV;
2858	}
2859
2860	if (pd->shared == NULL) {
2861		dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
2862		return -ENODEV;
2863	}
2864
2865	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
2866	if (!dev)
2867		return -ENOMEM;
2868
2869	mp = netdev_priv(dev);
2870	platform_set_drvdata(pdev, mp);
2871
2872	mp->shared = platform_get_drvdata(pd->shared);
2873	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
2874	mp->port_num = pd->port_number;
2875
2876	mp->dev = dev;
2877
2878	set_params(mp, pd);
2879	netif_set_real_num_tx_queues(dev, mp->txq_count);
2880	netif_set_real_num_rx_queues(dev, mp->rxq_count);
2881
2882	if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
2883		mp->phy = phy_scan(mp, pd->phy_addr);
2884
2885	if (mp->phy != NULL)
2886		phy_init(mp, pd->speed, pd->duplex);
2887
2888	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
2889
2890	init_pscr(mp, pd->speed, pd->duplex);
2891
2892
2893	mib_counters_clear(mp);
2894
2895	init_timer(&mp->mib_counters_timer);
2896	mp->mib_counters_timer.data = (unsigned long)mp;
2897	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
2898	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
2899	add_timer(&mp->mib_counters_timer);
2900
2901	spin_lock_init(&mp->mib_counters_lock);
2902
2903	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
2904
2905	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
2906
2907	init_timer(&mp->rx_oom);
2908	mp->rx_oom.data = (unsigned long)mp;
2909	mp->rx_oom.function = oom_timer_wrapper;
2910
2911
2912	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2913	BUG_ON(!res);
2914	dev->irq = res->start;
2915
2916	dev->netdev_ops = &mv643xx_eth_netdev_ops;
2917
2918	dev->watchdog_timeo = 2 * HZ;
2919	dev->base_addr = 0;
2920
2921	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
2922		NETIF_F_RXCSUM | NETIF_F_LRO;
2923	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
2924	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2925
2926	SET_NETDEV_DEV(dev, &pdev->dev);
2927
2928	if (mp->shared->win_protect)
2929		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
2930
2931	netif_carrier_off(dev);
2932
2933	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
2934
2935	set_rx_coal(mp, 250);
2936	set_tx_coal(mp, 0);
2937
2938	err = register_netdev(dev);
2939	if (err)
2940		goto out;
2941
2942	netdev_notice(dev, "port %d with MAC address %pM\n",
2943		      mp->port_num, dev->dev_addr);
2944
2945	if (mp->tx_desc_sram_size > 0)
2946		netdev_notice(dev, "configured with sram\n");
2947
2948	return 0;
2949
2950out:
2951	free_netdev(dev);
2952
2953	return err;
2954}
2955
2956static int mv643xx_eth_remove(struct platform_device *pdev)
2957{
2958	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2959
2960	unregister_netdev(mp->dev);
2961	if (mp->phy != NULL)
2962		phy_detach(mp->phy);
2963	cancel_work_sync(&mp->tx_timeout_task);
2964	free_netdev(mp->dev);
2965
2966	platform_set_drvdata(pdev, NULL);
2967
2968	return 0;
2969}
2970
2971static void mv643xx_eth_shutdown(struct platform_device *pdev)
2972{
2973	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2974
2975	/* Mask all interrupts on ethernet port */
2976	wrlp(mp, INT_MASK, 0);
2977	rdlp(mp, INT_MASK);
2978
2979	if (netif_running(mp->dev))
2980		port_reset(mp);
2981}
2982
2983static struct platform_driver mv643xx_eth_driver = {
2984	.probe		= mv643xx_eth_probe,
2985	.remove		= mv643xx_eth_remove,
2986	.shutdown	= mv643xx_eth_shutdown,
2987	.driver = {
2988		.name	= MV643XX_ETH_NAME,
2989		.owner	= THIS_MODULE,
2990	},
2991};
2992
2993static int __init mv643xx_eth_init_module(void)
2994{
2995	int rc;
2996
2997	rc = platform_driver_register(&mv643xx_eth_shared_driver);
2998	if (!rc) {
2999		rc = platform_driver_register(&mv643xx_eth_driver);
3000		if (rc)
3001			platform_driver_unregister(&mv643xx_eth_shared_driver);
3002	}
3003
3004	return rc;
3005}
3006module_init(mv643xx_eth_init_module);
3007
3008static void __exit mv643xx_eth_cleanup_module(void)
3009{
3010	platform_driver_unregister(&mv643xx_eth_driver);
3011	platform_driver_unregister(&mv643xx_eth_shared_driver);
3012}
3013module_exit(mv643xx_eth_cleanup_module);
3014
3015MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
3016	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
3017MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
3018MODULE_LICENSE("GPL");
3019MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
3020MODULE_ALIAS("platform:" MV643XX_ETH_NAME);