   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
   4 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
   5 *
   6 * Based on the 64360 driver from:
   7 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
   8 *		      Rabeeh Khoury <rabeeh@marvell.com>
   9 *
  10 * Copyright (C) 2003 PMC-Sierra, Inc.,
  11 *	written by Manish Lachwani
  12 *
  13 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
  14 *
  15 * Copyright (C) 2004-2006 MontaVista Software, Inc.
  16 *			   Dale Farnsworth <dale@farnsworth.org>
  17 *
  18 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
  19 *				     <sjhill@realitydiluted.com>
  20 *
  21 * Copyright (C) 2007-2008 Marvell Semiconductor
  22 *			   Lennert Buytenhek <buytenh@marvell.com>
  23 *
  24 * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
  25 */
  26
  27#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  28
  29#include <linux/init.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/in.h>
  32#include <linux/ip.h>
  33#include <net/tso.h>
  34#include <linux/tcp.h>
  35#include <linux/udp.h>
  36#include <linux/etherdevice.h>
  37#include <linux/delay.h>
  38#include <linux/ethtool.h>
  39#include <linux/platform_device.h>
  40#include <linux/module.h>
  41#include <linux/kernel.h>
  42#include <linux/spinlock.h>
  43#include <linux/workqueue.h>
  44#include <linux/phy.h>
  45#include <linux/mv643xx_eth.h>
  46#include <linux/io.h>
  47#include <linux/interrupt.h>
  48#include <linux/types.h>
  49#include <linux/slab.h>
  50#include <linux/clk.h>
  51#include <linux/of.h>
  52#include <linux/of_irq.h>
  53#include <linux/of_net.h>
  54#include <linux/of_mdio.h>
  55
  56static char mv643xx_eth_driver_name[] = "mv643xx_eth";
  57static char mv643xx_eth_driver_version[] = "1.4";
  58
  59
  60/*
  61 * Registers shared between all ports.
  62 */
  63#define PHY_ADDR			0x0000
  64#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
  65#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
  66#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
  67#define WINDOW_BAR_ENABLE		0x0290
  68#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
  69
  70/*
  71 * Main per-port registers.  These live at offset 0x0400 for
  72 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
  73 */
  74#define PORT_CONFIG			0x0000
  75#define  UNICAST_PROMISCUOUS_MODE	0x00000001
  76#define PORT_CONFIG_EXT			0x0004
  77#define MAC_ADDR_LOW			0x0014
  78#define MAC_ADDR_HIGH			0x0018
  79#define SDMA_CONFIG			0x001c
  80#define  TX_BURST_SIZE_16_64BIT		0x01000000
  81#define  TX_BURST_SIZE_4_64BIT		0x00800000
  82#define  BLM_TX_NO_SWAP			0x00000020
  83#define  BLM_RX_NO_SWAP			0x00000010
  84#define  RX_BURST_SIZE_16_64BIT		0x00000008
  85#define  RX_BURST_SIZE_4_64BIT		0x00000004
  86#define PORT_SERIAL_CONTROL		0x003c
  87#define  SET_MII_SPEED_TO_100		0x01000000
  88#define  SET_GMII_SPEED_TO_1000		0x00800000
  89#define  SET_FULL_DUPLEX_MODE		0x00200000
  90#define  MAX_RX_PACKET_9700BYTE		0x000a0000
  91#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
  92#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
  93#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
  94#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
  95#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
  96#define  FORCE_LINK_PASS		0x00000002
  97#define  SERIAL_PORT_ENABLE		0x00000001
  98#define PORT_STATUS			0x0044
  99#define  TX_FIFO_EMPTY			0x00000400
 100#define  TX_IN_PROGRESS			0x00000080
 101#define  PORT_SPEED_MASK		0x00000030
 102#define  PORT_SPEED_1000		0x00000010
 103#define  PORT_SPEED_100			0x00000020
 104#define  PORT_SPEED_10			0x00000000
 105#define  FLOW_CONTROL_ENABLED		0x00000008
 106#define  FULL_DUPLEX			0x00000004
 107#define  LINK_UP			0x00000002
 108#define TXQ_COMMAND			0x0048
 109#define TXQ_FIX_PRIO_CONF		0x004c
 110#define PORT_SERIAL_CONTROL1		0x004c
 111#define  RGMII_EN			0x00000008
 112#define  CLK125_BYPASS_EN		0x00000010
 113#define TX_BW_RATE			0x0050
 114#define TX_BW_MTU			0x0058
 115#define TX_BW_BURST			0x005c
 116#define INT_CAUSE			0x0060
 117#define  INT_TX_END			0x07f80000
 118#define  INT_TX_END_0			0x00080000
 119#define  INT_RX				0x000003fc
 120#define  INT_RX_0			0x00000004
 121#define  INT_EXT			0x00000002
 122#define INT_CAUSE_EXT			0x0064
 123#define  INT_EXT_LINK_PHY		0x00110000
 124#define  INT_EXT_TX			0x000000ff
 125#define INT_MASK			0x0068
 126#define INT_MASK_EXT			0x006c
 127#define TX_FIFO_URGENT_THRESHOLD	0x0074
 128#define RX_DISCARD_FRAME_CNT		0x0084
 129#define RX_OVERRUN_FRAME_CNT		0x0088
 130#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
 131#define TX_BW_RATE_MOVED		0x00e0
 132#define TX_BW_MTU_MOVED			0x00e8
 133#define TX_BW_BURST_MOVED		0x00ec
 134#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
 135#define RXQ_COMMAND			0x0280
 136#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
 137#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
 138#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
 139#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))
 140
 141/*
 142 * Misc per-port registers.
 143 */
 144#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
 145#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
 146#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
 147#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
 148
 149
 150/*
 151 * SDMA configuration register default value.
 152 */
 153#if defined(__BIG_ENDIAN)
 154#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
 155		(RX_BURST_SIZE_4_64BIT	|	\
 156		 TX_BURST_SIZE_4_64BIT)
 157#elif defined(__LITTLE_ENDIAN)
 158#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
 159		(RX_BURST_SIZE_4_64BIT	|	\
 160		 BLM_RX_NO_SWAP		|	\
 161		 BLM_TX_NO_SWAP		|	\
 162		 TX_BURST_SIZE_4_64BIT)
 163#else
 164#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
 165#endif
 166
 167
 168/*
 169 * Misc definitions.
 170 */
 171#define DEFAULT_RX_QUEUE_SIZE	128
 172#define DEFAULT_TX_QUEUE_SIZE	512
 173#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
 174
 175/* Max number of allowed TCP segments for software TSO */
 176#define MV643XX_MAX_TSO_SEGS 100
 177#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
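/*
 * Worst-case descriptor budget for one skb: each software TSO segment
 * needs a header descriptor plus at least one data descriptor (hence
 * the factor of two), with extra slack for the page fragments of a
 * non-TSO skb.
 */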
 178
 179#define IS_TSO_HEADER(txq, addr) \
 180	((addr >= txq->tso_hdrs_dma) && \
 181	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
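/*
 * A descriptor whose buffer address falls inside the per-queue TSO
 * header area (one coherent allocation of tx_ring_size * TSO_HEADER_SIZE
 * bytes) was not mapped with dma_map_single()/dma_map_page(), so
 * txq_reclaim() must not unmap it.
 */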
 182
 183#define DESC_DMA_MAP_SINGLE 0
 184#define DESC_DMA_MAP_PAGE 1
 185
 186/*
 187 * RX/TX descriptors.
 188 */
 189#if defined(__BIG_ENDIAN)
 190struct rx_desc {
 191	u16 byte_cnt;		/* Descriptor buffer byte count		*/
 192	u16 buf_size;		/* Buffer size				*/
 193	u32 cmd_sts;		/* Descriptor command status		*/
 194	u32 next_desc_ptr;	/* Next descriptor pointer		*/
 195	u32 buf_ptr;		/* Descriptor buffer pointer		*/
 196};
 197
 198struct tx_desc {
 199	u16 byte_cnt;		/* buffer byte count			*/
 200	u16 l4i_chk;		/* CPU provided TCP checksum		*/
 201	u32 cmd_sts;		/* Command/status field			*/
 202	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
 203	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
 204};
 205#elif defined(__LITTLE_ENDIAN)
 206struct rx_desc {
 207	u32 cmd_sts;		/* Descriptor command status		*/
 208	u16 buf_size;		/* Buffer size				*/
 209	u16 byte_cnt;		/* Descriptor buffer byte count		*/
 210	u32 buf_ptr;		/* Descriptor buffer pointer		*/
 211	u32 next_desc_ptr;	/* Next descriptor pointer		*/
 212};
 213
 214struct tx_desc {
 215	u32 cmd_sts;		/* Command/status field			*/
 216	u16 l4i_chk;		/* CPU provided TCP checksum		*/
 217	u16 byte_cnt;		/* buffer byte count			*/
 218	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
 219	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
 220};
 221#else
 222#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
 223#endif
 224
 225/* RX & TX descriptor command */
 226#define BUFFER_OWNED_BY_DMA		0x80000000
 227
 228/* RX & TX descriptor status */
 229#define ERROR_SUMMARY			0x00000001
 230
 231/* RX descriptor status */
 232#define LAYER_4_CHECKSUM_OK		0x40000000
 233#define RX_ENABLE_INTERRUPT		0x20000000
 234#define RX_FIRST_DESC			0x08000000
 235#define RX_LAST_DESC			0x04000000
 236#define RX_IP_HDR_OK			0x02000000
 237#define RX_PKT_IS_IPV4			0x01000000
 238#define RX_PKT_IS_ETHERNETV2		0x00800000
 239#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
 240#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
 241#define RX_PKT_IS_VLAN_TAGGED		0x00080000
 242
 243/* TX descriptor command */
 244#define TX_ENABLE_INTERRUPT		0x00800000
 245#define GEN_CRC				0x00400000
 246#define TX_FIRST_DESC			0x00200000
 247#define TX_LAST_DESC			0x00100000
 248#define ZERO_PADDING			0x00080000
 249#define GEN_IP_V4_CHECKSUM		0x00040000
 250#define GEN_TCP_UDP_CHECKSUM		0x00020000
 251#define UDP_FRAME			0x00010000
 252#define MAC_HDR_EXTRA_4_BYTES		0x00008000
 253#define GEN_TCP_UDP_CHK_FULL		0x00000400
 254#define MAC_HDR_EXTRA_8_BYTES		0x00000200
 255
 256#define TX_IHL_SHIFT			11
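/*
 * The IPv4 header length (IHL, in 32-bit words) is placed at bit 11 of
 * the TX descriptor command word; a standard 20-byte header is encoded
 * as 5 << TX_IHL_SHIFT.
 */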
 257
 258
 259/* global *******************************************************************/
 260struct mv643xx_eth_shared_private {
 261	/*
 262	 * Ethernet controller base address.
 263	 */
 264	void __iomem *base;
 265
 266	/*
 267	 * Per-port MBUS window access register value.
 268	 */
 269	u32 win_protect;
 270
 271	/*
 272	 * Hardware-specific parameters.
 273	 */
 274	int extended_rx_coal_limit;
 275	int tx_bw_control;
 276	int tx_csum_limit;
 277	struct clk *clk;
 278};
 279
 280#define TX_BW_CONTROL_ABSENT		0
 281#define TX_BW_CONTROL_OLD_LAYOUT	1
 282#define TX_BW_CONTROL_NEW_LAYOUT	2
 283
 284static int mv643xx_eth_open(struct net_device *dev);
 285static int mv643xx_eth_stop(struct net_device *dev);
 286
 287
 288/* per-port *****************************************************************/
 289struct mib_counters {
 290	u64 good_octets_received;
 291	u32 bad_octets_received;
 292	u32 internal_mac_transmit_err;
 293	u32 good_frames_received;
 294	u32 bad_frames_received;
 295	u32 broadcast_frames_received;
 296	u32 multicast_frames_received;
 297	u32 frames_64_octets;
 298	u32 frames_65_to_127_octets;
 299	u32 frames_128_to_255_octets;
 300	u32 frames_256_to_511_octets;
 301	u32 frames_512_to_1023_octets;
 302	u32 frames_1024_to_max_octets;
 303	u64 good_octets_sent;
 304	u32 good_frames_sent;
 305	u32 excessive_collision;
 306	u32 multicast_frames_sent;
 307	u32 broadcast_frames_sent;
 308	u32 unrec_mac_control_received;
 309	u32 fc_sent;
 310	u32 good_fc_received;
 311	u32 bad_fc_received;
 312	u32 undersize_received;
 313	u32 fragments_received;
 314	u32 oversize_received;
 315	u32 jabber_received;
 316	u32 mac_receive_error;
 317	u32 bad_crc_event;
 318	u32 collision;
 319	u32 late_collision;
 320	/* Non MIB hardware counters */
 321	u32 rx_discard;
 322	u32 rx_overrun;
 323};
 324
 325struct rx_queue {
 326	int index;
 327
 328	int rx_ring_size;
 329
 330	int rx_desc_count;
 331	int rx_curr_desc;
 332	int rx_used_desc;
 333
 334	struct rx_desc *rx_desc_area;
 335	dma_addr_t rx_desc_dma;
 336	int rx_desc_area_size;
 337	struct sk_buff **rx_skb;
 338};
 339
 340struct tx_queue {
 341	int index;
 342
 343	int tx_ring_size;
 344
 345	int tx_desc_count;
 346	int tx_curr_desc;
 347	int tx_used_desc;
 348
 349	int tx_stop_threshold;
 350	int tx_wake_threshold;
 351
 352	char *tso_hdrs;
 353	dma_addr_t tso_hdrs_dma;
 354
 355	struct tx_desc *tx_desc_area;
 356	char *tx_desc_mapping; /* array to track the type of the dma mapping */
 357	dma_addr_t tx_desc_dma;
 358	int tx_desc_area_size;
 359
 360	struct sk_buff_head tx_skb;
 361
 362	unsigned long tx_packets;
 363	unsigned long tx_bytes;
 364	unsigned long tx_dropped;
 365};
 366
 367struct mv643xx_eth_private {
 368	struct mv643xx_eth_shared_private *shared;
 369	void __iomem *base;
 370	int port_num;
 371
 372	struct net_device *dev;
 373
 374	struct timer_list mib_counters_timer;
 375	spinlock_t mib_counters_lock;
 376	struct mib_counters mib_counters;
 377
 378	struct work_struct tx_timeout_task;
 379
 380	struct napi_struct napi;
 381	u32 int_mask;
 382	u8 oom;
 383	u8 work_link;
 384	u8 work_tx;
 385	u8 work_tx_end;
 386	u8 work_rx;
 387	u8 work_rx_refill;
 388
 389	int skb_size;
 390
 391	/*
 392	 * RX state.
 393	 */
 394	int rx_ring_size;
 395	unsigned long rx_desc_sram_addr;
 396	int rx_desc_sram_size;
 397	int rxq_count;
 398	struct timer_list rx_oom;
 399	struct rx_queue rxq[8];
 400
 401	/*
 402	 * TX state.
 403	 */
 404	int tx_ring_size;
 405	unsigned long tx_desc_sram_addr;
 406	int tx_desc_sram_size;
 407	int txq_count;
 408	struct tx_queue txq[8];
 409
 410	/*
 411	 * Hardware-specific parameters.
 412	 */
 413	struct clk *clk;
 414	unsigned int t_clk;
 415};
 416
 417
 418/* port register accessors **************************************************/
 419static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
 420{
 421	return readl(mp->shared->base + offset);
 422}
 423
 424static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
 425{
 426	return readl(mp->base + offset);
 427}
 428
 429static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
 430{
 431	writel(data, mp->shared->base + offset);
 432}
 433
 434static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
 435{
 436	writel(data, mp->base + offset);
 437}
 438
 439
 440/* rxq/txq helper functions *************************************************/
 441static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
 442{
 443	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
 444}
 445
 446static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
 447{
 448	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
 449}
 450
 451static void rxq_enable(struct rx_queue *rxq)
 452{
 453	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 454	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
 455}
 456
 457static void rxq_disable(struct rx_queue *rxq)
 458{
 459	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 460	u8 mask = 1 << rxq->index;
 461
 462	wrlp(mp, RXQ_COMMAND, mask << 8);
 463	while (rdlp(mp, RXQ_COMMAND) & mask)
 464		udelay(10);
 465}
 466
 467static void txq_reset_hw_ptr(struct tx_queue *txq)
 468{
 469	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 470	u32 addr;
 471
 472	addr = (u32)txq->tx_desc_dma;
 473	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
 474	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
 475}
 476
 477static void txq_enable(struct tx_queue *txq)
 478{
 479	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 480	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
 481}
 482
 483static void txq_disable(struct tx_queue *txq)
 484{
 485	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 486	u8 mask = 1 << txq->index;
 487
 488	wrlp(mp, TXQ_COMMAND, mask << 8);
 489	while (rdlp(mp, TXQ_COMMAND) & mask)
 490		udelay(10);
 491}
 492
 493static void txq_maybe_wake(struct tx_queue *txq)
 494{
 495	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 496	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 497
 498	if (netif_tx_queue_stopped(nq)) {
 499		__netif_tx_lock(nq, smp_processor_id());
 500		if (txq->tx_desc_count <= txq->tx_wake_threshold)
 501			netif_tx_wake_queue(nq);
 502		__netif_tx_unlock(nq);
 503	}
 504}
 505
 506static int rxq_process(struct rx_queue *rxq, int budget)
 507{
 508	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 509	struct net_device_stats *stats = &mp->dev->stats;
 510	int rx;
 511
 512	rx = 0;
 513	while (rx < budget && rxq->rx_desc_count) {
 514		struct rx_desc *rx_desc;
 515		unsigned int cmd_sts;
 516		struct sk_buff *skb;
 517		u16 byte_cnt;
 518
 519		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
 520
 521		cmd_sts = rx_desc->cmd_sts;
 522		if (cmd_sts & BUFFER_OWNED_BY_DMA)
 523			break;
 524		rmb();
 525
 526		skb = rxq->rx_skb[rxq->rx_curr_desc];
 527		rxq->rx_skb[rxq->rx_curr_desc] = NULL;
 528
 529		rxq->rx_curr_desc++;
 530		if (rxq->rx_curr_desc == rxq->rx_ring_size)
 531			rxq->rx_curr_desc = 0;
 532
 533		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
 534				 rx_desc->buf_size, DMA_FROM_DEVICE);
 535		rxq->rx_desc_count--;
 536		rx++;
 537
 538		mp->work_rx_refill |= 1 << rxq->index;
 539
 540		byte_cnt = rx_desc->byte_cnt;
 541
 542		/*
 543		 * Update statistics.
 544		 *
 545		 * Note that the descriptor byte count includes 2 dummy
 546		 * bytes automatically inserted by the hardware at the
 547		 * start of the packet (which we don't count), and a 4
 548		 * byte CRC at the end of the packet (which we do count).
 549		 */
 550		stats->rx_packets++;
 551		stats->rx_bytes += byte_cnt - 2;
 552
 553		/*
 554		 * In case we received a packet without first / last bits
 555		 * on, or the error summary bit is set, the packet needs
 556		 * to be dropped.
 557		 */
 558		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
 559			!= (RX_FIRST_DESC | RX_LAST_DESC))
 560			goto err;
 561
 562		/*
 563		 * The -4 is for the CRC in the trailer of the
 564		 * received packet
 565		 */
 566		skb_put(skb, byte_cnt - 2 - 4);
 567
 568		if (cmd_sts & LAYER_4_CHECKSUM_OK)
 569			skb->ip_summed = CHECKSUM_UNNECESSARY;
 570		skb->protocol = eth_type_trans(skb, mp->dev);
 571
 572		napi_gro_receive(&mp->napi, skb);
 573
 574		continue;
 575
 576err:
 577		stats->rx_dropped++;
 578
 579		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
 580			(RX_FIRST_DESC | RX_LAST_DESC)) {
 581			if (net_ratelimit())
 582				netdev_err(mp->dev,
 583					   "received packet spanning multiple descriptors\n");
 584		}
 585
 586		if (cmd_sts & ERROR_SUMMARY)
 587			stats->rx_errors++;
 588
 589		dev_kfree_skb(skb);
 590	}
 591
 592	if (rx < budget)
 593		mp->work_rx &= ~(1 << rxq->index);
 594
 595	return rx;
 596}
 597
 598static int rxq_refill(struct rx_queue *rxq, int budget)
 599{
 600	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 601	int refilled;
 602
 603	refilled = 0;
 604	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
 605		struct sk_buff *skb;
 606		int rx;
 607		struct rx_desc *rx_desc;
 608		int size;
 609
 610		skb = netdev_alloc_skb(mp->dev, mp->skb_size);
 611
 612		if (skb == NULL) {
 613			mp->oom = 1;
 614			goto oom;
 615		}
 616
 617		if (SKB_DMA_REALIGN)
 618			skb_reserve(skb, SKB_DMA_REALIGN);
 619
 620		refilled++;
 621		rxq->rx_desc_count++;
 622
 623		rx = rxq->rx_used_desc++;
 624		if (rxq->rx_used_desc == rxq->rx_ring_size)
 625			rxq->rx_used_desc = 0;
 626
 627		rx_desc = rxq->rx_desc_area + rx;
 628
 629		size = skb_end_pointer(skb) - skb->data;
 630		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
 631						  skb->data, size,
 632						  DMA_FROM_DEVICE);
 633		rx_desc->buf_size = size;
 634		rxq->rx_skb[rx] = skb;
 635		wmb();
 636		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
 637		wmb();
 638
 639		/*
 640		 * The hardware automatically prepends 2 bytes of
 641		 * dummy data to each received packet, so that the
 642		 * IP header ends up 16-byte aligned.
 643		 */
 644		skb_reserve(skb, 2);
 645	}
 646
 647	if (refilled < budget)
 648		mp->work_rx_refill &= ~(1 << rxq->index);
 649
 650oom:
 651	return refilled;
 652}
 653
 654
 655/* tx ***********************************************************************/
 656static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
 657{
 658	int frag;
 659
 660	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 661		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
 662
 663		if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
 664			return 1;
 665	}
 666
 667	return 0;
 668}
 669
 670static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
 671		       u16 *l4i_chk, u32 *command, int length)
 672{
 673	int ret;
 674	u32 cmd = 0;
 675
 676	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 677		int hdr_len;
 678		int tag_bytes;
 679
 680		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
 681		       skb->protocol != htons(ETH_P_8021Q));
 682
 683		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
 684		tag_bytes = hdr_len - ETH_HLEN;
 685
 686		if (length - hdr_len > mp->shared->tx_csum_limit ||
 687		    unlikely(tag_bytes & ~12)) {
 688			ret = skb_checksum_help(skb);
 689			if (!ret)
 690				goto no_csum;
 691			return ret;
 692		}
 693
 694		if (tag_bytes & 4)
 695			cmd |= MAC_HDR_EXTRA_4_BYTES;
 696		if (tag_bytes & 8)
 697			cmd |= MAC_HDR_EXTRA_8_BYTES;
 698
 699		cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
 700			   GEN_IP_V4_CHECKSUM   |
 701			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
 702
 703		/* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
 704		 * it seems we don't need to pass the initial checksum.
 705		 */
 706		switch (ip_hdr(skb)->protocol) {
 707		case IPPROTO_UDP:
 708			cmd |= UDP_FRAME;
 709			*l4i_chk = 0;
 710			break;
 711		case IPPROTO_TCP:
 712			*l4i_chk = 0;
 713			break;
 714		default:
 715			WARN(1, "protocol not supported");
 716		}
 717	} else {
 718no_csum:
 719		/* Errata BTS #50, IHL must be 5 if no HW checksum */
 720		cmd |= 5 << TX_IHL_SHIFT;
 721	}
 722	*command = cmd;
 723	return 0;
 724}
 725
 726static inline int
 727txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
 728		 struct sk_buff *skb, char *data, int length,
 729		 bool last_tcp, bool is_last)
 730{
 731	int tx_index;
 732	u32 cmd_sts;
 733	struct tx_desc *desc;
 734
 735	tx_index = txq->tx_curr_desc++;
 736	if (txq->tx_curr_desc == txq->tx_ring_size)
 737		txq->tx_curr_desc = 0;
 738	desc = &txq->tx_desc_area[tx_index];
 739	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 740
 741	desc->l4i_chk = 0;
 742	desc->byte_cnt = length;
 743
 744	if (length <= 8 && (uintptr_t)data & 0x7) {
 745		/* Copy unaligned small data fragment to TSO header data area */
 746		memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
 747		       data, length);
 748		desc->buf_ptr = txq->tso_hdrs_dma
 749			+ tx_index * TSO_HEADER_SIZE;
 750	} else {
 751		/* Alignment is okay, map buffer and hand off to hardware */
 752		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 753		desc->buf_ptr = dma_map_single(dev->dev.parent, data,
 754			length, DMA_TO_DEVICE);
 755		if (unlikely(dma_mapping_error(dev->dev.parent,
 756					       desc->buf_ptr))) {
 757			WARN(1, "dma_map_single failed!\n");
 758			return -ENOMEM;
 759		}
 760	}
 761
 762	cmd_sts = BUFFER_OWNED_BY_DMA;
 763	if (last_tcp) {
 764		/* last descriptor in the TCP packet */
 765		cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
 766		/* last descriptor in SKB */
 767		if (is_last)
 768			cmd_sts |= TX_ENABLE_INTERRUPT;
 769	}
 770	desc->cmd_sts = cmd_sts;
 771	return 0;
 772}
 773
 774static inline void
 775txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
 776		u32 *first_cmd_sts, bool first_desc)
 777{
 778	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 779	int hdr_len = skb_tcp_all_headers(skb);
 780	int tx_index;
 781	struct tx_desc *desc;
 782	int ret;
 783	u32 cmd_csum = 0;
 784	u16 l4i_chk = 0;
 785	u32 cmd_sts;
 786
 787	tx_index = txq->tx_curr_desc;
 788	desc = &txq->tx_desc_area[tx_index];
 789
 790	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
 791	if (ret)
 792		WARN(1, "failed to prepare checksum!");
 793
 794	/* Should we set this? Can't use the value from skb_tx_csum()
 795	 * as it's not the correct initial L4 checksum to use.
 796	 */
 797	desc->l4i_chk = 0;
 798
 799	desc->byte_cnt = hdr_len;
 800	desc->buf_ptr = txq->tso_hdrs_dma +
 801			txq->tx_curr_desc * TSO_HEADER_SIZE;
 802	cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA  | TX_FIRST_DESC |
 803				   GEN_CRC;
 804
 805	/* Defer updating the first command descriptor until all
 806	 * following descriptors have been written.
 807	 */
 808	if (first_desc)
 809		*first_cmd_sts = cmd_sts;
 810	else
 811		desc->cmd_sts = cmd_sts;
 812
 813	txq->tx_curr_desc++;
 814	if (txq->tx_curr_desc == txq->tx_ring_size)
 815		txq->tx_curr_desc = 0;
 816}
 817
 818static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
 819			  struct net_device *dev)
 820{
 821	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 822	int hdr_len, total_len, data_left, ret;
 823	int desc_count = 0;
 824	struct tso_t tso;
 825	struct tx_desc *first_tx_desc;
 826	u32 first_cmd_sts = 0;
 827
 828	/* Count needed descriptors */
 829	if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
 830		netdev_dbg(dev, "not enough descriptors for TSO!\n");
 831		return -EBUSY;
 832	}
 833
 834	first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
 835
 836	/* Initialize the TSO handler, and prepare the first payload */
 837	hdr_len = tso_start(skb, &tso);
 838
 839	total_len = skb->len - hdr_len;
 840	while (total_len > 0) {
 841		bool first_desc = (desc_count == 0);
 842		char *hdr;
 843
 844		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
 845		total_len -= data_left;
 846		desc_count++;
 847
 848		/* prepare packet headers: MAC + IP + TCP */
 849		hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
 850		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
 851		txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
 852				first_desc);
 853
 854		while (data_left > 0) {
 855			int size;
 856			desc_count++;
 857
 858			size = min_t(int, tso.size, data_left);
 859			ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
 860					       size == data_left,
 861					       total_len == 0);
 862			if (ret)
 863				goto err_release;
 864			data_left -= size;
 865			tso_build_data(skb, &tso, size);
 866		}
 867	}
 868
 869	__skb_queue_tail(&txq->tx_skb, skb);
 870	skb_tx_timestamp(skb);
 871
 872	/* ensure all other descriptors are written before first cmd_sts */
 873	wmb();
 874	first_tx_desc->cmd_sts = first_cmd_sts;
 875
 876	/* clear TX_END status */
 877	mp->work_tx_end &= ~(1 << txq->index);
 878
 879	/* ensure all descriptors are written before poking hardware */
 880	wmb();
 881	txq_enable(txq);
 882	txq->tx_desc_count += desc_count;
 883	return 0;
 884err_release:
 885	/* TODO: Release all used data descriptors; header descriptors must not
 886	 * be DMA-unmapped.
 887	 */
 888	return ret;
 889}
 890
 891static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 892{
 893	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 894	int nr_frags = skb_shinfo(skb)->nr_frags;
 895	int frag;
 896
 897	for (frag = 0; frag < nr_frags; frag++) {
 898		skb_frag_t *this_frag;
 899		int tx_index;
 900		struct tx_desc *desc;
 901
 902		this_frag = &skb_shinfo(skb)->frags[frag];
 903		tx_index = txq->tx_curr_desc++;
 904		if (txq->tx_curr_desc == txq->tx_ring_size)
 905			txq->tx_curr_desc = 0;
 906		desc = &txq->tx_desc_area[tx_index];
 907		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
 908
 909		/*
 910		 * The last fragment will generate an interrupt
 911		 * which will free the skb on TX completion.
 912		 */
 913		if (frag == nr_frags - 1) {
 914			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
 915					ZERO_PADDING | TX_LAST_DESC |
 916					TX_ENABLE_INTERRUPT;
 917		} else {
 918			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
 919		}
 920
 921		desc->l4i_chk = 0;
 922		desc->byte_cnt = skb_frag_size(this_frag);
 923		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
 924						 this_frag, 0, desc->byte_cnt,
 925						 DMA_TO_DEVICE);
 926	}
 927}
 928
 929static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
 930			  struct net_device *dev)
 931{
 932	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 933	int nr_frags = skb_shinfo(skb)->nr_frags;
 934	int tx_index;
 935	struct tx_desc *desc;
 936	u32 cmd_sts;
 937	u16 l4i_chk;
 938	int length, ret;
 939
 940	cmd_sts = 0;
 941	l4i_chk = 0;
 942
 943	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
 944		if (net_ratelimit())
 945			netdev_err(dev, "tx queue full?!\n");
 946		return -EBUSY;
 947	}
 948
 949	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
 950	if (ret)
 951		return ret;
 952	cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
 953
 954	tx_index = txq->tx_curr_desc++;
 955	if (txq->tx_curr_desc == txq->tx_ring_size)
 956		txq->tx_curr_desc = 0;
 957	desc = &txq->tx_desc_area[tx_index];
 958	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 959
 960	if (nr_frags) {
 961		txq_submit_frag_skb(txq, skb);
 962		length = skb_headlen(skb);
 963	} else {
 964		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
 965		length = skb->len;
 966	}
 967
 968	desc->l4i_chk = l4i_chk;
 969	desc->byte_cnt = length;
 970	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
 971				       length, DMA_TO_DEVICE);
 972
 973	__skb_queue_tail(&txq->tx_skb, skb);
 974
 975	skb_tx_timestamp(skb);
 976
 977	/* ensure all other descriptors are written before first cmd_sts */
 978	wmb();
 979	desc->cmd_sts = cmd_sts;
 980
 981	/* clear TX_END status */
 982	mp->work_tx_end &= ~(1 << txq->index);
 983
 984	/* ensure all descriptors are written before poking hardware */
 985	wmb();
 986	txq_enable(txq);
 987
 988	txq->tx_desc_count += nr_frags + 1;
 989
 990	return 0;
 991}
 992
 993static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 994{
 995	struct mv643xx_eth_private *mp = netdev_priv(dev);
 996	int length, queue, ret;
 997	struct tx_queue *txq;
 998	struct netdev_queue *nq;
 999
1000	queue = skb_get_queue_mapping(skb);
1001	txq = mp->txq + queue;
1002	nq = netdev_get_tx_queue(dev, queue);
1003
1004	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
1005		netdev_printk(KERN_DEBUG, dev,
1006			      "failed to linearize skb with tiny unaligned fragment\n");
1007		return NETDEV_TX_BUSY;
1008	}
1009
1010	length = skb->len;
1011
1012	if (skb_is_gso(skb))
1013		ret = txq_submit_tso(txq, skb, dev);
1014	else
1015		ret = txq_submit_skb(txq, skb, dev);
1016	if (!ret) {
1017		txq->tx_bytes += length;
1018		txq->tx_packets++;
1019
1020		if (txq->tx_desc_count >= txq->tx_stop_threshold)
1021			netif_tx_stop_queue(nq);
1022	} else {
1023		txq->tx_dropped++;
1024		dev_kfree_skb_any(skb);
1025	}
1026
1027	return NETDEV_TX_OK;
1028}
1029
1030
1031/* tx napi ******************************************************************/
1032static void txq_kick(struct tx_queue *txq)
1033{
1034	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1035	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1036	u32 hw_desc_ptr;
1037	u32 expected_ptr;
1038
1039	__netif_tx_lock(nq, smp_processor_id());
1040
1041	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
1042		goto out;
1043
1044	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
1045	expected_ptr = (u32)txq->tx_desc_dma +
1046				txq->tx_curr_desc * sizeof(struct tx_desc);
1047
1048	if (hw_desc_ptr != expected_ptr)
1049		txq_enable(txq);
1050
1051out:
1052	__netif_tx_unlock(nq);
1053
1054	mp->work_tx_end &= ~(1 << txq->index);
1055}
1056
1057static int txq_reclaim(struct tx_queue *txq, int budget, int force)
1058{
1059	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1060	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1061	int reclaimed;
1062
1063	__netif_tx_lock_bh(nq);
1064
1065	reclaimed = 0;
1066	while (reclaimed < budget && txq->tx_desc_count > 0) {
1067		int tx_index;
1068		struct tx_desc *desc;
1069		u32 cmd_sts;
1070		char desc_dma_map;
1071
1072		tx_index = txq->tx_used_desc;
1073		desc = &txq->tx_desc_area[tx_index];
1074		desc_dma_map = txq->tx_desc_mapping[tx_index];
1075
1076		cmd_sts = desc->cmd_sts;
1077
1078		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
1079			if (!force)
1080				break;
1081			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
1082		}
1083
1084		txq->tx_used_desc = tx_index + 1;
1085		if (txq->tx_used_desc == txq->tx_ring_size)
1086			txq->tx_used_desc = 0;
1087
1088		reclaimed++;
1089		txq->tx_desc_count--;
1090
1091		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
1092
1093			if (desc_dma_map == DESC_DMA_MAP_PAGE)
1094				dma_unmap_page(mp->dev->dev.parent,
1095					       desc->buf_ptr,
1096					       desc->byte_cnt,
1097					       DMA_TO_DEVICE);
1098			else
1099				dma_unmap_single(mp->dev->dev.parent,
1100						 desc->buf_ptr,
1101						 desc->byte_cnt,
1102						 DMA_TO_DEVICE);
1103		}
1104
1105		if (cmd_sts & TX_ENABLE_INTERRUPT) {
1106			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
1107
1108			if (!WARN_ON(!skb))
1109				dev_consume_skb_any(skb);
1110		}
1111
1112		if (cmd_sts & ERROR_SUMMARY) {
1113			netdev_info(mp->dev, "tx error\n");
1114			mp->dev->stats.tx_errors++;
1115		}
1116
1117	}
1118
1119	__netif_tx_unlock_bh(nq);
1120
1121	if (reclaimed < budget)
1122		mp->work_tx &= ~(1 << txq->index);
1123
1124	return reclaimed;
1125}
1126
1127
1128/* tx rate control **********************************************************/
1129/*
1130 * Set total maximum TX rate (shared by all TX queues for this port)
1131 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
1132 */
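/*
 * Example, assuming a 133 MHz t_clk (a common value on these SoCs):
 * a 100 Mbit/s rate gives a token rate of
 * ((100000000 / 1000) * 64) / (133000000 / 1000) = 48.
 */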
1133static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
1134{
1135	int token_rate;
1136	int mtu;
1137	int bucket_size;
1138
1139	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
1140	if (token_rate > 1023)
1141		token_rate = 1023;
1142
1143	mtu = (mp->dev->mtu + 255) >> 8;
1144	if (mtu > 63)
1145		mtu = 63;
1146
1147	bucket_size = (burst + 255) >> 8;
1148	if (bucket_size > 65535)
1149		bucket_size = 65535;
1150
1151	switch (mp->shared->tx_bw_control) {
1152	case TX_BW_CONTROL_OLD_LAYOUT:
1153		wrlp(mp, TX_BW_RATE, token_rate);
1154		wrlp(mp, TX_BW_MTU, mtu);
1155		wrlp(mp, TX_BW_BURST, bucket_size);
1156		break;
1157	case TX_BW_CONTROL_NEW_LAYOUT:
1158		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
1159		wrlp(mp, TX_BW_MTU_MOVED, mtu);
1160		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
1161		break;
1162	}
1163}
1164
1165static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
1166{
1167	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1168	int token_rate;
1169	int bucket_size;
1170
1171	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
1172	if (token_rate > 1023)
1173		token_rate = 1023;
1174
1175	bucket_size = (burst + 255) >> 8;
1176	if (bucket_size > 65535)
1177		bucket_size = 65535;
1178
1179	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
1180	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
1181}
1182
1183static void txq_set_fixed_prio_mode(struct tx_queue *txq)
1184{
1185	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1186	int off;
1187	u32 val;
1188
1189	/*
1190	 * Turn on fixed priority mode.
1191	 */
1192	off = 0;
1193	switch (mp->shared->tx_bw_control) {
1194	case TX_BW_CONTROL_OLD_LAYOUT:
1195		off = TXQ_FIX_PRIO_CONF;
1196		break;
1197	case TX_BW_CONTROL_NEW_LAYOUT:
1198		off = TXQ_FIX_PRIO_CONF_MOVED;
1199		break;
1200	}
1201
1202	if (off) {
1203		val = rdlp(mp, off);
1204		val |= 1 << txq->index;
1205		wrlp(mp, off, val);
1206	}
1207}
1208
1209
1210/* mii management interface *************************************************/
1211static void mv643xx_eth_adjust_link(struct net_device *dev)
1212{
1213	struct mv643xx_eth_private *mp = netdev_priv(dev);
1214	u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
1215	u32 autoneg_disable = FORCE_LINK_PASS |
1216	             DISABLE_AUTO_NEG_SPEED_GMII |
1217		     DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
1218		     DISABLE_AUTO_NEG_FOR_DUPLEX;
1219
1220	if (dev->phydev->autoneg == AUTONEG_ENABLE) {
1221		/* enable auto negotiation */
1222		pscr &= ~autoneg_disable;
1223		goto out_write;
1224	}
1225
1226	pscr |= autoneg_disable;
1227
1228	if (dev->phydev->speed == SPEED_1000) {
1229		/* force gigabit, half duplex not supported */
1230		pscr |= SET_GMII_SPEED_TO_1000;
1231		pscr |= SET_FULL_DUPLEX_MODE;
1232		goto out_write;
1233	}
1234
1235	pscr &= ~SET_GMII_SPEED_TO_1000;
1236
1237	if (dev->phydev->speed == SPEED_100)
1238		pscr |= SET_MII_SPEED_TO_100;
1239	else
1240		pscr &= ~SET_MII_SPEED_TO_100;
1241
1242	if (dev->phydev->duplex == DUPLEX_FULL)
1243		pscr |= SET_FULL_DUPLEX_MODE;
1244	else
1245		pscr &= ~SET_FULL_DUPLEX_MODE;
1246
1247out_write:
1248	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
1249}
1250
1251/* statistics ***************************************************************/
1252static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1253{
1254	struct mv643xx_eth_private *mp = netdev_priv(dev);
1255	struct net_device_stats *stats = &dev->stats;
1256	unsigned long tx_packets = 0;
1257	unsigned long tx_bytes = 0;
1258	unsigned long tx_dropped = 0;
1259	int i;
1260
1261	for (i = 0; i < mp->txq_count; i++) {
1262		struct tx_queue *txq = mp->txq + i;
1263
1264		tx_packets += txq->tx_packets;
1265		tx_bytes += txq->tx_bytes;
1266		tx_dropped += txq->tx_dropped;
1267	}
1268
1269	stats->tx_packets = tx_packets;
1270	stats->tx_bytes = tx_bytes;
1271	stats->tx_dropped = tx_dropped;
1272
1273	return stats;
1274}
1275
1276static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
1277{
1278	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
1279}
1280
1281static void mib_counters_clear(struct mv643xx_eth_private *mp)
1282{
1283	int i;
1284
1285	for (i = 0; i < 0x80; i += 4)
1286		mib_read(mp, i);
1287
1288	/* Clear non MIB hw counters also */
1289	rdlp(mp, RX_DISCARD_FRAME_CNT);
1290	rdlp(mp, RX_OVERRUN_FRAME_CNT);
1291}
1292
1293static void mib_counters_update(struct mv643xx_eth_private *mp)
1294{
1295	struct mib_counters *p = &mp->mib_counters;
1296
1297	spin_lock_bh(&mp->mib_counters_lock);
1298	p->good_octets_received += mib_read(mp, 0x00);
1299	p->bad_octets_received += mib_read(mp, 0x08);
1300	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
1301	p->good_frames_received += mib_read(mp, 0x10);
1302	p->bad_frames_received += mib_read(mp, 0x14);
1303	p->broadcast_frames_received += mib_read(mp, 0x18);
1304	p->multicast_frames_received += mib_read(mp, 0x1c);
1305	p->frames_64_octets += mib_read(mp, 0x20);
1306	p->frames_65_to_127_octets += mib_read(mp, 0x24);
1307	p->frames_128_to_255_octets += mib_read(mp, 0x28);
1308	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
1309	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
1310	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
1311	p->good_octets_sent += mib_read(mp, 0x38);
1312	p->good_frames_sent += mib_read(mp, 0x40);
1313	p->excessive_collision += mib_read(mp, 0x44);
1314	p->multicast_frames_sent += mib_read(mp, 0x48);
1315	p->broadcast_frames_sent += mib_read(mp, 0x4c);
1316	p->unrec_mac_control_received += mib_read(mp, 0x50);
1317	p->fc_sent += mib_read(mp, 0x54);
1318	p->good_fc_received += mib_read(mp, 0x58);
1319	p->bad_fc_received += mib_read(mp, 0x5c);
1320	p->undersize_received += mib_read(mp, 0x60);
1321	p->fragments_received += mib_read(mp, 0x64);
1322	p->oversize_received += mib_read(mp, 0x68);
1323	p->jabber_received += mib_read(mp, 0x6c);
1324	p->mac_receive_error += mib_read(mp, 0x70);
1325	p->bad_crc_event += mib_read(mp, 0x74);
1326	p->collision += mib_read(mp, 0x78);
1327	p->late_collision += mib_read(mp, 0x7c);
1328	/* Non MIB hardware counters */
1329	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
1330	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
1331	spin_unlock_bh(&mp->mib_counters_lock);
1332}
1333
1334static void mib_counters_timer_wrapper(struct timer_list *t)
1335{
1336	struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
1337	mib_counters_update(mp);
1338	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
1339}
1340
1341
1342/* interrupt coalescing *****************************************************/
1343/*
1344 * Hardware coalescing parameters are set in units of 64 t_clk
1345 * cycles.  I.e.:
1346 *
1347 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
1348 *
1349 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
1350 *
1351 * In the ->set*() methods, we round the computed register value
1352 * to the nearest integer.
1353 */
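/*
 * Example, assuming a 133 MHz t_clk: a requested delay of 100 usec is
 * stored as roughly 100 * 133000000 / 64000000 = 208, and reading 208
 * back yields roughly 208 * 64000000 / 133000000 = 100 usec again.
 */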
1354static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
1355{
1356	u32 val = rdlp(mp, SDMA_CONFIG);
1357	u64 temp;
1358
1359	if (mp->shared->extended_rx_coal_limit)
1360		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
1361	else
1362		temp = (val & 0x003fff00) >> 8;
1363
1364	temp *= 64000000;
1365	temp += mp->t_clk / 2;
1366	do_div(temp, mp->t_clk);
1367
1368	return (unsigned int)temp;
1369}
1370
1371static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1372{
1373	u64 temp;
1374	u32 val;
1375
1376	temp = (u64)usec * mp->t_clk;
1377	temp += 31999999;
1378	do_div(temp, 64000000);
1379
1380	val = rdlp(mp, SDMA_CONFIG);
1381	if (mp->shared->extended_rx_coal_limit) {
1382		if (temp > 0xffff)
1383			temp = 0xffff;
1384		val &= ~0x023fff80;
1385		val |= (temp & 0x8000) << 10;
1386		val |= (temp & 0x7fff) << 7;
1387	} else {
1388		if (temp > 0x3fff)
1389			temp = 0x3fff;
1390		val &= ~0x003fff00;
1391		val |= (temp & 0x3fff) << 8;
1392	}
1393	wrlp(mp, SDMA_CONFIG, val);
1394}
1395
1396static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
1397{
1398	u64 temp;
1399
1400	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
1401	temp *= 64000000;
1402	temp += mp->t_clk / 2;
1403	do_div(temp, mp->t_clk);
1404
1405	return (unsigned int)temp;
1406}
1407
1408static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1409{
1410	u64 temp;
1411
1412	temp = (u64)usec * mp->t_clk;
1413	temp += 31999999;
1414	do_div(temp, 64000000);
1415
1416	if (temp > 0x3fff)
1417		temp = 0x3fff;
1418
1419	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
1420}
1421
1422
1423/* ethtool ******************************************************************/
1424struct mv643xx_eth_stats {
1425	char stat_string[ETH_GSTRING_LEN];
1426	int sizeof_stat;
1427	int netdev_off;
1428	int mp_off;
1429};
1430
1431#define SSTAT(m)						\
1432	{ #m, sizeof_field(struct net_device_stats, m),		\
1433	  offsetof(struct net_device, stats.m), -1 }
1434
1435#define MIBSTAT(m)						\
1436	{ #m, sizeof_field(struct mib_counters, m),		\
1437	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
1438
1439static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
1440	SSTAT(rx_packets),
1441	SSTAT(tx_packets),
1442	SSTAT(rx_bytes),
1443	SSTAT(tx_bytes),
1444	SSTAT(rx_errors),
1445	SSTAT(tx_errors),
1446	SSTAT(rx_dropped),
1447	SSTAT(tx_dropped),
1448	MIBSTAT(good_octets_received),
1449	MIBSTAT(bad_octets_received),
1450	MIBSTAT(internal_mac_transmit_err),
1451	MIBSTAT(good_frames_received),
1452	MIBSTAT(bad_frames_received),
1453	MIBSTAT(broadcast_frames_received),
1454	MIBSTAT(multicast_frames_received),
1455	MIBSTAT(frames_64_octets),
1456	MIBSTAT(frames_65_to_127_octets),
1457	MIBSTAT(frames_128_to_255_octets),
1458	MIBSTAT(frames_256_to_511_octets),
1459	MIBSTAT(frames_512_to_1023_octets),
1460	MIBSTAT(frames_1024_to_max_octets),
1461	MIBSTAT(good_octets_sent),
1462	MIBSTAT(good_frames_sent),
1463	MIBSTAT(excessive_collision),
1464	MIBSTAT(multicast_frames_sent),
1465	MIBSTAT(broadcast_frames_sent),
1466	MIBSTAT(unrec_mac_control_received),
1467	MIBSTAT(fc_sent),
1468	MIBSTAT(good_fc_received),
1469	MIBSTAT(bad_fc_received),
1470	MIBSTAT(undersize_received),
1471	MIBSTAT(fragments_received),
1472	MIBSTAT(oversize_received),
1473	MIBSTAT(jabber_received),
1474	MIBSTAT(mac_receive_error),
1475	MIBSTAT(bad_crc_event),
1476	MIBSTAT(collision),
1477	MIBSTAT(late_collision),
1478	MIBSTAT(rx_discard),
1479	MIBSTAT(rx_overrun),
1480};
1481
1482static int
1483mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
1484				   struct ethtool_link_ksettings *cmd)
1485{
1486	struct net_device *dev = mp->dev;
1487
1488	phy_ethtool_ksettings_get(dev->phydev, cmd);
1489
1490	/*
1491	 * The MAC does not support 1000baseT_Half.
1492	 */
1493	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
1494			   cmd->link_modes.supported);
1495	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
1496			   cmd->link_modes.advertising);
1497
1498	return 0;
1499}
1500
1501static int
1502mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp,
1503				       struct ethtool_link_ksettings *cmd)
1504{
1505	u32 port_status;
1506	u32 supported, advertising;
1507
1508	port_status = rdlp(mp, PORT_STATUS);
1509
1510	supported = SUPPORTED_MII;
1511	advertising = ADVERTISED_MII;
1512	switch (port_status & PORT_SPEED_MASK) {
1513	case PORT_SPEED_10:
1514		cmd->base.speed = SPEED_10;
1515		break;
1516	case PORT_SPEED_100:
1517		cmd->base.speed = SPEED_100;
1518		break;
1519	case PORT_SPEED_1000:
1520		cmd->base.speed = SPEED_1000;
1521		break;
1522	default:
1523		cmd->base.speed = -1;
1524		break;
1525	}
1526	cmd->base.duplex = (port_status & FULL_DUPLEX) ?
1527		DUPLEX_FULL : DUPLEX_HALF;
1528	cmd->base.port = PORT_MII;
1529	cmd->base.phy_address = 0;
1530	cmd->base.autoneg = AUTONEG_DISABLE;
1531
1532	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1533						supported);
1534	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1535						advertising);
1536
1537	return 0;
1538}
1539
1540static void
1541mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1542{
1543	wol->supported = 0;
1544	wol->wolopts = 0;
1545	if (dev->phydev)
1546		phy_ethtool_get_wol(dev->phydev, wol);
1547}
1548
1549static int
1550mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1551{
1552	int err;
1553
1554	if (!dev->phydev)
1555		return -EOPNOTSUPP;
1556
1557	err = phy_ethtool_set_wol(dev->phydev, wol);
1558	/* Given that mv643xx_eth works without the marvell-specific PHY driver,
1559	 * this debugging hint is useful to have.
1560	 */
1561	if (err == -EOPNOTSUPP)
1562		netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
1563	return err;
1564}
1565
1566static int
1567mv643xx_eth_get_link_ksettings(struct net_device *dev,
1568			       struct ethtool_link_ksettings *cmd)
1569{
1570	struct mv643xx_eth_private *mp = netdev_priv(dev);
1571
1572	if (dev->phydev)
1573		return mv643xx_eth_get_link_ksettings_phy(mp, cmd);
1574	else
1575		return mv643xx_eth_get_link_ksettings_phyless(mp, cmd);
1576}
1577
1578static int
1579mv643xx_eth_set_link_ksettings(struct net_device *dev,
1580			       const struct ethtool_link_ksettings *cmd)
1581{
1582	struct ethtool_link_ksettings c = *cmd;
1583	u32 advertising;
1584	int ret;
1585
1586	if (!dev->phydev)
1587		return -EINVAL;
1588
1589	/*
1590	 * The MAC does not support 1000baseT_Half.
1591	 */
1592	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1593						c.link_modes.advertising);
1594	advertising &= ~ADVERTISED_1000baseT_Half;
1595	ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising,
1596						advertising);
1597
1598	ret = phy_ethtool_ksettings_set(dev->phydev, &c);
1599	if (!ret)
1600		mv643xx_eth_adjust_link(dev);
1601	return ret;
1602}
1603
1604static void mv643xx_eth_get_drvinfo(struct net_device *dev,
1605				    struct ethtool_drvinfo *drvinfo)
1606{
1607	strscpy(drvinfo->driver, mv643xx_eth_driver_name,
1608		sizeof(drvinfo->driver));
1609	strscpy(drvinfo->version, mv643xx_eth_driver_version,
1610		sizeof(drvinfo->version));
1611	strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
1612	strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
1613}
1614
1615static int mv643xx_eth_get_coalesce(struct net_device *dev,
1616				    struct ethtool_coalesce *ec,
1617				    struct kernel_ethtool_coalesce *kernel_coal,
1618				    struct netlink_ext_ack *extack)
1619{
1620	struct mv643xx_eth_private *mp = netdev_priv(dev);
1621
1622	ec->rx_coalesce_usecs = get_rx_coal(mp);
1623	ec->tx_coalesce_usecs = get_tx_coal(mp);
1624
1625	return 0;
1626}
1627
1628static int mv643xx_eth_set_coalesce(struct net_device *dev,
1629				    struct ethtool_coalesce *ec,
1630				    struct kernel_ethtool_coalesce *kernel_coal,
1631				    struct netlink_ext_ack *extack)
1632{
1633	struct mv643xx_eth_private *mp = netdev_priv(dev);
1634
1635	set_rx_coal(mp, ec->rx_coalesce_usecs);
1636	set_tx_coal(mp, ec->tx_coalesce_usecs);
1637
1638	return 0;
1639}
1640
1641static void
1642mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
1643			  struct kernel_ethtool_ringparam *kernel_er,
1644			  struct netlink_ext_ack *extack)
1645{
1646	struct mv643xx_eth_private *mp = netdev_priv(dev);
1647
1648	er->rx_max_pending = 4096;
1649	er->tx_max_pending = 4096;
1650
1651	er->rx_pending = mp->rx_ring_size;
1652	er->tx_pending = mp->tx_ring_size;
1653}
1654
1655static int
1656mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
1657			  struct kernel_ethtool_ringparam *kernel_er,
1658			  struct netlink_ext_ack *extack)
1659{
1660	struct mv643xx_eth_private *mp = netdev_priv(dev);
1661
1662	if (er->rx_mini_pending || er->rx_jumbo_pending)
1663		return -EINVAL;
1664
1665	mp->rx_ring_size = min(er->rx_pending, 4096U);
1666	mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
1667				   MV643XX_MAX_SKB_DESCS * 2, 4096);
1668	if (mp->tx_ring_size != er->tx_pending)
1669		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
1670			    mp->tx_ring_size, er->tx_pending);
1671
1672	if (netif_running(dev)) {
1673		mv643xx_eth_stop(dev);
1674		if (mv643xx_eth_open(dev)) {
1675			netdev_err(dev,
1676				   "fatal error on re-opening device after ring param change\n");
1677			return -ENOMEM;
1678		}
1679	}
1680
1681	return 0;
1682}
1683
1684
1685static int
1686mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
1687{
1688	struct mv643xx_eth_private *mp = netdev_priv(dev);
1689	bool rx_csum = features & NETIF_F_RXCSUM;
1690
1691	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
1692
1693	return 0;
1694}
1695
1696static void mv643xx_eth_get_strings(struct net_device *dev,
1697				    uint32_t stringset, uint8_t *data)
1698{
1699	int i;
1700
1701	if (stringset == ETH_SS_STATS)
1702		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++)
1703			ethtool_puts(&data, mv643xx_eth_stats[i].stat_string);
1704}
1705
1706static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
1707					  struct ethtool_stats *stats,
1708					  uint64_t *data)
1709{
1710	struct mv643xx_eth_private *mp = netdev_priv(dev);
1711	int i;
1712
1713	mv643xx_eth_get_stats(dev);
1714	mib_counters_update(mp);
1715
1716	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
1717		const struct mv643xx_eth_stats *stat;
1718		void *p;
1719
1720		stat = mv643xx_eth_stats + i;
1721
1722		if (stat->netdev_off >= 0)
1723			p = ((void *)mp->dev) + stat->netdev_off;
1724		else
1725			p = ((void *)mp) + stat->mp_off;
1726
1727		data[i] = (stat->sizeof_stat == 8) ?
1728				*(uint64_t *)p : *(uint32_t *)p;
1729	}
1730}
1731
1732static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
1733{
1734	if (sset == ETH_SS_STATS)
1735		return ARRAY_SIZE(mv643xx_eth_stats);
1736
1737	return -EOPNOTSUPP;
1738}
1739
1740static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
1741	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
1742	.get_drvinfo		= mv643xx_eth_get_drvinfo,
1743	.nway_reset		= phy_ethtool_nway_reset,
1744	.get_link		= ethtool_op_get_link,
1745	.get_coalesce		= mv643xx_eth_get_coalesce,
1746	.set_coalesce		= mv643xx_eth_set_coalesce,
1747	.get_ringparam		= mv643xx_eth_get_ringparam,
1748	.set_ringparam		= mv643xx_eth_set_ringparam,
1749	.get_strings		= mv643xx_eth_get_strings,
1750	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
1751	.get_sset_count		= mv643xx_eth_get_sset_count,
1752	.get_ts_info		= ethtool_op_get_ts_info,
1753	.get_wol                = mv643xx_eth_get_wol,
1754	.set_wol                = mv643xx_eth_set_wol,
1755	.get_link_ksettings	= mv643xx_eth_get_link_ksettings,
1756	.set_link_ksettings	= mv643xx_eth_set_link_ksettings,
1757};
1758
1759
1760/* address handling *********************************************************/
1761static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1762{
1763	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
1764	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
1765
1766	addr[0] = (mac_h >> 24) & 0xff;
1767	addr[1] = (mac_h >> 16) & 0xff;
1768	addr[2] = (mac_h >> 8) & 0xff;
1769	addr[3] = mac_h & 0xff;
1770	addr[4] = (mac_l >> 8) & 0xff;
1771	addr[5] = mac_l & 0xff;
1772}
1773
1774static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr)
1775{
1776	wrlp(mp, MAC_ADDR_HIGH,
1777		(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
1778	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
1779}
1780
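/*
 * The port matches unicast frames on the first five bytes plus the high
 * nibble of the last address byte; the low nibble indexes a 16-entry
 * enable table.  Return a mask with one bit set per nibble to enable,
 * or 0 to fall back to unicast promiscuous mode.
 */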
1781static u32 uc_addr_filter_mask(struct net_device *dev)
1782{
1783	struct netdev_hw_addr *ha;
1784	u32 nibbles;
1785
1786	if (dev->flags & IFF_PROMISC)
1787		return 0;
1788
1789	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
1790	netdev_for_each_uc_addr(ha, dev) {
1791		if (memcmp(dev->dev_addr, ha->addr, 5))
1792			return 0;
1793		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
1794			return 0;
1795
1796		nibbles |= 1 << (ha->addr[5] & 0x0f);
1797	}
1798
1799	return nibbles;
1800}
1801
1802static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
1803{
1804	struct mv643xx_eth_private *mp = netdev_priv(dev);
1805	u32 port_config;
1806	u32 nibbles;
1807	int i;
1808
1809	uc_addr_set(mp, dev->dev_addr);
1810
1811	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
1812
1813	nibbles = uc_addr_filter_mask(dev);
1814	if (!nibbles) {
1815		port_config |= UNICAST_PROMISCUOUS_MODE;
1816		nibbles = 0xffff;
1817	}
1818
1819	for (i = 0; i < 16; i += 4) {
1820		int off = UNICAST_TABLE(mp->port_num) + i;
1821		u32 v;
1822
1823		v = 0;
1824		if (nibbles & 1)
1825			v |= 0x00000001;
1826		if (nibbles & 2)
1827			v |= 0x00000100;
1828		if (nibbles & 4)
1829			v |= 0x00010000;
1830		if (nibbles & 8)
1831			v |= 0x01000000;
1832		nibbles >>= 4;
1833
1834		wrl(mp, off, v);
1835	}
1836
1837	wrlp(mp, PORT_CONFIG, port_config);
1838}
1839
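/*
 * 8-bit CRC (polynomial 0x107) over the six address bytes, used as the
 * index into the 256-entry "other multicast" hash table.
 */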
1840static int addr_crc(unsigned char *addr)
1841{
1842	int crc = 0;
1843	int i;
1844
1845	for (i = 0; i < 6; i++) {
1846		int j;
1847
1848		crc = (crc ^ addr[i]) << 8;
1849		for (j = 7; j >= 0; j--) {
1850			if (crc & (0x100 << j))
1851				crc ^= 0x107 << j;
1852		}
1853	}
1854
1855	return crc;
1856}
1857
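/*
 * Multicast filtering uses two 256-entry tables: addresses of the form
 * 01:00:5e:00:00:xx are indexed directly by their last byte into the
 * "special" table, everything else is hashed with addr_crc() into the
 * "other" table.
 */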
1858static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
1859{
1860	struct mv643xx_eth_private *mp = netdev_priv(dev);
1861	u32 *mc_spec;
1862	u32 *mc_other;
1863	struct netdev_hw_addr *ha;
1864	int i;
1865
1866	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
1867		goto promiscuous;
1868
1869	/* Allocate both mc_spec and mc_other tables */
1870	mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC);
1871	if (!mc_spec)
1872		goto promiscuous;
1873	mc_other = &mc_spec[64];
1874
1875	netdev_for_each_mc_addr(ha, dev) {
1876		u8 *a = ha->addr;
1877		u32 *table;
1878		u8 entry;
1879
1880		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
1881			table = mc_spec;
1882			entry = a[5];
1883		} else {
1884			table = mc_other;
1885			entry = addr_crc(a);
1886		}
1887
1888		table[entry >> 2] |= 1 << (8 * (entry & 3));
1889	}
1890
1891	for (i = 0; i < 64; i++) {
1892		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1893		    mc_spec[i]);
1894		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1895		    mc_other[i]);
1896	}
1897
1898	kfree(mc_spec);
1899	return;
1900
1901promiscuous:
1902	for (i = 0; i < 64; i++) {
1903		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1904		    0x01010101u);
1905		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1906		    0x01010101u);
1907	}
1908}
1909
1910static void mv643xx_eth_set_rx_mode(struct net_device *dev)
1911{
1912	mv643xx_eth_program_unicast_filter(dev);
1913	mv643xx_eth_program_multicast_filter(dev);
1914}
1915
1916static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
1917{
1918	struct sockaddr *sa = addr;
1919
1920	if (!is_valid_ether_addr(sa->sa_data))
1921		return -EADDRNOTAVAIL;
1922
1923	eth_hw_addr_set(dev, sa->sa_data);
1924
1925	netif_addr_lock_bh(dev);
1926	mv643xx_eth_program_unicast_filter(dev);
1927	netif_addr_unlock_bh(dev);
1928
1929	return 0;
1930}
1931
1932
1933/* rx/tx queue initialisation ***********************************************/
1934static int rxq_init(struct mv643xx_eth_private *mp, int index)
1935{
1936	struct rx_queue *rxq = mp->rxq + index;
1937	struct rx_desc *rx_desc;
1938	int size;
1939	int i;
1940
1941	rxq->index = index;
1942
1943	rxq->rx_ring_size = mp->rx_ring_size;
1944
1945	rxq->rx_desc_count = 0;
1946	rxq->rx_curr_desc = 0;
1947	rxq->rx_used_desc = 0;
1948
1949	size = rxq->rx_ring_size * sizeof(struct rx_desc);
1950
1951	if (index == 0 && size <= mp->rx_desc_sram_size) {
1952		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
1953						mp->rx_desc_sram_size);
1954		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
1955	} else {
1956		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1957						       size, &rxq->rx_desc_dma,
1958						       GFP_KERNEL);
1959	}
1960
1961	if (rxq->rx_desc_area == NULL) {
1962		netdev_err(mp->dev,
1963			   "can't allocate rx ring (%d bytes)\n", size);
1964		goto out;
1965	}
1966	memset(rxq->rx_desc_area, 0, size);
1967
1968	rxq->rx_desc_area_size = size;
1969	rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
1970				    GFP_KERNEL);
1971	if (rxq->rx_skb == NULL)
1972		goto out_free;
1973
1974	rx_desc = rxq->rx_desc_area;
1975	for (i = 0; i < rxq->rx_ring_size; i++) {
1976		int nexti;
1977
1978		nexti = i + 1;
1979		if (nexti == rxq->rx_ring_size)
1980			nexti = 0;
1981
1982		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
1983					nexti * sizeof(struct rx_desc);
1984	}
1985
1986	return 0;
1987
1988
1989out_free:
1990	if (index == 0 && size <= mp->rx_desc_sram_size)
1991		iounmap(rxq->rx_desc_area);
1992	else
1993		dma_free_coherent(mp->dev->dev.parent, size,
1994				  rxq->rx_desc_area,
1995				  rxq->rx_desc_dma);
1996
1997out:
1998	return -ENOMEM;
1999}
2000
2001static void rxq_deinit(struct rx_queue *rxq)
2002{
2003	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
2004	int i;
2005
2006	rxq_disable(rxq);
2007
2008	for (i = 0; i < rxq->rx_ring_size; i++) {
2009		if (rxq->rx_skb[i]) {
2010			dev_consume_skb_any(rxq->rx_skb[i]);
2011			rxq->rx_desc_count--;
2012		}
2013	}
2014
2015	if (rxq->rx_desc_count) {
2016		netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
2017			   rxq->rx_desc_count);
2018	}
2019
2020	if (rxq->index == 0 &&
2021	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
2022		iounmap(rxq->rx_desc_area);
2023	else
2024		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
2025				  rxq->rx_desc_area, rxq->rx_desc_dma);
2026
2027	kfree(rxq->rx_skb);
2028}
2029
2030static int txq_init(struct mv643xx_eth_private *mp, int index)
2031{
2032	struct tx_queue *txq = mp->txq + index;
2033	struct tx_desc *tx_desc;
2034	int size;
2035	int ret;
2036	int i;
2037
2038	txq->index = index;
2039
2040	txq->tx_ring_size = mp->tx_ring_size;
2041
2042	/* A queue must always have room for at least one skb.
2043	 * Therefore, stop the queue when the number of free entries
2044	 * drops to the maximum number of descriptors per skb.
2045	 */
2046	txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
2047	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
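	/* Illustrative numbers, assuming the default 512-entry ring and
	 * MAX_SKB_FRAGS == 17: MV643XX_MAX_SKB_DESCS is 2 * 100 + 17 = 217,
	 * giving a stop threshold of 295 used descriptors and a wake
	 * threshold of 147.
	 */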
2048
2049	txq->tx_desc_count = 0;
2050	txq->tx_curr_desc = 0;
2051	txq->tx_used_desc = 0;
2052
2053	size = txq->tx_ring_size * sizeof(struct tx_desc);
2054
2055	if (index == 0 && size <= mp->tx_desc_sram_size) {
2056		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
2057						mp->tx_desc_sram_size);
2058		txq->tx_desc_dma = mp->tx_desc_sram_addr;
2059	} else {
2060		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
2061						       size, &txq->tx_desc_dma,
2062						       GFP_KERNEL);
2063	}
2064
2065	if (txq->tx_desc_area == NULL) {
2066		netdev_err(mp->dev,
2067			   "can't allocate tx ring (%d bytes)\n", size);
2068		return -ENOMEM;
2069	}
2070	memset(txq->tx_desc_area, 0, size);
2071
2072	txq->tx_desc_area_size = size;
2073
2074	tx_desc = txq->tx_desc_area;
2075	for (i = 0; i < txq->tx_ring_size; i++) {
2076		struct tx_desc *txd = tx_desc + i;
2077		int nexti;
2078
2079		nexti = i + 1;
2080		if (nexti == txq->tx_ring_size)
2081			nexti = 0;
2082
2083		txd->cmd_sts = 0;
2084		txd->next_desc_ptr = txq->tx_desc_dma +
2085					nexti * sizeof(struct tx_desc);
2086	}
2087
2088	txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
2089				       GFP_KERNEL);
2090	if (!txq->tx_desc_mapping) {
2091		ret = -ENOMEM;
2092		goto err_free_desc_area;
2093	}
2094
2095	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2096	txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
2097					   txq->tx_ring_size * TSO_HEADER_SIZE,
2098					   &txq->tso_hdrs_dma, GFP_KERNEL);
2099	if (txq->tso_hdrs == NULL) {
2100		ret = -ENOMEM;
2101		goto err_free_desc_mapping;
2102	}
2103	skb_queue_head_init(&txq->tx_skb);
2104
2105	return 0;
2106
2107err_free_desc_mapping:
2108	kfree(txq->tx_desc_mapping);
2109err_free_desc_area:
2110	if (index == 0 && size <= mp->tx_desc_sram_size)
2111		iounmap(txq->tx_desc_area);
2112	else
2113		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2114				  txq->tx_desc_area, txq->tx_desc_dma);
2115	return ret;
2116}
2117
2118static void txq_deinit(struct tx_queue *txq)
2119{
2120	struct mv643xx_eth_private *mp = txq_to_mp(txq);
2121
2122	txq_disable(txq);
2123	txq_reclaim(txq, txq->tx_ring_size, 1);
2124
2125	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
2126
2127	if (txq->index == 0 &&
2128	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
2129		iounmap(txq->tx_desc_area);
2130	else
2131		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2132				  txq->tx_desc_area, txq->tx_desc_dma);
2133	kfree(txq->tx_desc_mapping);
2134
2135	if (txq->tso_hdrs)
2136		dma_free_coherent(mp->dev->dev.parent,
2137				  txq->tx_ring_size * TSO_HEADER_SIZE,
2138				  txq->tso_hdrs, txq->tso_hdrs_dma);
2139}
2140
2141
2142/* netdev ops and related ***************************************************/
2143static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
2144{
2145	u32 int_cause;
2146	u32 int_cause_ext;
2147
2148	int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
2149	if (int_cause == 0)
2150		return 0;
2151
2152	int_cause_ext = 0;
2153	if (int_cause & INT_EXT) {
2154		int_cause &= ~INT_EXT;
2155		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
2156	}
2157
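	/* INT_RX and INT_TX_END carry one bit per queue (bits 2-9 and
	 * 19-26 respectively); shift them down to per-queue bitmasks.
	 * TX-end work is only recorded for queues whose enable bit in
	 * TXQ_COMMAND has already cleared, i.e. queues that have
	 * actually stopped.
	 */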
2158	if (int_cause) {
2159		wrlp(mp, INT_CAUSE, ~int_cause);
2160		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
2161				~(rdlp(mp, TXQ_COMMAND) & 0xff);
2162		mp->work_rx |= (int_cause & INT_RX) >> 2;
2163	}
2164
2165	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
2166	if (int_cause_ext) {
2167		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
2168		if (int_cause_ext & INT_EXT_LINK_PHY)
2169			mp->work_link = 1;
2170		mp->work_tx |= int_cause_ext & INT_EXT_TX;
2171	}
2172
2173	return 1;
2174}
2175
2176static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
2177{
2178	struct net_device *dev = (struct net_device *)dev_id;
2179	struct mv643xx_eth_private *mp = netdev_priv(dev);
2180
2181	if (unlikely(!mv643xx_eth_collect_events(mp)))
2182		return IRQ_NONE;
2183
2184	wrlp(mp, INT_MASK, 0);
2185	napi_schedule(&mp->napi);
2186
2187	return IRQ_HANDLED;
2188}
2189
2190static void handle_link_event(struct mv643xx_eth_private *mp)
2191{
2192	struct net_device *dev = mp->dev;
2193	u32 port_status;
2194	int speed;
2195	int duplex;
2196	int fc;
2197
2198	port_status = rdlp(mp, PORT_STATUS);
2199	if (!(port_status & LINK_UP)) {
2200		if (netif_carrier_ok(dev)) {
2201			int i;
2202
2203			netdev_info(dev, "link down\n");
2204
2205			netif_carrier_off(dev);
2206
2207			for (i = 0; i < mp->txq_count; i++) {
2208				struct tx_queue *txq = mp->txq + i;
2209
2210				txq_reclaim(txq, txq->tx_ring_size, 1);
2211				txq_reset_hw_ptr(txq);
2212			}
2213		}
2214		return;
2215	}
2216
2217	switch (port_status & PORT_SPEED_MASK) {
2218	case PORT_SPEED_10:
2219		speed = 10;
2220		break;
2221	case PORT_SPEED_100:
2222		speed = 100;
2223		break;
2224	case PORT_SPEED_1000:
2225		speed = 1000;
2226		break;
2227	default:
2228		speed = -1;
2229		break;
2230	}
2231	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
2232	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
2233
2234	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
2235		    speed, duplex ? "full" : "half", fc ? "en" : "dis");
2236
2237	if (!netif_carrier_ok(dev))
2238		netif_carrier_on(dev);
2239}
2240
2241static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
2242{
2243	struct mv643xx_eth_private *mp;
2244	int work_done;
2245
2246	mp = container_of(napi, struct mv643xx_eth_private, napi);
2247
2248	if (unlikely(mp->oom)) {
2249		mp->oom = 0;
2250		del_timer(&mp->rx_oom);
2251	}
2252
2253	work_done = 0;
2254	while (work_done < budget) {
2255		u8 queue_mask;
2256		int queue;
2257		int work_tbd;
2258
2259		if (mp->work_link) {
2260			mp->work_link = 0;
2261			handle_link_event(mp);
2262			work_done++;
2263			continue;
2264		}
2265
2266		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
2267		if (likely(!mp->oom))
2268			queue_mask |= mp->work_rx_refill;
2269
2270		if (!queue_mask) {
2271			if (mv643xx_eth_collect_events(mp))
2272				continue;
2273			break;
2274		}
2275
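		/* Service the highest-numbered queue that has pending
		 * work, in slices of at most 16 units of work per pass
		 * of this loop.
		 */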
2276		queue = fls(queue_mask) - 1;
2277		queue_mask = 1 << queue;
2278
2279		work_tbd = budget - work_done;
2280		if (work_tbd > 16)
2281			work_tbd = 16;
2282
2283		if (mp->work_tx_end & queue_mask) {
2284			txq_kick(mp->txq + queue);
2285		} else if (mp->work_tx & queue_mask) {
2286			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
2287			txq_maybe_wake(mp->txq + queue);
2288		} else if (mp->work_rx & queue_mask) {
2289			work_done += rxq_process(mp->rxq + queue, work_tbd);
2290		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
2291			work_done += rxq_refill(mp->rxq + queue, work_tbd);
2292		} else {
2293			BUG();
2294		}
2295	}
2296
2297	if (work_done < budget) {
2298		if (mp->oom)
2299			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
2300		napi_complete_done(napi, work_done);
2301		wrlp(mp, INT_MASK, mp->int_mask);
2302	}
2303
2304	return work_done;
2305}
2306
2307static inline void oom_timer_wrapper(struct timer_list *t)
2308{
2309	struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
2310
2311	napi_schedule(&mp->napi);
2312}
2313
2314static void port_start(struct mv643xx_eth_private *mp)
2315{
2316	struct net_device *dev = mp->dev;
2317	u32 pscr;
2318	int i;
2319
2320	/*
2321	 * Perform PHY reset, if there is a PHY.
2322	 */
2323	if (dev->phydev) {
2324		struct ethtool_link_ksettings cmd;
2325
2326		mv643xx_eth_get_link_ksettings(dev, &cmd);
2327		phy_init_hw(dev->phydev);
2328		mv643xx_eth_set_link_ksettings(
2329			dev, (const struct ethtool_link_ksettings *)&cmd);
2330		phy_start(dev->phydev);
2331	}
2332
2333	/*
2334	 * Configure basic link parameters.
2335	 */
2336	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2337
2338	pscr |= SERIAL_PORT_ENABLE;
2339	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2340
2341	pscr |= DO_NOT_FORCE_LINK_FAIL;
2342	if (!dev->phydev)
2343		pscr |= FORCE_LINK_PASS;
2344	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2345
2346	/*
2347	 * Configure TX path and queues.
2348	 */
2349	tx_set_rate(mp, 1000000000, 16777216);
2350	for (i = 0; i < mp->txq_count; i++) {
2351		struct tx_queue *txq = mp->txq + i;
2352
2353		txq_reset_hw_ptr(txq);
2354		txq_set_rate(txq, 1000000000, 16777216);
2355		txq_set_fixed_prio_mode(txq);
2356	}
2357
2358	/*
2359	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
2360	 * frames to RX queue #0, and include the pseudo-header when
2361	 * calculating receive checksums.
2362	 */
2363	mv643xx_eth_set_features(mp->dev, mp->dev->features);
2364
2365	/*
2366	 * Treat BPDUs as normal multicasts, and disable partition mode.
2367	 */
2368	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
2369
2370	/*
2371	 * Add configured unicast addresses to address filter table.
2372	 */
2373	mv643xx_eth_program_unicast_filter(mp->dev);
2374
2375	/*
2376	 * Enable the receive queues.
2377	 */
2378	for (i = 0; i < mp->rxq_count; i++) {
2379		struct rx_queue *rxq = mp->rxq + i;
2380		u32 addr;
2381
2382		addr = (u32)rxq->rx_desc_dma;
2383		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
2384		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
2385
2386		rxq_enable(rxq);
2387	}
2388}
2389
2390static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
2391{
2392	int skb_size;
2393
2394	/*
2395	 * Reserve 2+14 bytes for an ethernet header (the hardware
2396	 * automatically prepends 2 bytes of dummy data to each
2397	 * received packet), 16 bytes for up to four VLAN tags, and
2398	 * 4 bytes for the trailing FCS -- 36 bytes total.
2399	 */
2400	skb_size = mp->dev->mtu + 36;
2401
2402	/*
2403	 * Make sure that the skb size is a multiple of 8 bytes, as
2404	 * the lower three bits of the receive descriptor's buffer
2405	 * size field are ignored by the hardware.
2406	 */
2407	mp->skb_size = (skb_size + 7) & ~7;
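	/* For example, with the default 1500-byte MTU this is
	 * 1500 + 36 = 1536 bytes, already a multiple of 8.
	 */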
2408
2409	/*
2410	 * If NET_SKB_PAD is smaller than a cache line,
2411	 * netdev_alloc_skb() will cause skb->data to be misaligned
2412	 * to a cache line boundary.  If this is the case, include
2413	 * some extra space to allow re-aligning the data area.
2414	 */
2415	mp->skb_size += SKB_DMA_REALIGN;
2416}
2417
2418static int mv643xx_eth_open(struct net_device *dev)
2419{
2420	struct mv643xx_eth_private *mp = netdev_priv(dev);
2421	int err;
2422	int i;
2423
2424	wrlp(mp, INT_CAUSE, 0);
2425	wrlp(mp, INT_CAUSE_EXT, 0);
2426	rdlp(mp, INT_CAUSE_EXT);
2427
2428	err = request_irq(dev->irq, mv643xx_eth_irq,
2429			  IRQF_SHARED, dev->name, dev);
2430	if (err) {
2431		netdev_err(dev, "can't assign irq\n");
2432		return -EAGAIN;
2433	}
2434
2435	mv643xx_eth_recalc_skb_size(mp);
2436
2437	napi_enable(&mp->napi);
2438
2439	mp->int_mask = INT_EXT;
2440
2441	for (i = 0; i < mp->rxq_count; i++) {
2442		err = rxq_init(mp, i);
2443		if (err) {
2444			while (--i >= 0)
2445				rxq_deinit(mp->rxq + i);
2446			goto out;
2447		}
2448
2449		rxq_refill(mp->rxq + i, INT_MAX);
2450		mp->int_mask |= INT_RX_0 << i;
2451	}
2452
2453	if (mp->oom) {
2454		mp->rx_oom.expires = jiffies + (HZ / 10);
2455		add_timer(&mp->rx_oom);
2456	}
2457
2458	for (i = 0; i < mp->txq_count; i++) {
2459		err = txq_init(mp, i);
2460		if (err) {
2461			while (--i >= 0)
2462				txq_deinit(mp->txq + i);
2463			goto out_free;
2464		}
2465		mp->int_mask |= INT_TX_END_0 << i;
2466	}
2467
2468	add_timer(&mp->mib_counters_timer);
2469	port_start(mp);
2470
2471	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
2472	wrlp(mp, INT_MASK, mp->int_mask);
2473
2474	return 0;
2475
2476
2477out_free:
2478	for (i = 0; i < mp->rxq_count; i++)
2479		rxq_deinit(mp->rxq + i);
2480out:
2481	napi_disable(&mp->napi);
2482	free_irq(dev->irq, dev);
2483
2484	return err;
2485}
2486
2487static void port_reset(struct mv643xx_eth_private *mp)
2488{
2489	unsigned int data;
2490	int i;
2491
2492	for (i = 0; i < mp->rxq_count; i++)
2493		rxq_disable(mp->rxq + i);
2494	for (i = 0; i < mp->txq_count; i++)
2495		txq_disable(mp->txq + i);
2496
2497	while (1) {
2498		u32 ps = rdlp(mp, PORT_STATUS);
2499
2500		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
2501			break;
2502		udelay(10);
2503	}
2504
2505	/* Reset the Enable bit in the Configuration Register */
2506	data = rdlp(mp, PORT_SERIAL_CONTROL);
2507	data &= ~(SERIAL_PORT_ENABLE		|
2508		  DO_NOT_FORCE_LINK_FAIL	|
2509		  FORCE_LINK_PASS);
2510	wrlp(mp, PORT_SERIAL_CONTROL, data);
2511}
2512
2513static int mv643xx_eth_stop(struct net_device *dev)
2514{
2515	struct mv643xx_eth_private *mp = netdev_priv(dev);
2516	int i;
2517
2518	wrlp(mp, INT_MASK_EXT, 0x00000000);
2519	wrlp(mp, INT_MASK, 0x00000000);
2520	rdlp(mp, INT_MASK);
2521
2522	napi_disable(&mp->napi);
2523
2524	del_timer_sync(&mp->rx_oom);
2525
2526	netif_carrier_off(dev);
2527	if (dev->phydev)
2528		phy_stop(dev->phydev);
2529	free_irq(dev->irq, dev);
2530
2531	port_reset(mp);
2532	mv643xx_eth_get_stats(dev);
2533	mib_counters_update(mp);
2534	del_timer_sync(&mp->mib_counters_timer);
2535
2536	for (i = 0; i < mp->rxq_count; i++)
2537		rxq_deinit(mp->rxq + i);
2538	for (i = 0; i < mp->txq_count; i++)
2539		txq_deinit(mp->txq + i);
2540
2541	return 0;
2542}
2543
2544static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2545{
2546	int ret;
2547
2548	if (!dev->phydev)
2549		return -ENOTSUPP;
2550
2551	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
2552	if (!ret)
2553		mv643xx_eth_adjust_link(dev);
2554	return ret;
2555}
2556
2557static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
2558{
2559	struct mv643xx_eth_private *mp = netdev_priv(dev);
2560
2561	WRITE_ONCE(dev->mtu, new_mtu);
2562	mv643xx_eth_recalc_skb_size(mp);
2563	tx_set_rate(mp, 1000000000, 16777216);
2564
2565	if (!netif_running(dev))
2566		return 0;
2567
2568	/*
2569	 * Stop and then re-open the interface. This will allocate RX
2570	 * skbs sized for the new MTU.
2571	 * There is a risk that the re-open will fail if memory is
2572	 * exhausted.
2573	 */
2574	mv643xx_eth_stop(dev);
2575	if (mv643xx_eth_open(dev)) {
2576		netdev_err(dev,
2577			   "fatal error on re-opening device after MTU change\n");
2578	}
2579
2580	return 0;
2581}
2582
2583static void tx_timeout_task(struct work_struct *ugly)
2584{
2585	struct mv643xx_eth_private *mp;
2586
2587	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
2588	if (netif_running(mp->dev)) {
2589		netif_tx_stop_all_queues(mp->dev);
2590		port_reset(mp);
2591		port_start(mp);
2592		netif_tx_wake_all_queues(mp->dev);
2593	}
2594}
2595
2596static void mv643xx_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
2597{
2598	struct mv643xx_eth_private *mp = netdev_priv(dev);
2599
2600	netdev_info(dev, "tx timeout\n");
2601
2602	schedule_work(&mp->tx_timeout_task);
2603}
2604
2605#ifdef CONFIG_NET_POLL_CONTROLLER
2606static void mv643xx_eth_netpoll(struct net_device *dev)
2607{
2608	struct mv643xx_eth_private *mp = netdev_priv(dev);
2609
2610	wrlp(mp, INT_MASK, 0x00000000);
2611	rdlp(mp, INT_MASK);
2612
2613	mv643xx_eth_irq(dev->irq, dev);
2614
2615	wrlp(mp, INT_MASK, mp->int_mask);
2616}
2617#endif
2618
2619
2620/* platform glue ************************************************************/
2621static void
2622mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
2623			      const struct mbus_dram_target_info *dram)
2624{
2625	void __iomem *base = msp->base;
2626	u32 win_enable;
2627	u32 win_protect;
2628	int i;
2629
2630	for (i = 0; i < 6; i++) {
2631		writel(0, base + WINDOW_BASE(i));
2632		writel(0, base + WINDOW_SIZE(i));
2633		if (i < 4)
2634			writel(0, base + WINDOW_REMAP_HIGH(i));
2635	}
2636
2637	win_enable = 0x3f;
2638	win_protect = 0;
2639
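	/* Program one address decoding window per DRAM chip select.  A
	 * set bit in WINDOW_BAR_ENABLE disables the corresponding
	 * window, so clear the bit for each window that is used;
	 * win_protect grants full read/write access (0x3) to each
	 * programmed window.
	 */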
2640	for (i = 0; i < dram->num_cs; i++) {
2641		const struct mbus_dram_window *cs = dram->cs + i;
2642
2643		writel((cs->base & 0xffff0000) |
2644			(cs->mbus_attr << 8) |
2645			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
2646		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
2647
2648		win_enable &= ~(1 << i);
2649		win_protect |= 3 << (2 * i);
2650	}
2651
2652	writel(win_enable, base + WINDOW_BAR_ENABLE);
2653	msp->win_protect = win_protect;
2654}
2655
2656static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2657{
2658	/*
2659	 * Check whether we have a 14-bit coal limit field in bits
2660	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
2661	 * SDMA config register.
2662	 */
2663	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
2664	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
2665		msp->extended_rx_coal_limit = 1;
2666	else
2667		msp->extended_rx_coal_limit = 0;
2668
2669	/*
2670	 * Check whether the MAC supports TX rate control, and if
2671	 * yes, whether its associated registers are in the old or
2672	 * the new place.
2673	 */
2674	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
2675	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
2676		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
2677	} else {
2678		writel(7, msp->base + 0x0400 + TX_BW_RATE);
2679		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
2680			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
2681		else
2682			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
2683	}
2684}
2685
2686#if defined(CONFIG_OF)
2687static const struct of_device_id mv643xx_eth_shared_ids[] = {
2688	{ .compatible = "marvell,orion-eth", },
2689	{ .compatible = "marvell,kirkwood-eth", },
2690	{ }
2691};
2692MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
2693#endif
2694
2695#ifdef CONFIG_OF_IRQ
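/* Read an optional "marvell,<name>" u32 property from the device tree
 * node, leaving _v unchanged when the property is absent.
 */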
2696#define mv643xx_eth_property(_np, _name, _v)				\
2697	do {								\
2698		u32 tmp;						\
2699		if (!of_property_read_u32(_np, "marvell," _name, &tmp))	\
2700			_v = tmp;					\
2701	} while (0)
2702
2703static struct platform_device *port_platdev[3];
2704
2705static void mv643xx_eth_shared_of_remove(void)
2706{
2707	struct mv643xx_eth_platform_data *pd;
2708	int n;
2709
2710	for (n = 0; n < 3; n++) {
2711		if (!port_platdev[n])
2712			continue;
2713		pd = dev_get_platdata(&port_platdev[n]->dev);
2714		if (pd)
2715			of_node_put(pd->phy_node);
2716		platform_device_del(port_platdev[n]);
2717		port_platdev[n] = NULL;
2718	}
2719}
2720
2721static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
2722					  struct device_node *pnp)
2723{
2724	struct platform_device *ppdev;
2725	struct mv643xx_eth_platform_data ppd;
2726	struct resource res;
2727	int ret;
2728	int dev_num = 0;
2729
2730	memset(&ppd, 0, sizeof(ppd));
2731	ppd.shared = pdev;
2732
2733	memset(&res, 0, sizeof(res));
2734	if (of_irq_to_resource(pnp, 0, &res) <= 0) {
2735		dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp);
2736		return -EINVAL;
2737	}
2738
2739	if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
2740		dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp);
2741		return -EINVAL;
2742	}
2743
2744	if (ppd.port_number >= 3) {
2745		dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp);
2746		return -EINVAL;
2747	}
2748
2749	while (dev_num < 3 && port_platdev[dev_num])
2750		dev_num++;
2751
2752	if (dev_num == 3) {
2753		dev_err(&pdev->dev, "too many ports registered\n");
2754		return -EINVAL;
2755	}
2756
2757	ret = of_get_mac_address(pnp, ppd.mac_addr);
2758	if (ret == -EPROBE_DEFER)
2759		return ret;
2760
2761	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
2762	mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
2763	mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
2764	mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
2765	mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
2766	mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
2767
2768	of_get_phy_mode(pnp, &ppd.interface);
2769
2770	ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
2771	if (!ppd.phy_node) {
2772		ppd.phy_addr = MV643XX_ETH_PHY_NONE;
2773		of_property_read_u32(pnp, "speed", &ppd.speed);
2774		of_property_read_u32(pnp, "duplex", &ppd.duplex);
2775	}
2776
2777	ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
2778	if (!ppdev) {
2779		ret = -ENOMEM;
2780		goto put_err;
2781	}
2782	ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2783	ppdev->dev.of_node = pnp;
2784
2785	ret = platform_device_add_resources(ppdev, &res, 1);
2786	if (ret)
2787		goto port_err;
2788
2789	ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
2790	if (ret)
2791		goto port_err;
2792
2793	ret = platform_device_add(ppdev);
2794	if (ret)
2795		goto port_err;
2796
2797	port_platdev[dev_num] = ppdev;
2798
2799	return 0;
2800
2801port_err:
2802	platform_device_put(ppdev);
2803put_err:
2804	of_node_put(ppd.phy_node);
2805	return ret;
2806}
2807
2808static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
2809{
2810	struct mv643xx_eth_shared_platform_data *pd;
2811	struct device_node *np = pdev->dev.of_node;
2812	int ret;
2813
2814	/* bail out if not registered from DT */
2815	if (!np)
2816		return 0;
2817
2818	pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
2819	if (!pd)
2820		return -ENOMEM;
2821	pdev->dev.platform_data = pd;
2822
2823	mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
2824
2825	for_each_available_child_of_node_scoped(np, pnp) {
2826		ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
2827		if (ret) {
2828			mv643xx_eth_shared_of_remove();
2829			return ret;
2830		}
2831	}
2832	return 0;
2833}
2834
2835#else
2836static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
2837{
2838	return 0;
2839}
2840
2841static inline void mv643xx_eth_shared_of_remove(void)
2842{
2843}
2844#endif
2845
2846static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2847{
2848	static int mv643xx_eth_version_printed;
2849	struct mv643xx_eth_shared_platform_data *pd;
2850	struct mv643xx_eth_shared_private *msp;
2851	const struct mbus_dram_target_info *dram;
2852	int ret;
2853
2854	if (!mv643xx_eth_version_printed++)
2855		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
2856			  mv643xx_eth_driver_version);
2857
2858	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
2859	if (msp == NULL)
2860		return -ENOMEM;
2861	platform_set_drvdata(pdev, msp);
2862
2863	msp->base = devm_platform_ioremap_resource(pdev, 0);
2864	if (IS_ERR(msp->base))
2865		return PTR_ERR(msp->base);
2866
2867	msp->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
2868	if (IS_ERR(msp->clk))
2869		return PTR_ERR(msp->clk);
2870
2871	/*
2872	 * (Re-)program MBUS remapping windows if we are asked to.
2873	 */
2874	dram = mv_mbus_dram_info();
2875	if (dram)
2876		mv643xx_eth_conf_mbus_windows(msp, dram);
2877
2878	ret = mv643xx_eth_shared_of_probe(pdev);
2879	if (ret)
2880		return ret;
2881	pd = dev_get_platdata(&pdev->dev);
2882
2883	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
2884					pd->tx_csum_limit : 9 * 1024;
2885	infer_hw_params(msp);
2886
2887	return 0;
2888}
2889
2890static void mv643xx_eth_shared_remove(struct platform_device *pdev)
2891{
2892	mv643xx_eth_shared_of_remove();
2893}
2894
2895static struct platform_driver mv643xx_eth_shared_driver = {
2896	.probe		= mv643xx_eth_shared_probe,
2897	.remove		= mv643xx_eth_shared_remove,
2898	.driver = {
2899		.name	= MV643XX_ETH_SHARED_NAME,
2900		.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
2901	},
2902};
2903
2904static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
2905{
2906	int addr_shift = 5 * mp->port_num;
2907	u32 data;
2908
2909	data = rdl(mp, PHY_ADDR);
2910	data &= ~(0x1f << addr_shift);
2911	data |= (phy_addr & 0x1f) << addr_shift;
2912	wrl(mp, PHY_ADDR, data);
2913}
2914
2915static int phy_addr_get(struct mv643xx_eth_private *mp)
2916{
2917	unsigned int data;
2918
2919	data = rdl(mp, PHY_ADDR);
2920
2921	return (data >> (5 * mp->port_num)) & 0x1f;
2922}
2923
2924static void set_params(struct mv643xx_eth_private *mp,
2925		       struct mv643xx_eth_platform_data *pd)
2926{
2927	struct net_device *dev = mp->dev;
2928	unsigned int tx_ring_size;
2929
2930	if (is_valid_ether_addr(pd->mac_addr)) {
2931		eth_hw_addr_set(dev, pd->mac_addr);
2932	} else {
2933		u8 addr[ETH_ALEN];
2934
2935		uc_addr_get(mp, addr);
2936		eth_hw_addr_set(dev, addr);
2937	}
2938
2939	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
2940	if (pd->rx_queue_size)
2941		mp->rx_ring_size = pd->rx_queue_size;
2942	mp->rx_desc_sram_addr = pd->rx_sram_addr;
2943	mp->rx_desc_sram_size = pd->rx_sram_size;
2944
2945	mp->rxq_count = pd->rx_queue_count ? : 1;
2946
2947	tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2948	if (pd->tx_queue_size)
2949		tx_ring_size = pd->tx_queue_size;
2950
2951	mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
2952				   MV643XX_MAX_SKB_DESCS * 2, 4096);
2953	if (mp->tx_ring_size != tx_ring_size)
2954		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2955			    mp->tx_ring_size, tx_ring_size);
2956
2957	mp->tx_desc_sram_addr = pd->tx_sram_addr;
2958	mp->tx_desc_sram_size = pd->tx_sram_size;
2959
2960	mp->txq_count = pd->tx_queue_count ? : 1;
2961}
2962
2963static int get_phy_mode(struct mv643xx_eth_private *mp)
2964{
2965	struct device *dev = mp->dev->dev.parent;
2966	phy_interface_t iface;
2967	int err;
2968
2969	if (dev->of_node)
2970		err = of_get_phy_mode(dev->of_node, &iface);
2971
2972	/* Historical default if unspecified. We could also read/write
2973	 * the interface state in the PSC1
2974	 */
2975	if (!dev->of_node || err)
2976		iface = PHY_INTERFACE_MODE_GMII;
2977	return iface;
2978}
2979
2980static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2981				   int phy_addr)
2982{
2983	struct phy_device *phydev;
2984	int start;
2985	int num;
2986	int i;
2987	char phy_id[MII_BUS_ID_SIZE + 3];
2988
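	/* With the default PHY address, probe all 32 MDIO addresses,
	 * starting from the address currently latched in the PHY_ADDR
	 * register and wrapping modulo 32; otherwise probe only the
	 * address we were given.
	 */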
2989	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
2990		start = phy_addr_get(mp) & 0x1f;
2991		num = 32;
2992	} else {
2993		start = phy_addr & 0x1f;
2994		num = 1;
2995	}
2996
2997	/* Attempt to connect to the PHY using orion-mdio */
2998	phydev = ERR_PTR(-ENODEV);
2999	for (i = 0; i < num; i++) {
3000		int addr = (start + i) & 0x1f;
3001
3002		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
3003				"orion-mdio-mii", addr);
3004
3005		phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
3006				     get_phy_mode(mp));
3007		if (!IS_ERR(phydev)) {
3008			phy_addr_set(mp, addr);
3009			break;
3010		}
3011	}
3012
3013	return phydev;
3014}
3015
3016static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
3017{
3018	struct net_device *dev = mp->dev;
3019	struct phy_device *phy = dev->phydev;
3020
3021	if (speed == 0) {
3022		phy->autoneg = AUTONEG_ENABLE;
3023		phy->speed = 0;
3024		phy->duplex = 0;
3025		linkmode_copy(phy->advertising, phy->supported);
3026		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
3027				 phy->advertising);
3028	} else {
3029		phy->autoneg = AUTONEG_DISABLE;
3030		linkmode_zero(phy->advertising);
3031		phy->speed = speed;
3032		phy->duplex = duplex;
3033	}
3034	phy_start_aneg(phy);
3035}
3036
3037static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
3038{
3039	struct net_device *dev = mp->dev;
3040	u32 pscr;
3041
3042	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
3043	if (pscr & SERIAL_PORT_ENABLE) {
3044		pscr &= ~SERIAL_PORT_ENABLE;
3045		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
3046	}
3047
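	/* Rebuild the serial control value from scratch.  Without an
	 * attached PHY, autonegotiation is disabled and the requested
	 * speed and duplex are forced directly here.
	 */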
3048	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
3049	if (!dev->phydev) {
3050		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
3051		if (speed == SPEED_1000)
3052			pscr |= SET_GMII_SPEED_TO_1000;
3053		else if (speed == SPEED_100)
3054			pscr |= SET_MII_SPEED_TO_100;
3055
3056		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
3057
3058		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
3059		if (duplex == DUPLEX_FULL)
3060			pscr |= SET_FULL_DUPLEX_MODE;
3061	}
3062
3063	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
3064}
3065
3066static const struct net_device_ops mv643xx_eth_netdev_ops = {
3067	.ndo_open		= mv643xx_eth_open,
3068	.ndo_stop		= mv643xx_eth_stop,
3069	.ndo_start_xmit		= mv643xx_eth_xmit,
3070	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
3071	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
3072	.ndo_validate_addr	= eth_validate_addr,
3073	.ndo_eth_ioctl		= mv643xx_eth_ioctl,
3074	.ndo_change_mtu		= mv643xx_eth_change_mtu,
3075	.ndo_set_features	= mv643xx_eth_set_features,
3076	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
3077	.ndo_get_stats		= mv643xx_eth_get_stats,
3078#ifdef CONFIG_NET_POLL_CONTROLLER
3079	.ndo_poll_controller	= mv643xx_eth_netpoll,
3080#endif
3081};
3082
3083static int mv643xx_eth_probe(struct platform_device *pdev)
3084{
3085	struct mv643xx_eth_platform_data *pd;
3086	struct mv643xx_eth_private *mp;
3087	struct net_device *dev;
3088	struct phy_device *phydev = NULL;
3089	u32 psc1r;
3090	int err, irq;
3091
3092	pd = dev_get_platdata(&pdev->dev);
3093	if (pd == NULL) {
3094		dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
3095		return -ENODEV;
3096	}
3097
3098	if (pd->shared == NULL) {
3099		dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
3100		return -ENODEV;
3101	}
3102
3103	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
3104	if (!dev)
3105		return -ENOMEM;
3106
3107	SET_NETDEV_DEV(dev, &pdev->dev);
3108	mp = netdev_priv(dev);
3109	platform_set_drvdata(pdev, mp);
3110
3111	mp->shared = platform_get_drvdata(pd->shared);
3112	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
3113	mp->port_num = pd->port_number;
3114
3115	mp->dev = dev;
3116
3117	if (of_device_is_compatible(pdev->dev.of_node,
3118				    "marvell,kirkwood-eth-port")) {
3119		psc1r = rdlp(mp, PORT_SERIAL_CONTROL1);
3120
3121		/* Kirkwood resets some registers when its clocks are gated; in
3122		 * particular, CLK125_BYPASS_EN must be cleared, but that bit is
3123		 * not available on all SoCs/system controllers using this driver.
3124		 */
3125		psc1r &= ~CLK125_BYPASS_EN;
3126
3127		/* On Kirkwood with two Ethernet controllers, if both of them
3128		 * have RGMII_EN disabled, the first controller will be in GMII
3129		 * mode and the second one is effectively disabled, instead of
3130		 * two MII interfaces.
3131		 *
3132		 * To enable GMII in the first controller, the second one must
3133		 * also be configured (and may be enabled) with RGMII_EN
3134		 * disabled too, even though it cannot be used at all.
3135		 */
3136		switch (pd->interface) {
3137		/* Use internal to denote second controller being disabled */
3138		case PHY_INTERFACE_MODE_INTERNAL:
3139		case PHY_INTERFACE_MODE_MII:
3140		case PHY_INTERFACE_MODE_GMII:
3141			psc1r &= ~RGMII_EN;
3142			break;
3143		case PHY_INTERFACE_MODE_RGMII:
3144		case PHY_INTERFACE_MODE_RGMII_ID:
3145		case PHY_INTERFACE_MODE_RGMII_RXID:
3146		case PHY_INTERFACE_MODE_RGMII_TXID:
3147			psc1r |= RGMII_EN;
3148			break;
3149		default:
3150			/* Unknown; don't touch */
3151			break;
3152		}
3153
3154		wrlp(mp, PORT_SERIAL_CONTROL1, psc1r);
3155	}
3156
3157	/*
3158	 * Start with a default rate, and if there is a clock, allow
3159	 * it to override the default.
3160	 */
3161	mp->t_clk = 133000000;
3162	mp->clk = devm_clk_get(&pdev->dev, NULL);
3163	if (!IS_ERR(mp->clk)) {
3164		clk_prepare_enable(mp->clk);
3165		mp->t_clk = clk_get_rate(mp->clk);
3166	} else if (!IS_ERR(mp->shared->clk)) {
3167		mp->t_clk = clk_get_rate(mp->shared->clk);
3168	}
3169
3170	set_params(mp, pd);
3171	netif_set_real_num_tx_queues(dev, mp->txq_count);
3172	netif_set_real_num_rx_queues(dev, mp->rxq_count);
3173
3174	err = 0;
3175	if (pd->phy_node) {
3176		phydev = of_phy_connect(mp->dev, pd->phy_node,
3177					mv643xx_eth_adjust_link, 0,
3178					get_phy_mode(mp));
3179		if (!phydev)
3180			err = -ENODEV;
3181		else
3182			phy_addr_set(mp, phydev->mdio.addr);
3183	} else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
3184		phydev = phy_scan(mp, pd->phy_addr);
3185
3186		if (IS_ERR(phydev))
3187			err = PTR_ERR(phydev);
3188		else
3189			phy_init(mp, pd->speed, pd->duplex);
3190	}
3191	if (err == -ENODEV) {
3192		err = -EPROBE_DEFER;
3193		goto out;
3194	}
3195	if (err)
3196		goto out;
3197
3198	dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
3199
3200	init_pscr(mp, pd->speed, pd->duplex);
3201
3202
3203	mib_counters_clear(mp);
3204
3205	timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
3206	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
3207
3208	spin_lock_init(&mp->mib_counters_lock);
3209
3210	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
3211
3212	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll);
3213
3214	timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
3215
3216
3217	irq = platform_get_irq(pdev, 0);
3218	if (WARN_ON(irq < 0)) {
3219		err = irq;
3220		goto out;
3221	}
3222	dev->irq = irq;
3223
3224	dev->netdev_ops = &mv643xx_eth_netdev_ops;
3225
3226	dev->watchdog_timeo = 2 * HZ;
3227	dev->base_addr = 0;
3228
3229	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3230	dev->vlan_features = dev->features;
3231
3232	dev->features |= NETIF_F_RXCSUM;
3233	dev->hw_features = dev->features;
3234
3235	dev->priv_flags |= IFF_UNICAST_FLT;
3236	netif_set_tso_max_segs(dev, MV643XX_MAX_TSO_SEGS);
3237
3238	/* MTU range: 64 - 9500 */
3239	dev->min_mtu = 64;
3240	dev->max_mtu = 9500;
3241
3242	if (mp->shared->win_protect)
3243		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
3244
3245	netif_carrier_off(dev);
3246
3247	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
3248
3249	set_rx_coal(mp, 250);
3250	set_tx_coal(mp, 0);
3251
3252	err = register_netdev(dev);
3253	if (err)
3254		goto out;
3255
3256	netdev_notice(dev, "port %d with MAC address %pM\n",
3257		      mp->port_num, dev->dev_addr);
3258
3259	if (mp->tx_desc_sram_size > 0)
3260		netdev_notice(dev, "configured with sram\n");
3261
3262	return 0;
3263
3264out:
3265	if (!IS_ERR(mp->clk))
3266		clk_disable_unprepare(mp->clk);
3267	free_netdev(dev);
3268
3269	return err;
3270}
3271
3272static void mv643xx_eth_remove(struct platform_device *pdev)
3273{
3274	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
3275	struct net_device *dev = mp->dev;
3276
3277	unregister_netdev(mp->dev);
3278	if (dev->phydev)
3279		phy_disconnect(dev->phydev);
3280	cancel_work_sync(&mp->tx_timeout_task);
3281
3282	if (!IS_ERR(mp->clk))
3283		clk_disable_unprepare(mp->clk);
3284
3285	free_netdev(mp->dev);
3286}
3287
3288static void mv643xx_eth_shutdown(struct platform_device *pdev)
3289{
3290	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
3291
3292	/* Mask all interrupts on ethernet port */
3293	wrlp(mp, INT_MASK, 0);
3294	rdlp(mp, INT_MASK);
3295
3296	if (netif_running(mp->dev))
3297		port_reset(mp);
3298}
3299
3300static struct platform_driver mv643xx_eth_driver = {
3301	.probe		= mv643xx_eth_probe,
3302	.remove		= mv643xx_eth_remove,
3303	.shutdown	= mv643xx_eth_shutdown,
3304	.driver = {
3305		.name	= MV643XX_ETH_NAME,
3306	},
3307};
3308
3309static struct platform_driver * const drivers[] = {
3310	&mv643xx_eth_shared_driver,
3311	&mv643xx_eth_driver,
3312};
3313
3314static int __init mv643xx_eth_init_module(void)
3315{
3316	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
3317}
3318module_init(mv643xx_eth_init_module);
3319
3320static void __exit mv643xx_eth_cleanup_module(void)
3321{
3322	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
3323}
3324module_exit(mv643xx_eth_cleanup_module);
3325
3326MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
3327	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
3328MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
3329MODULE_LICENSE("GPL");
3330MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
3331MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
   4 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
   5 *
   6 * Based on the 64360 driver from:
   7 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
   8 *		      Rabeeh Khoury <rabeeh@marvell.com>
   9 *
  10 * Copyright (C) 2003 PMC-Sierra, Inc.,
  11 *	written by Manish Lachwani
  12 *
  13 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
  14 *
  15 * Copyright (C) 2004-2006 MontaVista Software, Inc.
  16 *			   Dale Farnsworth <dale@farnsworth.org>
  17 *
  18 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
  19 *				     <sjhill@realitydiluted.com>
  20 *
  21 * Copyright (C) 2007-2008 Marvell Semiconductor
  22 *			   Lennert Buytenhek <buytenh@marvell.com>
  23 *
  24 * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
  25 */
  26
  27#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  28
  29#include <linux/init.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/in.h>
  32#include <linux/ip.h>
  33#include <net/tso.h>
  34#include <linux/tcp.h>
  35#include <linux/udp.h>
  36#include <linux/etherdevice.h>
  37#include <linux/delay.h>
  38#include <linux/ethtool.h>
  39#include <linux/platform_device.h>
  40#include <linux/module.h>
  41#include <linux/kernel.h>
  42#include <linux/spinlock.h>
  43#include <linux/workqueue.h>
  44#include <linux/phy.h>
  45#include <linux/mv643xx_eth.h>
  46#include <linux/io.h>
  47#include <linux/interrupt.h>
  48#include <linux/types.h>
  49#include <linux/slab.h>
  50#include <linux/clk.h>
  51#include <linux/of.h>
  52#include <linux/of_irq.h>
  53#include <linux/of_net.h>
  54#include <linux/of_mdio.h>
  55
  56static char mv643xx_eth_driver_name[] = "mv643xx_eth";
  57static char mv643xx_eth_driver_version[] = "1.4";
  58
  59
  60/*
  61 * Registers shared between all ports.
  62 */
  63#define PHY_ADDR			0x0000
  64#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
  65#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
  66#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
  67#define WINDOW_BAR_ENABLE		0x0290
  68#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
  69
  70/*
  71 * Main per-port registers.  These live at offset 0x0400 for
  72 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
  73 */
  74#define PORT_CONFIG			0x0000
  75#define  UNICAST_PROMISCUOUS_MODE	0x00000001
  76#define PORT_CONFIG_EXT			0x0004
  77#define MAC_ADDR_LOW			0x0014
  78#define MAC_ADDR_HIGH			0x0018
  79#define SDMA_CONFIG			0x001c
  80#define  TX_BURST_SIZE_16_64BIT		0x01000000
  81#define  TX_BURST_SIZE_4_64BIT		0x00800000
  82#define  BLM_TX_NO_SWAP			0x00000020
  83#define  BLM_RX_NO_SWAP			0x00000010
  84#define  RX_BURST_SIZE_16_64BIT		0x00000008
  85#define  RX_BURST_SIZE_4_64BIT		0x00000004
  86#define PORT_SERIAL_CONTROL		0x003c
  87#define  SET_MII_SPEED_TO_100		0x01000000
  88#define  SET_GMII_SPEED_TO_1000		0x00800000
  89#define  SET_FULL_DUPLEX_MODE		0x00200000
  90#define  MAX_RX_PACKET_9700BYTE		0x000a0000
  91#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
  92#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
  93#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
  94#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
  95#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
  96#define  FORCE_LINK_PASS		0x00000002
  97#define  SERIAL_PORT_ENABLE		0x00000001
  98#define PORT_STATUS			0x0044
  99#define  TX_FIFO_EMPTY			0x00000400
 100#define  TX_IN_PROGRESS			0x00000080
 101#define  PORT_SPEED_MASK		0x00000030
 102#define  PORT_SPEED_1000		0x00000010
 103#define  PORT_SPEED_100			0x00000020
 104#define  PORT_SPEED_10			0x00000000
 105#define  FLOW_CONTROL_ENABLED		0x00000008
 106#define  FULL_DUPLEX			0x00000004
 107#define  LINK_UP			0x00000002
 108#define TXQ_COMMAND			0x0048
 109#define TXQ_FIX_PRIO_CONF		0x004c
 110#define PORT_SERIAL_CONTROL1		0x004c
 
 111#define  CLK125_BYPASS_EN		0x00000010
 112#define TX_BW_RATE			0x0050
 113#define TX_BW_MTU			0x0058
 114#define TX_BW_BURST			0x005c
 115#define INT_CAUSE			0x0060
 116#define  INT_TX_END			0x07f80000
 117#define  INT_TX_END_0			0x00080000
 118#define  INT_RX				0x000003fc
 119#define  INT_RX_0			0x00000004
 120#define  INT_EXT			0x00000002
 121#define INT_CAUSE_EXT			0x0064
 122#define  INT_EXT_LINK_PHY		0x00110000
 123#define  INT_EXT_TX			0x000000ff
 124#define INT_MASK			0x0068
 125#define INT_MASK_EXT			0x006c
 126#define TX_FIFO_URGENT_THRESHOLD	0x0074
 127#define RX_DISCARD_FRAME_CNT		0x0084
 128#define RX_OVERRUN_FRAME_CNT		0x0088
 129#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
 130#define TX_BW_RATE_MOVED		0x00e0
 131#define TX_BW_MTU_MOVED			0x00e8
 132#define TX_BW_BURST_MOVED		0x00ec
 133#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
 134#define RXQ_COMMAND			0x0280
 135#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
 136#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
 137#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
 138#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))
 139
 140/*
 141 * Misc per-port registers.
 142 */
 143#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
 144#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
 145#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
 146#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
 147
 148
 149/*
 150 * SDMA configuration register default value.
 151 */
 152#if defined(__BIG_ENDIAN)
 153#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
 154		(RX_BURST_SIZE_4_64BIT	|	\
 155		 TX_BURST_SIZE_4_64BIT)
 156#elif defined(__LITTLE_ENDIAN)
 157#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
 158		(RX_BURST_SIZE_4_64BIT	|	\
 159		 BLM_RX_NO_SWAP		|	\
 160		 BLM_TX_NO_SWAP		|	\
 161		 TX_BURST_SIZE_4_64BIT)
 162#else
 163#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
 164#endif
 165
 166
 167/*
 168 * Misc definitions.
 169 */
 170#define DEFAULT_RX_QUEUE_SIZE	128
 171#define DEFAULT_TX_QUEUE_SIZE	512
 172#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
 173
 174/* Max number of allowed TCP segments for software TSO */
 175#define MV643XX_MAX_TSO_SEGS 100
 176#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
 177
 178#define IS_TSO_HEADER(txq, addr) \
 179	((addr >= txq->tso_hdrs_dma) && \
 180	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
 181
 182#define DESC_DMA_MAP_SINGLE 0
 183#define DESC_DMA_MAP_PAGE 1
 184
 185/*
 186 * RX/TX descriptors.
 187 */
 188#if defined(__BIG_ENDIAN)
 189struct rx_desc {
 190	u16 byte_cnt;		/* Descriptor buffer byte count		*/
 191	u16 buf_size;		/* Buffer size				*/
 192	u32 cmd_sts;		/* Descriptor command status		*/
 193	u32 next_desc_ptr;	/* Next descriptor pointer		*/
 194	u32 buf_ptr;		/* Descriptor buffer pointer		*/
 195};
 196
 197struct tx_desc {
 198	u16 byte_cnt;		/* buffer byte count			*/
 199	u16 l4i_chk;		/* CPU provided TCP checksum		*/
 200	u32 cmd_sts;		/* Command/status field			*/
 201	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
 202	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
 203};
 204#elif defined(__LITTLE_ENDIAN)
 205struct rx_desc {
 206	u32 cmd_sts;		/* Descriptor command status		*/
 207	u16 buf_size;		/* Buffer size				*/
 208	u16 byte_cnt;		/* Descriptor buffer byte count		*/
 209	u32 buf_ptr;		/* Descriptor buffer pointer		*/
 210	u32 next_desc_ptr;	/* Next descriptor pointer		*/
 211};
 212
 213struct tx_desc {
 214	u32 cmd_sts;		/* Command/status field			*/
 215	u16 l4i_chk;		/* CPU provided TCP checksum		*/
 216	u16 byte_cnt;		/* buffer byte count			*/
 217	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
 218	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
 219};
 220#else
 221#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
 222#endif
 223
 224/* RX & TX descriptor command */
 225#define BUFFER_OWNED_BY_DMA		0x80000000
 226
 227/* RX & TX descriptor status */
 228#define ERROR_SUMMARY			0x00000001
 229
 230/* RX descriptor status */
 231#define LAYER_4_CHECKSUM_OK		0x40000000
 232#define RX_ENABLE_INTERRUPT		0x20000000
 233#define RX_FIRST_DESC			0x08000000
 234#define RX_LAST_DESC			0x04000000
 235#define RX_IP_HDR_OK			0x02000000
 236#define RX_PKT_IS_IPV4			0x01000000
 237#define RX_PKT_IS_ETHERNETV2		0x00800000
 238#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
 239#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
 240#define RX_PKT_IS_VLAN_TAGGED		0x00080000
 241
 242/* TX descriptor command */
 243#define TX_ENABLE_INTERRUPT		0x00800000
 244#define GEN_CRC				0x00400000
 245#define TX_FIRST_DESC			0x00200000
 246#define TX_LAST_DESC			0x00100000
 247#define ZERO_PADDING			0x00080000
 248#define GEN_IP_V4_CHECKSUM		0x00040000
 249#define GEN_TCP_UDP_CHECKSUM		0x00020000
 250#define UDP_FRAME			0x00010000
 251#define MAC_HDR_EXTRA_4_BYTES		0x00008000
 252#define GEN_TCP_UDP_CHK_FULL		0x00000400
 253#define MAC_HDR_EXTRA_8_BYTES		0x00000200
 254
 255#define TX_IHL_SHIFT			11
 256
 257
 258/* global *******************************************************************/
 259struct mv643xx_eth_shared_private {
 260	/*
 261	 * Ethernet controller base address.
 262	 */
 263	void __iomem *base;
 264
 265	/*
 266	 * Per-port MBUS window access register value.
 267	 */
 268	u32 win_protect;
 269
 270	/*
 271	 * Hardware-specific parameters.
 272	 */
 273	int extended_rx_coal_limit;
 274	int tx_bw_control;
 275	int tx_csum_limit;
 276	struct clk *clk;
 277};
 278
 279#define TX_BW_CONTROL_ABSENT		0
 280#define TX_BW_CONTROL_OLD_LAYOUT	1
 281#define TX_BW_CONTROL_NEW_LAYOUT	2
 282
 283static int mv643xx_eth_open(struct net_device *dev);
 284static int mv643xx_eth_stop(struct net_device *dev);
 285
 286
 287/* per-port *****************************************************************/
 288struct mib_counters {
 289	u64 good_octets_received;
 290	u32 bad_octets_received;
 291	u32 internal_mac_transmit_err;
 292	u32 good_frames_received;
 293	u32 bad_frames_received;
 294	u32 broadcast_frames_received;
 295	u32 multicast_frames_received;
 296	u32 frames_64_octets;
 297	u32 frames_65_to_127_octets;
 298	u32 frames_128_to_255_octets;
 299	u32 frames_256_to_511_octets;
 300	u32 frames_512_to_1023_octets;
 301	u32 frames_1024_to_max_octets;
 302	u64 good_octets_sent;
 303	u32 good_frames_sent;
 304	u32 excessive_collision;
 305	u32 multicast_frames_sent;
 306	u32 broadcast_frames_sent;
 307	u32 unrec_mac_control_received;
 308	u32 fc_sent;
 309	u32 good_fc_received;
 310	u32 bad_fc_received;
 311	u32 undersize_received;
 312	u32 fragments_received;
 313	u32 oversize_received;
 314	u32 jabber_received;
 315	u32 mac_receive_error;
 316	u32 bad_crc_event;
 317	u32 collision;
 318	u32 late_collision;
 319	/* Non MIB hardware counters */
 320	u32 rx_discard;
 321	u32 rx_overrun;
 322};
 323
 324struct rx_queue {
 325	int index;
 326
 327	int rx_ring_size;
 328
 329	int rx_desc_count;
 330	int rx_curr_desc;
 331	int rx_used_desc;
 332
 333	struct rx_desc *rx_desc_area;
 334	dma_addr_t rx_desc_dma;
 335	int rx_desc_area_size;
 336	struct sk_buff **rx_skb;
 337};
 338
 339struct tx_queue {
 340	int index;
 341
 342	int tx_ring_size;
 343
 344	int tx_desc_count;
 345	int tx_curr_desc;
 346	int tx_used_desc;
 347
 348	int tx_stop_threshold;
 349	int tx_wake_threshold;
 350
 351	char *tso_hdrs;
 352	dma_addr_t tso_hdrs_dma;
 353
 354	struct tx_desc *tx_desc_area;
 355	char *tx_desc_mapping; /* array to track the type of the dma mapping */
 356	dma_addr_t tx_desc_dma;
 357	int tx_desc_area_size;
 358
 359	struct sk_buff_head tx_skb;
 360
 361	unsigned long tx_packets;
 362	unsigned long tx_bytes;
 363	unsigned long tx_dropped;
 364};
 365
 366struct mv643xx_eth_private {
 367	struct mv643xx_eth_shared_private *shared;
 368	void __iomem *base;
 369	int port_num;
 370
 371	struct net_device *dev;
 372
 373	struct timer_list mib_counters_timer;
 374	spinlock_t mib_counters_lock;
 375	struct mib_counters mib_counters;
 376
 377	struct work_struct tx_timeout_task;
 378
 379	struct napi_struct napi;
 380	u32 int_mask;
 381	u8 oom;
 382	u8 work_link;
 383	u8 work_tx;
 384	u8 work_tx_end;
 385	u8 work_rx;
 386	u8 work_rx_refill;
 387
 388	int skb_size;
 389
 390	/*
 391	 * RX state.
 392	 */
 393	int rx_ring_size;
 394	unsigned long rx_desc_sram_addr;
 395	int rx_desc_sram_size;
 396	int rxq_count;
 397	struct timer_list rx_oom;
 398	struct rx_queue rxq[8];
 399
 400	/*
 401	 * TX state.
 402	 */
 403	int tx_ring_size;
 404	unsigned long tx_desc_sram_addr;
 405	int tx_desc_sram_size;
 406	int txq_count;
 407	struct tx_queue txq[8];
 408
 409	/*
 410	 * Hardware-specific parameters.
 411	 */
 412	struct clk *clk;
 413	unsigned int t_clk;
 414};
 415
 416
 417/* port register accessors **************************************************/
 418static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
 419{
 420	return readl(mp->shared->base + offset);
 421}
 422
 423static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
 424{
 425	return readl(mp->base + offset);
 426}
 427
 428static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
 429{
 430	writel(data, mp->shared->base + offset);
 431}
 432
 433static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
 434{
 435	writel(data, mp->base + offset);
 436}
 437
 438
 439/* rxq/txq helper functions *************************************************/
 440static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
 441{
 442	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
 443}
 444
 445static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
 446{
 447	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
 448}
 449
 450static void rxq_enable(struct rx_queue *rxq)
 451{
 452	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 453	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
 454}
 455
 456static void rxq_disable(struct rx_queue *rxq)
 457{
 458	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 459	u8 mask = 1 << rxq->index;
 460
 461	wrlp(mp, RXQ_COMMAND, mask << 8);
 462	while (rdlp(mp, RXQ_COMMAND) & mask)
 463		udelay(10);
 464}
 465
 466static void txq_reset_hw_ptr(struct tx_queue *txq)
 467{
 468	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 469	u32 addr;
 470
 471	addr = (u32)txq->tx_desc_dma;
 472	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
 473	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
 474}
 475
 476static void txq_enable(struct tx_queue *txq)
 477{
 478	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 479	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
 480}
 481
 482static void txq_disable(struct tx_queue *txq)
 483{
 484	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 485	u8 mask = 1 << txq->index;
 486
 487	wrlp(mp, TXQ_COMMAND, mask << 8);
 488	while (rdlp(mp, TXQ_COMMAND) & mask)
 489		udelay(10);
 490}
 491
 492static void txq_maybe_wake(struct tx_queue *txq)
 493{
 494	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 495	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 496
 497	if (netif_tx_queue_stopped(nq)) {
 498		__netif_tx_lock(nq, smp_processor_id());
 499		if (txq->tx_desc_count <= txq->tx_wake_threshold)
 500			netif_tx_wake_queue(nq);
 501		__netif_tx_unlock(nq);
 502	}
 503}
 504
 505static int rxq_process(struct rx_queue *rxq, int budget)
 506{
 507	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 508	struct net_device_stats *stats = &mp->dev->stats;
 509	int rx;
 510
 511	rx = 0;
 512	while (rx < budget && rxq->rx_desc_count) {
 513		struct rx_desc *rx_desc;
 514		unsigned int cmd_sts;
 515		struct sk_buff *skb;
 516		u16 byte_cnt;
 517
 518		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
 519
 520		cmd_sts = rx_desc->cmd_sts;
 521		if (cmd_sts & BUFFER_OWNED_BY_DMA)
 522			break;
 523		rmb();
 524
 525		skb = rxq->rx_skb[rxq->rx_curr_desc];
 526		rxq->rx_skb[rxq->rx_curr_desc] = NULL;
 527
 528		rxq->rx_curr_desc++;
 529		if (rxq->rx_curr_desc == rxq->rx_ring_size)
 530			rxq->rx_curr_desc = 0;
 531
 532		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
 533				 rx_desc->buf_size, DMA_FROM_DEVICE);
 534		rxq->rx_desc_count--;
 535		rx++;
 536
 537		mp->work_rx_refill |= 1 << rxq->index;
 538
 539		byte_cnt = rx_desc->byte_cnt;
 540
 541		/*
 542		 * Update statistics.
 543		 *
 544		 * Note that the descriptor byte count includes 2 dummy
 545		 * bytes automatically inserted by the hardware at the
 546		 * start of the packet (which we don't count), and a 4
 547		 * byte CRC at the end of the packet (which we do count).
 548		 */
 549		stats->rx_packets++;
 550		stats->rx_bytes += byte_cnt - 2;
 551
 552		/*
 553		 * In case we received a packet without first / last bits
 554		 * on, or the error summary bit is set, the packet needs
 555		 * to be dropped.
 556		 */
 557		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
 558			!= (RX_FIRST_DESC | RX_LAST_DESC))
 559			goto err;
 560
 561		/*
 562		 * The -4 is for the CRC in the trailer of the
 563		 * received packet
 564		 */
 565		skb_put(skb, byte_cnt - 2 - 4);
 566
 567		if (cmd_sts & LAYER_4_CHECKSUM_OK)
 568			skb->ip_summed = CHECKSUM_UNNECESSARY;
 569		skb->protocol = eth_type_trans(skb, mp->dev);
 570
 571		napi_gro_receive(&mp->napi, skb);
 572
 573		continue;
 574
 575err:
 576		stats->rx_dropped++;
 577
 578		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
 579			(RX_FIRST_DESC | RX_LAST_DESC)) {
 580			if (net_ratelimit())
 581				netdev_err(mp->dev,
 582					   "received packet spanning multiple descriptors\n");
 583		}
 584
 585		if (cmd_sts & ERROR_SUMMARY)
 586			stats->rx_errors++;
 587
 588		dev_kfree_skb(skb);
 589	}
 590
 591	if (rx < budget)
 592		mp->work_rx &= ~(1 << rxq->index);
 593
 594	return rx;
 595}
 596
 597static int rxq_refill(struct rx_queue *rxq, int budget)
 598{
 599	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 600	int refilled;
 601
 602	refilled = 0;
 603	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
 604		struct sk_buff *skb;
 605		int rx;
 606		struct rx_desc *rx_desc;
 607		int size;
 608
 609		skb = netdev_alloc_skb(mp->dev, mp->skb_size);
 610
 611		if (skb == NULL) {
 612			mp->oom = 1;
 613			goto oom;
 614		}
 615
 616		if (SKB_DMA_REALIGN)
 617			skb_reserve(skb, SKB_DMA_REALIGN);
 618
 619		refilled++;
 620		rxq->rx_desc_count++;
 621
 622		rx = rxq->rx_used_desc++;
 623		if (rxq->rx_used_desc == rxq->rx_ring_size)
 624			rxq->rx_used_desc = 0;
 625
 626		rx_desc = rxq->rx_desc_area + rx;
 627
 628		size = skb_end_pointer(skb) - skb->data;
 629		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
 630						  skb->data, size,
 631						  DMA_FROM_DEVICE);
 632		rx_desc->buf_size = size;
 633		rxq->rx_skb[rx] = skb;
 634		wmb();
 635		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
 636		wmb();
 637
 638		/*
 639		 * The hardware automatically prepends 2 bytes of
 640		 * dummy data to each received packet, so that the
 641		 * IP header ends up 16-byte aligned.
 642		 */
 643		skb_reserve(skb, 2);
 644	}
 645
 646	if (refilled < budget)
 647		mp->work_rx_refill &= ~(1 << rxq->index);
 648
 649oom:
 650	return refilled;
 651}
 652
 653
 654/* tx ***********************************************************************/
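/*
 * Fragments that are smaller than 8 bytes and not 8-byte aligned cannot
 * be handed to the SDMA engine directly: mv643xx_eth_xmit() linearizes
 * skbs containing such fragments, and the TSO path copies them into the
 * per-descriptor header area instead.
 */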
 655static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
 656{
 657	int frag;
 658
 659	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 660		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
 661
 662		if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
 663			return 1;
 664	}
 665
 666	return 0;
 667}
 668
 669static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
 670		       u16 *l4i_chk, u32 *command, int length)
 671{
 672	int ret;
 673	u32 cmd = 0;
 674
 675	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 676		int hdr_len;
 677		int tag_bytes;
 678
 679		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
 680		       skb->protocol != htons(ETH_P_8021Q));
 681
 682		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
 683		tag_bytes = hdr_len - ETH_HLEN;
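		/*
		 * tag_bytes is the number of VLAN tag bytes between the
		 * Ethernet header and the IP header.  The hardware can only
		 * compensate for 0, 4, 8 or 12 extra bytes (via the
		 * MAC_HDR_EXTRA_{4,8}_BYTES flags), hence the "& ~12" test
		 * below; anything else falls back to software checksumming.
		 */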
 684
 685		if (length - hdr_len > mp->shared->tx_csum_limit ||
 686		    unlikely(tag_bytes & ~12)) {
 687			ret = skb_checksum_help(skb);
 688			if (!ret)
 689				goto no_csum;
 690			return ret;
 691		}
 692
 693		if (tag_bytes & 4)
 694			cmd |= MAC_HDR_EXTRA_4_BYTES;
 695		if (tag_bytes & 8)
 696			cmd |= MAC_HDR_EXTRA_8_BYTES;
 697
 698		cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
 699			   GEN_IP_V4_CHECKSUM   |
 700			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
 701
 702		/* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
 703		 * it seems we don't need to pass the initial checksum. */
 704		switch (ip_hdr(skb)->protocol) {
 705		case IPPROTO_UDP:
 706			cmd |= UDP_FRAME;
 707			*l4i_chk = 0;
 708			break;
 709		case IPPROTO_TCP:
 710			*l4i_chk = 0;
 711			break;
 712		default:
 713			WARN(1, "protocol not supported");
 714		}
 715	} else {
 716no_csum:
 717		/* Errata BTS #50, IHL must be 5 if no HW checksum */
 718		cmd |= 5 << TX_IHL_SHIFT;
 719	}
 720	*command = cmd;
 721	return 0;
 722}
 723
 724static inline int
 725txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
 726		 struct sk_buff *skb, char *data, int length,
 727		 bool last_tcp, bool is_last)
 728{
 729	int tx_index;
 730	u32 cmd_sts;
 731	struct tx_desc *desc;
 732
 733	tx_index = txq->tx_curr_desc++;
 734	if (txq->tx_curr_desc == txq->tx_ring_size)
 735		txq->tx_curr_desc = 0;
 736	desc = &txq->tx_desc_area[tx_index];
 737	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 738
 739	desc->l4i_chk = 0;
 740	desc->byte_cnt = length;
 741
 742	if (length <= 8 && (uintptr_t)data & 0x7) {
 743		/* Copy unaligned small data fragment to TSO header data area */
 744		memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
 745		       data, length);
 746		desc->buf_ptr = txq->tso_hdrs_dma
 747			+ tx_index * TSO_HEADER_SIZE;
 748	} else {
 749		/* Alignment is okay, map buffer and hand off to hardware */
 750		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 751		desc->buf_ptr = dma_map_single(dev->dev.parent, data,
 752			length, DMA_TO_DEVICE);
 753		if (unlikely(dma_mapping_error(dev->dev.parent,
 754					       desc->buf_ptr))) {
 755			WARN(1, "dma_map_single failed!\n");
 756			return -ENOMEM;
 757		}
 758	}
 759
 760	cmd_sts = BUFFER_OWNED_BY_DMA;
 761	if (last_tcp) {
 762		/* last descriptor in the TCP packet */
 763		cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
 764		/* last descriptor in SKB */
 765		if (is_last)
 766			cmd_sts |= TX_ENABLE_INTERRUPT;
 767	}
 768	desc->cmd_sts = cmd_sts;
 769	return 0;
 770}
 771
 772static inline void
 773txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
 774		u32 *first_cmd_sts, bool first_desc)
 775{
 776	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 777	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 778	int tx_index;
 779	struct tx_desc *desc;
 780	int ret;
 781	u32 cmd_csum = 0;
 782	u16 l4i_chk = 0;
 783	u32 cmd_sts;
 784
 785	tx_index = txq->tx_curr_desc;
 786	desc = &txq->tx_desc_area[tx_index];
 787
 788	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
 789	if (ret)
 790		WARN(1, "failed to prepare checksum!");
 791
 792	/* Should we set this? Can't use the value from skb_tx_csum()
 793	 * as it's not the correct initial L4 checksum to use. */
 794	desc->l4i_chk = 0;
 795
 796	desc->byte_cnt = hdr_len;
 797	desc->buf_ptr = txq->tso_hdrs_dma +
 798			txq->tx_curr_desc * TSO_HEADER_SIZE;
 799	cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA  | TX_FIRST_DESC |
 800				   GEN_CRC;
 801
 802	/* Defer updating the first command descriptor until all
 803	 * following descriptors have been written.
 804	 */
 805	if (first_desc)
 806		*first_cmd_sts = cmd_sts;
 807	else
 808		desc->cmd_sts = cmd_sts;
 809
 810	txq->tx_curr_desc++;
 811	if (txq->tx_curr_desc == txq->tx_ring_size)
 812		txq->tx_curr_desc = 0;
 813}
 814
 815static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
 816			  struct net_device *dev)
 817{
 818	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 819	int hdr_len, total_len, data_left, ret;
 820	int desc_count = 0;
 821	struct tso_t tso;
 822	struct tx_desc *first_tx_desc;
 823	u32 first_cmd_sts = 0;
 824
 825	/* Count needed descriptors */
 826	if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
 827		netdev_dbg(dev, "not enough descriptors for TSO!\n");
 828		return -EBUSY;
 829	}
 830
 831	first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
 832
 833	/* Initialize the TSO handler, and prepare the first payload */
 834	hdr_len = tso_start(skb, &tso);
 835
 836	total_len = skb->len - hdr_len;
 837	while (total_len > 0) {
 838		bool first_desc = (desc_count == 0);
 839		char *hdr;
 840
 841		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
 842		total_len -= data_left;
 843		desc_count++;
 844
 845		/* prepare packet headers: MAC + IP + TCP */
 846		hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
 847		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
 848		txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
 849				first_desc);
 850
 851		while (data_left > 0) {
 852			int size;
 853			desc_count++;
 854
 855			size = min_t(int, tso.size, data_left);
 856			ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
 857					       size == data_left,
 858					       total_len == 0);
 859			if (ret)
 860				goto err_release;
 861			data_left -= size;
 862			tso_build_data(skb, &tso, size);
 863		}
 864	}
 865
 866	__skb_queue_tail(&txq->tx_skb, skb);
 867	skb_tx_timestamp(skb);
 868
 869	/* ensure all other descriptors are written before first cmd_sts */
 870	wmb();
 871	first_tx_desc->cmd_sts = first_cmd_sts;
 872
 873	/* clear TX_END status */
 874	mp->work_tx_end &= ~(1 << txq->index);
 875
 876	/* ensure all descriptors are written before poking hardware */
 877	wmb();
 878	txq_enable(txq);
 879	txq->tx_desc_count += desc_count;
 880	return 0;
 881err_release:
 882	/* TODO: Release all used data descriptors; header descriptors must not
 883	 * be DMA-unmapped.
 884	 */
 885	return ret;
 886}
 887
 888static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 889{
 890	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 891	int nr_frags = skb_shinfo(skb)->nr_frags;
 892	int frag;
 893
 894	for (frag = 0; frag < nr_frags; frag++) {
 895		skb_frag_t *this_frag;
 896		int tx_index;
 897		struct tx_desc *desc;
 898
 899		this_frag = &skb_shinfo(skb)->frags[frag];
 900		tx_index = txq->tx_curr_desc++;
 901		if (txq->tx_curr_desc == txq->tx_ring_size)
 902			txq->tx_curr_desc = 0;
 903		desc = &txq->tx_desc_area[tx_index];
 904		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
 905
 906		/*
 907		 * The last fragment will generate an interrupt
 908		 * which will free the skb on TX completion.
 909		 */
 910		if (frag == nr_frags - 1) {
 911			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
 912					ZERO_PADDING | TX_LAST_DESC |
 913					TX_ENABLE_INTERRUPT;
 914		} else {
 915			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
 916		}
 917
 918		desc->l4i_chk = 0;
 919		desc->byte_cnt = skb_frag_size(this_frag);
 920		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
 921						 this_frag, 0, desc->byte_cnt,
 922						 DMA_TO_DEVICE);
 923	}
 924}
 925
 926static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
 927			  struct net_device *dev)
 928{
 929	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 930	int nr_frags = skb_shinfo(skb)->nr_frags;
 931	int tx_index;
 932	struct tx_desc *desc;
 933	u32 cmd_sts;
 934	u16 l4i_chk;
 935	int length, ret;
 936
 937	cmd_sts = 0;
 938	l4i_chk = 0;
 939
 940	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
 941		if (net_ratelimit())
 942			netdev_err(dev, "tx queue full?!\n");
 943		return -EBUSY;
 944	}
 945
 946	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
 947	if (ret)
 948		return ret;
 949	cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
 950
 951	tx_index = txq->tx_curr_desc++;
 952	if (txq->tx_curr_desc == txq->tx_ring_size)
 953		txq->tx_curr_desc = 0;
 954	desc = &txq->tx_desc_area[tx_index];
 955	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 956
 957	if (nr_frags) {
 958		txq_submit_frag_skb(txq, skb);
 959		length = skb_headlen(skb);
 960	} else {
 961		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
 962		length = skb->len;
 963	}
 964
 965	desc->l4i_chk = l4i_chk;
 966	desc->byte_cnt = length;
 967	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
 968				       length, DMA_TO_DEVICE);
 969
 970	__skb_queue_tail(&txq->tx_skb, skb);
 971
 972	skb_tx_timestamp(skb);
 973
 974	/* ensure all other descriptors are written before first cmd_sts */
 975	wmb();
 976	desc->cmd_sts = cmd_sts;
 977
 978	/* clear TX_END status */
 979	mp->work_tx_end &= ~(1 << txq->index);
 980
 981	/* ensure all descriptors are written before poking hardware */
 982	wmb();
 983	txq_enable(txq);
 984
 985	txq->tx_desc_count += nr_frags + 1;
 986
 987	return 0;
 988}
 989
 990static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 991{
 992	struct mv643xx_eth_private *mp = netdev_priv(dev);
 993	int length, queue, ret;
 994	struct tx_queue *txq;
 995	struct netdev_queue *nq;
 996
 997	queue = skb_get_queue_mapping(skb);
 998	txq = mp->txq + queue;
 999	nq = netdev_get_tx_queue(dev, queue);
1000
1001	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
1002		netdev_printk(KERN_DEBUG, dev,
1003			      "failed to linearize skb with tiny unaligned fragment\n");
1004		return NETDEV_TX_BUSY;
1005	}
1006
1007	length = skb->len;
1008
1009	if (skb_is_gso(skb))
1010		ret = txq_submit_tso(txq, skb, dev);
1011	else
1012		ret = txq_submit_skb(txq, skb, dev);
1013	if (!ret) {
1014		txq->tx_bytes += length;
1015		txq->tx_packets++;
1016
1017		if (txq->tx_desc_count >= txq->tx_stop_threshold)
1018			netif_tx_stop_queue(nq);
1019	} else {
1020		txq->tx_dropped++;
1021		dev_kfree_skb_any(skb);
1022	}
1023
1024	return NETDEV_TX_OK;
1025}
1026
1027
1028/* tx napi ******************************************************************/
1029static void txq_kick(struct tx_queue *txq)
1030{
1031	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1032	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1033	u32 hw_desc_ptr;
1034	u32 expected_ptr;
1035
1036	__netif_tx_lock(nq, smp_processor_id());
1037
1038	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
1039		goto out;
1040
1041	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
1042	expected_ptr = (u32)txq->tx_desc_dma +
1043				txq->tx_curr_desc * sizeof(struct tx_desc);
1044
1045	if (hw_desc_ptr != expected_ptr)
1046		txq_enable(txq);
1047
1048out:
1049	__netif_tx_unlock(nq);
1050
1051	mp->work_tx_end &= ~(1 << txq->index);
1052}
1053
1054static int txq_reclaim(struct tx_queue *txq, int budget, int force)
1055{
1056	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1057	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1058	int reclaimed;
1059
1060	__netif_tx_lock_bh(nq);
1061
1062	reclaimed = 0;
1063	while (reclaimed < budget && txq->tx_desc_count > 0) {
1064		int tx_index;
1065		struct tx_desc *desc;
1066		u32 cmd_sts;
1067		char desc_dma_map;
1068
1069		tx_index = txq->tx_used_desc;
1070		desc = &txq->tx_desc_area[tx_index];
1071		desc_dma_map = txq->tx_desc_mapping[tx_index];
1072
1073		cmd_sts = desc->cmd_sts;
1074
1075		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
1076			if (!force)
1077				break;
1078			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
1079		}
1080
1081		txq->tx_used_desc = tx_index + 1;
1082		if (txq->tx_used_desc == txq->tx_ring_size)
1083			txq->tx_used_desc = 0;
1084
1085		reclaimed++;
1086		txq->tx_desc_count--;
1087
1088		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
1089
1090			if (desc_dma_map == DESC_DMA_MAP_PAGE)
1091				dma_unmap_page(mp->dev->dev.parent,
1092					       desc->buf_ptr,
1093					       desc->byte_cnt,
1094					       DMA_TO_DEVICE);
1095			else
1096				dma_unmap_single(mp->dev->dev.parent,
1097						 desc->buf_ptr,
1098						 desc->byte_cnt,
1099						 DMA_TO_DEVICE);
1100		}
1101
1102		if (cmd_sts & TX_ENABLE_INTERRUPT) {
1103			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
1104
1105			if (!WARN_ON(!skb))
1106				dev_consume_skb_any(skb);
1107		}
1108
1109		if (cmd_sts & ERROR_SUMMARY) {
1110			netdev_info(mp->dev, "tx error\n");
1111			mp->dev->stats.tx_errors++;
1112		}
1113
1114	}
1115
1116	__netif_tx_unlock_bh(nq);
1117
1118	if (reclaimed < budget)
1119		mp->work_tx &= ~(1 << txq->index);
1120
1121	return reclaimed;
1122}
1123
1124
1125/* tx rate control **********************************************************/
1126/*
1127 * Set total maximum TX rate (shared by all TX queues for this port)
1128 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
1129 */
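/*
 * Worked example, assuming a hypothetical 200 MHz t_clk and a 1500-byte
 * MTU: port_start()'s tx_set_rate(mp, 1000000000, 16777216) yields
 * token_rate = ((1000000000 / 1000) * 64) / (200000000 / 1000) = 320,
 * mtu = (1500 + 255) >> 8 = 6, and
 * bucket_size = (16777216 + 255) >> 8 = 65536, clamped to 65535.
 */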
1130static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
1131{
1132	int token_rate;
1133	int mtu;
1134	int bucket_size;
1135
1136	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
1137	if (token_rate > 1023)
1138		token_rate = 1023;
1139
1140	mtu = (mp->dev->mtu + 255) >> 8;
1141	if (mtu > 63)
1142		mtu = 63;
1143
1144	bucket_size = (burst + 255) >> 8;
1145	if (bucket_size > 65535)
1146		bucket_size = 65535;
1147
1148	switch (mp->shared->tx_bw_control) {
1149	case TX_BW_CONTROL_OLD_LAYOUT:
1150		wrlp(mp, TX_BW_RATE, token_rate);
1151		wrlp(mp, TX_BW_MTU, mtu);
1152		wrlp(mp, TX_BW_BURST, bucket_size);
1153		break;
1154	case TX_BW_CONTROL_NEW_LAYOUT:
1155		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
1156		wrlp(mp, TX_BW_MTU_MOVED, mtu);
1157		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
1158		break;
1159	}
1160}
1161
1162static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
1163{
1164	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1165	int token_rate;
1166	int bucket_size;
1167
1168	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
1169	if (token_rate > 1023)
1170		token_rate = 1023;
1171
1172	bucket_size = (burst + 255) >> 8;
1173	if (bucket_size > 65535)
1174		bucket_size = 65535;
1175
1176	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
1177	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
1178}
1179
1180static void txq_set_fixed_prio_mode(struct tx_queue *txq)
1181{
1182	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1183	int off;
1184	u32 val;
1185
1186	/*
1187	 * Turn on fixed priority mode.
1188	 */
1189	off = 0;
1190	switch (mp->shared->tx_bw_control) {
1191	case TX_BW_CONTROL_OLD_LAYOUT:
1192		off = TXQ_FIX_PRIO_CONF;
1193		break;
1194	case TX_BW_CONTROL_NEW_LAYOUT:
1195		off = TXQ_FIX_PRIO_CONF_MOVED;
1196		break;
1197	}
1198
1199	if (off) {
1200		val = rdlp(mp, off);
1201		val |= 1 << txq->index;
1202		wrlp(mp, off, val);
1203	}
1204}
1205
1206
1207/* mii management interface *************************************************/
1208static void mv643xx_eth_adjust_link(struct net_device *dev)
1209{
1210	struct mv643xx_eth_private *mp = netdev_priv(dev);
1211	u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
1212	u32 autoneg_disable = FORCE_LINK_PASS |
1213	             DISABLE_AUTO_NEG_SPEED_GMII |
1214		     DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
1215		     DISABLE_AUTO_NEG_FOR_DUPLEX;
1216
1217	if (dev->phydev->autoneg == AUTONEG_ENABLE) {
1218		/* enable auto negotiation */
1219		pscr &= ~autoneg_disable;
1220		goto out_write;
1221	}
1222
1223	pscr |= autoneg_disable;
1224
1225	if (dev->phydev->speed == SPEED_1000) {
1226		/* force gigabit, half duplex not supported */
1227		pscr |= SET_GMII_SPEED_TO_1000;
1228		pscr |= SET_FULL_DUPLEX_MODE;
1229		goto out_write;
1230	}
1231
1232	pscr &= ~SET_GMII_SPEED_TO_1000;
1233
1234	if (dev->phydev->speed == SPEED_100)
1235		pscr |= SET_MII_SPEED_TO_100;
1236	else
1237		pscr &= ~SET_MII_SPEED_TO_100;
1238
1239	if (dev->phydev->duplex == DUPLEX_FULL)
1240		pscr |= SET_FULL_DUPLEX_MODE;
1241	else
1242		pscr &= ~SET_FULL_DUPLEX_MODE;
1243
1244out_write:
1245	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
1246}
1247
1248/* statistics ***************************************************************/
1249static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1250{
1251	struct mv643xx_eth_private *mp = netdev_priv(dev);
1252	struct net_device_stats *stats = &dev->stats;
1253	unsigned long tx_packets = 0;
1254	unsigned long tx_bytes = 0;
1255	unsigned long tx_dropped = 0;
1256	int i;
1257
1258	for (i = 0; i < mp->txq_count; i++) {
1259		struct tx_queue *txq = mp->txq + i;
1260
1261		tx_packets += txq->tx_packets;
1262		tx_bytes += txq->tx_bytes;
1263		tx_dropped += txq->tx_dropped;
1264	}
1265
1266	stats->tx_packets = tx_packets;
1267	stats->tx_bytes = tx_bytes;
1268	stats->tx_dropped = tx_dropped;
1269
1270	return stats;
1271}
1272
1273static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
1274{
1275	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
1276}
1277
1278static void mib_counters_clear(struct mv643xx_eth_private *mp)
1279{
1280	int i;
1281
1282	for (i = 0; i < 0x80; i += 4)
1283		mib_read(mp, i);
1284
1285	/* Clear non MIB hw counters also */
1286	rdlp(mp, RX_DISCARD_FRAME_CNT);
1287	rdlp(mp, RX_OVERRUN_FRAME_CNT);
1288}
1289
1290static void mib_counters_update(struct mv643xx_eth_private *mp)
1291{
1292	struct mib_counters *p = &mp->mib_counters;
1293
1294	spin_lock_bh(&mp->mib_counters_lock);
1295	p->good_octets_received += mib_read(mp, 0x00);
1296	p->bad_octets_received += mib_read(mp, 0x08);
1297	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
1298	p->good_frames_received += mib_read(mp, 0x10);
1299	p->bad_frames_received += mib_read(mp, 0x14);
1300	p->broadcast_frames_received += mib_read(mp, 0x18);
1301	p->multicast_frames_received += mib_read(mp, 0x1c);
1302	p->frames_64_octets += mib_read(mp, 0x20);
1303	p->frames_65_to_127_octets += mib_read(mp, 0x24);
1304	p->frames_128_to_255_octets += mib_read(mp, 0x28);
1305	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
1306	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
1307	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
1308	p->good_octets_sent += mib_read(mp, 0x38);
1309	p->good_frames_sent += mib_read(mp, 0x40);
1310	p->excessive_collision += mib_read(mp, 0x44);
1311	p->multicast_frames_sent += mib_read(mp, 0x48);
1312	p->broadcast_frames_sent += mib_read(mp, 0x4c);
1313	p->unrec_mac_control_received += mib_read(mp, 0x50);
1314	p->fc_sent += mib_read(mp, 0x54);
1315	p->good_fc_received += mib_read(mp, 0x58);
1316	p->bad_fc_received += mib_read(mp, 0x5c);
1317	p->undersize_received += mib_read(mp, 0x60);
1318	p->fragments_received += mib_read(mp, 0x64);
1319	p->oversize_received += mib_read(mp, 0x68);
1320	p->jabber_received += mib_read(mp, 0x6c);
1321	p->mac_receive_error += mib_read(mp, 0x70);
1322	p->bad_crc_event += mib_read(mp, 0x74);
1323	p->collision += mib_read(mp, 0x78);
1324	p->late_collision += mib_read(mp, 0x7c);
1325	/* Non MIB hardware counters */
1326	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
1327	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
1328	spin_unlock_bh(&mp->mib_counters_lock);
1329}
1330
1331static void mib_counters_timer_wrapper(struct timer_list *t)
1332{
1333	struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
1334	mib_counters_update(mp);
1335	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
1336}
1337
1338
1339/* interrupt coalescing *****************************************************/
1340/*
1341 * Hardware coalescing parameters are set in units of 64 t_clk
1342 * cycles.  I.e.:
1343 *
1344 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
1345 *
1346 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
1347 *
1348 * In the ->set*() methods, we round the computed register value
1349 * to the nearest integer.
1350 */
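/*
 * Worked example, assuming a hypothetical 200 MHz t_clk: a requested
 * delay of 32 usec maps to (32 * 200000000 + 31999999) / 64000000 = 100
 * register units, i.e. 100 * 64 = 6400 t_clk cycles.  Converting back,
 * (100 * 64000000 + 200000000 / 2) / 200000000 = 32 usec.
 */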
1351static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
1352{
1353	u32 val = rdlp(mp, SDMA_CONFIG);
1354	u64 temp;
1355
1356	if (mp->shared->extended_rx_coal_limit)
1357		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
1358	else
1359		temp = (val & 0x003fff00) >> 8;
1360
1361	temp *= 64000000;
1362	temp += mp->t_clk / 2;
1363	do_div(temp, mp->t_clk);
1364
1365	return (unsigned int)temp;
1366}
1367
1368static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1369{
1370	u64 temp;
1371	u32 val;
1372
1373	temp = (u64)usec * mp->t_clk;
1374	temp += 31999999;
1375	do_div(temp, 64000000);
1376
1377	val = rdlp(mp, SDMA_CONFIG);
1378	if (mp->shared->extended_rx_coal_limit) {
1379		if (temp > 0xffff)
1380			temp = 0xffff;
1381		val &= ~0x023fff80;
1382		val |= (temp & 0x8000) << 10;
1383		val |= (temp & 0x7fff) << 7;
1384	} else {
1385		if (temp > 0x3fff)
1386			temp = 0x3fff;
1387		val &= ~0x003fff00;
1388		val |= (temp & 0x3fff) << 8;
1389	}
1390	wrlp(mp, SDMA_CONFIG, val);
1391}
1392
1393static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
1394{
1395	u64 temp;
1396
1397	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
1398	temp *= 64000000;
1399	temp += mp->t_clk / 2;
1400	do_div(temp, mp->t_clk);
1401
1402	return (unsigned int)temp;
1403}
1404
1405static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1406{
1407	u64 temp;
1408
1409	temp = (u64)usec * mp->t_clk;
1410	temp += 31999999;
1411	do_div(temp, 64000000);
1412
1413	if (temp > 0x3fff)
1414		temp = 0x3fff;
1415
1416	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
1417}
1418
1419
1420/* ethtool ******************************************************************/
1421struct mv643xx_eth_stats {
1422	char stat_string[ETH_GSTRING_LEN];
1423	int sizeof_stat;
1424	int netdev_off;
1425	int mp_off;
1426};
1427
1428#define SSTAT(m)						\
1429	{ #m, sizeof_field(struct net_device_stats, m),		\
1430	  offsetof(struct net_device, stats.m), -1 }
1431
1432#define MIBSTAT(m)						\
1433	{ #m, sizeof_field(struct mib_counters, m),		\
1434	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
1435
1436static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
1437	SSTAT(rx_packets),
1438	SSTAT(tx_packets),
1439	SSTAT(rx_bytes),
1440	SSTAT(tx_bytes),
1441	SSTAT(rx_errors),
1442	SSTAT(tx_errors),
1443	SSTAT(rx_dropped),
1444	SSTAT(tx_dropped),
1445	MIBSTAT(good_octets_received),
1446	MIBSTAT(bad_octets_received),
1447	MIBSTAT(internal_mac_transmit_err),
1448	MIBSTAT(good_frames_received),
1449	MIBSTAT(bad_frames_received),
1450	MIBSTAT(broadcast_frames_received),
1451	MIBSTAT(multicast_frames_received),
1452	MIBSTAT(frames_64_octets),
1453	MIBSTAT(frames_65_to_127_octets),
1454	MIBSTAT(frames_128_to_255_octets),
1455	MIBSTAT(frames_256_to_511_octets),
1456	MIBSTAT(frames_512_to_1023_octets),
1457	MIBSTAT(frames_1024_to_max_octets),
1458	MIBSTAT(good_octets_sent),
1459	MIBSTAT(good_frames_sent),
1460	MIBSTAT(excessive_collision),
1461	MIBSTAT(multicast_frames_sent),
1462	MIBSTAT(broadcast_frames_sent),
1463	MIBSTAT(unrec_mac_control_received),
1464	MIBSTAT(fc_sent),
1465	MIBSTAT(good_fc_received),
1466	MIBSTAT(bad_fc_received),
1467	MIBSTAT(undersize_received),
1468	MIBSTAT(fragments_received),
1469	MIBSTAT(oversize_received),
1470	MIBSTAT(jabber_received),
1471	MIBSTAT(mac_receive_error),
1472	MIBSTAT(bad_crc_event),
1473	MIBSTAT(collision),
1474	MIBSTAT(late_collision),
1475	MIBSTAT(rx_discard),
1476	MIBSTAT(rx_overrun),
1477};
1478
1479static int
1480mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
1481				   struct ethtool_link_ksettings *cmd)
1482{
1483	struct net_device *dev = mp->dev;
1484
1485	phy_ethtool_ksettings_get(dev->phydev, cmd);
1486
1487	/*
1488	 * The MAC does not support 1000baseT_Half.
1489	 */
1490	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
1491			   cmd->link_modes.supported);
1492	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
1493			   cmd->link_modes.advertising);
1494
1495	return 0;
1496}
1497
1498static int
1499mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp,
1500				       struct ethtool_link_ksettings *cmd)
1501{
1502	u32 port_status;
1503	u32 supported, advertising;
1504
1505	port_status = rdlp(mp, PORT_STATUS);
1506
1507	supported = SUPPORTED_MII;
1508	advertising = ADVERTISED_MII;
1509	switch (port_status & PORT_SPEED_MASK) {
1510	case PORT_SPEED_10:
1511		cmd->base.speed = SPEED_10;
1512		break;
1513	case PORT_SPEED_100:
1514		cmd->base.speed = SPEED_100;
1515		break;
1516	case PORT_SPEED_1000:
1517		cmd->base.speed = SPEED_1000;
1518		break;
1519	default:
1520		cmd->base.speed = -1;
1521		break;
1522	}
1523	cmd->base.duplex = (port_status & FULL_DUPLEX) ?
1524		DUPLEX_FULL : DUPLEX_HALF;
1525	cmd->base.port = PORT_MII;
1526	cmd->base.phy_address = 0;
1527	cmd->base.autoneg = AUTONEG_DISABLE;
1528
1529	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1530						supported);
1531	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1532						advertising);
1533
1534	return 0;
1535}
1536
1537static void
1538mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1539{
1540	wol->supported = 0;
1541	wol->wolopts = 0;
1542	if (dev->phydev)
1543		phy_ethtool_get_wol(dev->phydev, wol);
1544}
1545
1546static int
1547mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1548{
1549	int err;
1550
1551	if (!dev->phydev)
1552		return -EOPNOTSUPP;
1553
1554	err = phy_ethtool_set_wol(dev->phydev, wol);
1555	/* Given that mv643xx_eth works without the marvell-specific PHY driver,
1556	 * this debugging hint is useful to have.
1557	 */
1558	if (err == -EOPNOTSUPP)
1559		netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
1560	return err;
1561}
1562
1563static int
1564mv643xx_eth_get_link_ksettings(struct net_device *dev,
1565			       struct ethtool_link_ksettings *cmd)
1566{
1567	struct mv643xx_eth_private *mp = netdev_priv(dev);
1568
1569	if (dev->phydev)
1570		return mv643xx_eth_get_link_ksettings_phy(mp, cmd);
1571	else
1572		return mv643xx_eth_get_link_ksettings_phyless(mp, cmd);
1573}
1574
1575static int
1576mv643xx_eth_set_link_ksettings(struct net_device *dev,
1577			       const struct ethtool_link_ksettings *cmd)
1578{
1579	struct ethtool_link_ksettings c = *cmd;
1580	u32 advertising;
1581	int ret;
1582
1583	if (!dev->phydev)
1584		return -EINVAL;
1585
1586	/*
1587	 * The MAC does not support 1000baseT_Half.
1588	 */
1589	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1590						c.link_modes.advertising);
1591	advertising &= ~ADVERTISED_1000baseT_Half;
1592	ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising,
1593						advertising);
1594
1595	ret = phy_ethtool_ksettings_set(dev->phydev, &c);
1596	if (!ret)
1597		mv643xx_eth_adjust_link(dev);
1598	return ret;
1599}
1600
1601static void mv643xx_eth_get_drvinfo(struct net_device *dev,
1602				    struct ethtool_drvinfo *drvinfo)
1603{
1604	strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
1605		sizeof(drvinfo->driver));
1606	strlcpy(drvinfo->version, mv643xx_eth_driver_version,
1607		sizeof(drvinfo->version));
1608	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
1609	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
1610}
1611
1612static int
1613mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1614{
1615	struct mv643xx_eth_private *mp = netdev_priv(dev);
1616
1617	ec->rx_coalesce_usecs = get_rx_coal(mp);
1618	ec->tx_coalesce_usecs = get_tx_coal(mp);
1619
1620	return 0;
1621}
1622
1623static int
1624mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1625{
1626	struct mv643xx_eth_private *mp = netdev_priv(dev);
1627
1628	set_rx_coal(mp, ec->rx_coalesce_usecs);
1629	set_tx_coal(mp, ec->tx_coalesce_usecs);
1630
1631	return 0;
1632}
1633
1634static void
1635mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1636{
1637	struct mv643xx_eth_private *mp = netdev_priv(dev);
1638
1639	er->rx_max_pending = 4096;
1640	er->tx_max_pending = 4096;
1641
1642	er->rx_pending = mp->rx_ring_size;
1643	er->tx_pending = mp->tx_ring_size;
1644}
1645
1646static int
1647mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1648{
1649	struct mv643xx_eth_private *mp = netdev_priv(dev);
1650
1651	if (er->rx_mini_pending || er->rx_jumbo_pending)
1652		return -EINVAL;
1653
1654	mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
1655	mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
1656				   MV643XX_MAX_SKB_DESCS * 2, 4096);
1657	if (mp->tx_ring_size != er->tx_pending)
1658		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
1659			    mp->tx_ring_size, er->tx_pending);
1660
1661	if (netif_running(dev)) {
1662		mv643xx_eth_stop(dev);
1663		if (mv643xx_eth_open(dev)) {
1664			netdev_err(dev,
1665				   "fatal error on re-opening device after ring param change\n");
1666			return -ENOMEM;
1667		}
1668	}
1669
1670	return 0;
1671}
1672
1673
1674static int
1675mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
1676{
1677	struct mv643xx_eth_private *mp = netdev_priv(dev);
1678	bool rx_csum = features & NETIF_F_RXCSUM;
1679
1680	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
1681
1682	return 0;
1683}
1684
1685static void mv643xx_eth_get_strings(struct net_device *dev,
1686				    uint32_t stringset, uint8_t *data)
1687{
1688	int i;
1689
1690	if (stringset == ETH_SS_STATS) {
1691		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
1692			memcpy(data + i * ETH_GSTRING_LEN,
1693				mv643xx_eth_stats[i].stat_string,
1694				ETH_GSTRING_LEN);
1695		}
1696	}
1697}
1698
1699static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
1700					  struct ethtool_stats *stats,
1701					  uint64_t *data)
1702{
1703	struct mv643xx_eth_private *mp = netdev_priv(dev);
1704	int i;
1705
1706	mv643xx_eth_get_stats(dev);
1707	mib_counters_update(mp);
1708
1709	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
1710		const struct mv643xx_eth_stats *stat;
1711		void *p;
1712
1713		stat = mv643xx_eth_stats + i;
1714
1715		if (stat->netdev_off >= 0)
1716			p = ((void *)mp->dev) + stat->netdev_off;
1717		else
1718			p = ((void *)mp) + stat->mp_off;
1719
1720		data[i] = (stat->sizeof_stat == 8) ?
1721				*(uint64_t *)p : *(uint32_t *)p;
1722	}
1723}
1724
1725static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
1726{
1727	if (sset == ETH_SS_STATS)
1728		return ARRAY_SIZE(mv643xx_eth_stats);
1729
1730	return -EOPNOTSUPP;
1731}
1732
1733static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
1734	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
1735	.get_drvinfo		= mv643xx_eth_get_drvinfo,
1736	.nway_reset		= phy_ethtool_nway_reset,
1737	.get_link		= ethtool_op_get_link,
1738	.get_coalesce		= mv643xx_eth_get_coalesce,
1739	.set_coalesce		= mv643xx_eth_set_coalesce,
1740	.get_ringparam		= mv643xx_eth_get_ringparam,
1741	.set_ringparam		= mv643xx_eth_set_ringparam,
1742	.get_strings		= mv643xx_eth_get_strings,
1743	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
1744	.get_sset_count		= mv643xx_eth_get_sset_count,
1745	.get_ts_info		= ethtool_op_get_ts_info,
1746	.get_wol                = mv643xx_eth_get_wol,
1747	.set_wol                = mv643xx_eth_set_wol,
1748	.get_link_ksettings	= mv643xx_eth_get_link_ksettings,
1749	.set_link_ksettings	= mv643xx_eth_set_link_ksettings,
1750};
1751
1752
1753/* address handling *********************************************************/
1754static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1755{
1756	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
1757	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
1758
1759	addr[0] = (mac_h >> 24) & 0xff;
1760	addr[1] = (mac_h >> 16) & 0xff;
1761	addr[2] = (mac_h >> 8) & 0xff;
1762	addr[3] = mac_h & 0xff;
1763	addr[4] = (mac_l >> 8) & 0xff;
1764	addr[5] = mac_l & 0xff;
1765}
1766
1767static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
1768{
1769	wrlp(mp, MAC_ADDR_HIGH,
1770		(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
1771	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
1772}
1773
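/*
 * Build the 16-bit nibble mask for the hardware unicast filter: every
 * secondary unicast address must match the primary address in all but
 * the low nibble of the last byte, and each such nibble sets one bit in
 * the mask.  Returning 0 makes the caller fall back to unicast
 * promiscuous mode.
 */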
1774static u32 uc_addr_filter_mask(struct net_device *dev)
1775{
1776	struct netdev_hw_addr *ha;
1777	u32 nibbles;
1778
1779	if (dev->flags & IFF_PROMISC)
1780		return 0;
1781
1782	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
1783	netdev_for_each_uc_addr(ha, dev) {
1784		if (memcmp(dev->dev_addr, ha->addr, 5))
1785			return 0;
1786		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
1787			return 0;
1788
1789		nibbles |= 1 << (ha->addr[5] & 0x0f);
1790	}
1791
1792	return nibbles;
1793}
1794
1795static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
1796{
1797	struct mv643xx_eth_private *mp = netdev_priv(dev);
1798	u32 port_config;
1799	u32 nibbles;
1800	int i;
1801
1802	uc_addr_set(mp, dev->dev_addr);
1803
1804	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
1805
1806	nibbles = uc_addr_filter_mask(dev);
1807	if (!nibbles) {
1808		port_config |= UNICAST_PROMISCUOUS_MODE;
1809		nibbles = 0xffff;
1810	}
1811
1812	for (i = 0; i < 16; i += 4) {
1813		int off = UNICAST_TABLE(mp->port_num) + i;
1814		u32 v;
1815
1816		v = 0;
1817		if (nibbles & 1)
1818			v |= 0x00000001;
1819		if (nibbles & 2)
1820			v |= 0x00000100;
1821		if (nibbles & 4)
1822			v |= 0x00010000;
1823		if (nibbles & 8)
1824			v |= 0x01000000;
1825		nibbles >>= 4;
1826
1827		wrl(mp, off, v);
1828	}
1829
1830	wrlp(mp, PORT_CONFIG, port_config);
1831}
1832
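/*
 * 8-bit CRC (polynomial 0x107, i.e. x^8 + x^2 + x + 1) over the MAC
 * address; the result indexes the 256-entry "other" multicast hash
 * table.
 */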
1833static int addr_crc(unsigned char *addr)
1834{
1835	int crc = 0;
1836	int i;
1837
1838	for (i = 0; i < 6; i++) {
1839		int j;
1840
1841		crc = (crc ^ addr[i]) << 8;
1842		for (j = 7; j >= 0; j--) {
1843			if (crc & (0x100 << j))
1844				crc ^= 0x107 << j;
1845		}
1846	}
1847
1848	return crc;
1849}
1850
1851static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
1852{
1853	struct mv643xx_eth_private *mp = netdev_priv(dev);
1854	u32 *mc_spec;
1855	u32 *mc_other;
1856	struct netdev_hw_addr *ha;
1857	int i;
1858
1859	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
1860		goto promiscuous;
1861
1862	/* Allocate both mc_spec and mc_other tables */
1863	mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC);
1864	if (!mc_spec)
1865		goto promiscuous;
1866	mc_other = &mc_spec[64];
1867
1868	netdev_for_each_mc_addr(ha, dev) {
1869		u8 *a = ha->addr;
1870		u32 *table;
1871		u8 entry;
1872
1873		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
1874			table = mc_spec;
1875			entry = a[5];
1876		} else {
1877			table = mc_other;
1878			entry = addr_crc(a);
1879		}
1880
1881		table[entry >> 2] |= 1 << (8 * (entry & 3));
1882	}
1883
1884	for (i = 0; i < 64; i++) {
1885		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1886		    mc_spec[i]);
1887		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1888		    mc_other[i]);
1889	}
1890
1891	kfree(mc_spec);
1892	return;
1893
1894promiscuous:
1895	for (i = 0; i < 64; i++) {
1896		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1897		    0x01010101u);
1898		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1899		    0x01010101u);
1900	}
1901}
1902
1903static void mv643xx_eth_set_rx_mode(struct net_device *dev)
1904{
1905	mv643xx_eth_program_unicast_filter(dev);
1906	mv643xx_eth_program_multicast_filter(dev);
1907}
1908
1909static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
1910{
1911	struct sockaddr *sa = addr;
1912
1913	if (!is_valid_ether_addr(sa->sa_data))
1914		return -EADDRNOTAVAIL;
1915
1916	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
1917
1918	netif_addr_lock_bh(dev);
1919	mv643xx_eth_program_unicast_filter(dev);
1920	netif_addr_unlock_bh(dev);
1921
1922	return 0;
1923}
1924
1925
1926/* rx/tx queue initialisation ***********************************************/
1927static int rxq_init(struct mv643xx_eth_private *mp, int index)
1928{
1929	struct rx_queue *rxq = mp->rxq + index;
1930	struct rx_desc *rx_desc;
1931	int size;
1932	int i;
1933
1934	rxq->index = index;
1935
1936	rxq->rx_ring_size = mp->rx_ring_size;
1937
1938	rxq->rx_desc_count = 0;
1939	rxq->rx_curr_desc = 0;
1940	rxq->rx_used_desc = 0;
1941
1942	size = rxq->rx_ring_size * sizeof(struct rx_desc);
1943
1944	if (index == 0 && size <= mp->rx_desc_sram_size) {
1945		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
1946						mp->rx_desc_sram_size);
1947		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
1948	} else {
1949		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1950						       size, &rxq->rx_desc_dma,
1951						       GFP_KERNEL);
1952	}
1953
1954	if (rxq->rx_desc_area == NULL) {
1955		netdev_err(mp->dev,
1956			   "can't allocate rx ring (%d bytes)\n", size);
1957		goto out;
1958	}
1959	memset(rxq->rx_desc_area, 0, size);
1960
1961	rxq->rx_desc_area_size = size;
1962	rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
1963				    GFP_KERNEL);
1964	if (rxq->rx_skb == NULL)
1965		goto out_free;
1966
1967	rx_desc = rxq->rx_desc_area;
1968	for (i = 0; i < rxq->rx_ring_size; i++) {
1969		int nexti;
1970
1971		nexti = i + 1;
1972		if (nexti == rxq->rx_ring_size)
1973			nexti = 0;
1974
1975		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
1976					nexti * sizeof(struct rx_desc);
1977	}
1978
1979	return 0;
1980
1981
1982out_free:
1983	if (index == 0 && size <= mp->rx_desc_sram_size)
1984		iounmap(rxq->rx_desc_area);
1985	else
1986		dma_free_coherent(mp->dev->dev.parent, size,
1987				  rxq->rx_desc_area,
1988				  rxq->rx_desc_dma);
1989
1990out:
1991	return -ENOMEM;
1992}
1993
1994static void rxq_deinit(struct rx_queue *rxq)
1995{
1996	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
1997	int i;
1998
1999	rxq_disable(rxq);
2000
2001	for (i = 0; i < rxq->rx_ring_size; i++) {
2002		if (rxq->rx_skb[i]) {
2003			dev_consume_skb_any(rxq->rx_skb[i]);
2004			rxq->rx_desc_count--;
2005		}
2006	}
2007
2008	if (rxq->rx_desc_count) {
2009		netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
2010			   rxq->rx_desc_count);
2011	}
2012
2013	if (rxq->index == 0 &&
2014	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
2015		iounmap(rxq->rx_desc_area);
2016	else
2017		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
2018				  rxq->rx_desc_area, rxq->rx_desc_dma);
2019
2020	kfree(rxq->rx_skb);
2021}
2022
2023static int txq_init(struct mv643xx_eth_private *mp, int index)
2024{
2025	struct tx_queue *txq = mp->txq + index;
2026	struct tx_desc *tx_desc;
2027	int size;
2028	int ret;
2029	int i;
2030
2031	txq->index = index;
2032
2033	txq->tx_ring_size = mp->tx_ring_size;
2034
2035	/* A queue must always have room for at least one skb.
2036	 * Therefore, stop the queue when the number of free entries
2037	 * drops to the maximum number of descriptors per skb.
2038	 */
2039	txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
2040	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2041
2042	txq->tx_desc_count = 0;
2043	txq->tx_curr_desc = 0;
2044	txq->tx_used_desc = 0;
2045
2046	size = txq->tx_ring_size * sizeof(struct tx_desc);
2047
2048	if (index == 0 && size <= mp->tx_desc_sram_size) {
2049		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
2050						mp->tx_desc_sram_size);
2051		txq->tx_desc_dma = mp->tx_desc_sram_addr;
2052	} else {
2053		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
2054						       size, &txq->tx_desc_dma,
2055						       GFP_KERNEL);
2056	}
2057
2058	if (txq->tx_desc_area == NULL) {
2059		netdev_err(mp->dev,
2060			   "can't allocate tx ring (%d bytes)\n", size);
2061		return -ENOMEM;
2062	}
2063	memset(txq->tx_desc_area, 0, size);
2064
2065	txq->tx_desc_area_size = size;
2066
2067	tx_desc = txq->tx_desc_area;
2068	for (i = 0; i < txq->tx_ring_size; i++) {
2069		struct tx_desc *txd = tx_desc + i;
2070		int nexti;
2071
2072		nexti = i + 1;
2073		if (nexti == txq->tx_ring_size)
2074			nexti = 0;
2075
2076		txd->cmd_sts = 0;
2077		txd->next_desc_ptr = txq->tx_desc_dma +
2078					nexti * sizeof(struct tx_desc);
2079	}
2080
2081	txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
2082				       GFP_KERNEL);
2083	if (!txq->tx_desc_mapping) {
2084		ret = -ENOMEM;
2085		goto err_free_desc_area;
2086	}
2087
2088	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2089	txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
2090					   txq->tx_ring_size * TSO_HEADER_SIZE,
2091					   &txq->tso_hdrs_dma, GFP_KERNEL);
2092	if (txq->tso_hdrs == NULL) {
2093		ret = -ENOMEM;
2094		goto err_free_desc_mapping;
2095	}
2096	skb_queue_head_init(&txq->tx_skb);
2097
2098	return 0;
2099
2100err_free_desc_mapping:
2101	kfree(txq->tx_desc_mapping);
2102err_free_desc_area:
2103	if (index == 0 && size <= mp->tx_desc_sram_size)
2104		iounmap(txq->tx_desc_area);
2105	else
2106		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2107				  txq->tx_desc_area, txq->tx_desc_dma);
2108	return ret;
2109}
2110
2111static void txq_deinit(struct tx_queue *txq)
2112{
2113	struct mv643xx_eth_private *mp = txq_to_mp(txq);
2114
2115	txq_disable(txq);
2116	txq_reclaim(txq, txq->tx_ring_size, 1);
2117
2118	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
2119
2120	if (txq->index == 0 &&
2121	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
2122		iounmap(txq->tx_desc_area);
2123	else
2124		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2125				  txq->tx_desc_area, txq->tx_desc_dma);
2126	kfree(txq->tx_desc_mapping);
2127
2128	if (txq->tso_hdrs)
2129		dma_free_coherent(mp->dev->dev.parent,
2130				  txq->tx_ring_size * TSO_HEADER_SIZE,
2131				  txq->tso_hdrs, txq->tso_hdrs_dma);
2132}
2133
2134
2135/* netdev ops and related ***************************************************/
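/*
 * Fold the pending interrupt cause bits into the per-queue work_rx,
 * work_tx, work_tx_end and work_link bitmasks that mv643xx_eth_poll()
 * consumes.  Returns nonzero if any new work was found.
 */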
2136static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
2137{
2138	u32 int_cause;
2139	u32 int_cause_ext;
2140
2141	int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
2142	if (int_cause == 0)
2143		return 0;
2144
2145	int_cause_ext = 0;
2146	if (int_cause & INT_EXT) {
2147		int_cause &= ~INT_EXT;
2148		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
2149	}
2150
2151	if (int_cause) {
2152		wrlp(mp, INT_CAUSE, ~int_cause);
2153		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
2154				~(rdlp(mp, TXQ_COMMAND) & 0xff);
2155		mp->work_rx |= (int_cause & INT_RX) >> 2;
2156	}
2157
2158	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
2159	if (int_cause_ext) {
2160		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
2161		if (int_cause_ext & INT_EXT_LINK_PHY)
2162			mp->work_link = 1;
2163		mp->work_tx |= int_cause_ext & INT_EXT_TX;
2164	}
2165
2166	return 1;
2167}
2168
2169static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
2170{
2171	struct net_device *dev = (struct net_device *)dev_id;
2172	struct mv643xx_eth_private *mp = netdev_priv(dev);
2173
2174	if (unlikely(!mv643xx_eth_collect_events(mp)))
2175		return IRQ_NONE;
2176
2177	wrlp(mp, INT_MASK, 0);
2178	napi_schedule(&mp->napi);
2179
2180	return IRQ_HANDLED;
2181}
2182
2183static void handle_link_event(struct mv643xx_eth_private *mp)
2184{
2185	struct net_device *dev = mp->dev;
2186	u32 port_status;
2187	int speed;
2188	int duplex;
2189	int fc;
2190
2191	port_status = rdlp(mp, PORT_STATUS);
2192	if (!(port_status & LINK_UP)) {
2193		if (netif_carrier_ok(dev)) {
2194			int i;
2195
2196			netdev_info(dev, "link down\n");
2197
2198			netif_carrier_off(dev);
2199
2200			for (i = 0; i < mp->txq_count; i++) {
2201				struct tx_queue *txq = mp->txq + i;
2202
2203				txq_reclaim(txq, txq->tx_ring_size, 1);
2204				txq_reset_hw_ptr(txq);
2205			}
2206		}
2207		return;
2208	}
2209
2210	switch (port_status & PORT_SPEED_MASK) {
2211	case PORT_SPEED_10:
2212		speed = 10;
2213		break;
2214	case PORT_SPEED_100:
2215		speed = 100;
2216		break;
2217	case PORT_SPEED_1000:
2218		speed = 1000;
2219		break;
2220	default:
2221		speed = -1;
2222		break;
2223	}
2224	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
2225	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
2226
2227	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
2228		    speed, duplex ? "full" : "half", fc ? "en" : "dis");
2229
2230	if (!netif_carrier_ok(dev))
2231		netif_carrier_on(dev);
2232}
2233
2234static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
2235{
2236	struct mv643xx_eth_private *mp;
2237	int work_done;
2238
2239	mp = container_of(napi, struct mv643xx_eth_private, napi);
2240
2241	if (unlikely(mp->oom)) {
2242		mp->oom = 0;
2243		del_timer(&mp->rx_oom);
2244	}
2245
2246	work_done = 0;
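	/*
	 * Each pass below services at most one source of work: link
	 * events first, then the highest-numbered queue with pending
	 * work, preferring TX-end kicks over TX reclaim over RX
	 * processing over RX refill for that queue.
	 */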
2247	while (work_done < budget) {
2248		u8 queue_mask;
2249		int queue;
2250		int work_tbd;
2251
2252		if (mp->work_link) {
2253			mp->work_link = 0;
2254			handle_link_event(mp);
2255			work_done++;
2256			continue;
2257		}
2258
2259		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
2260		if (likely(!mp->oom))
2261			queue_mask |= mp->work_rx_refill;
2262
2263		if (!queue_mask) {
2264			if (mv643xx_eth_collect_events(mp))
2265				continue;
2266			break;
2267		}
2268
2269		queue = fls(queue_mask) - 1;
2270		queue_mask = 1 << queue;
2271
2272		work_tbd = budget - work_done;
2273		if (work_tbd > 16)
2274			work_tbd = 16;
2275
2276		if (mp->work_tx_end & queue_mask) {
2277			txq_kick(mp->txq + queue);
2278		} else if (mp->work_tx & queue_mask) {
2279			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
2280			txq_maybe_wake(mp->txq + queue);
2281		} else if (mp->work_rx & queue_mask) {
2282			work_done += rxq_process(mp->rxq + queue, work_tbd);
2283		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
2284			work_done += rxq_refill(mp->rxq + queue, work_tbd);
2285		} else {
2286			BUG();
2287		}
2288	}
2289
2290	if (work_done < budget) {
2291		if (mp->oom)
2292			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
2293		napi_complete_done(napi, work_done);
2294		wrlp(mp, INT_MASK, mp->int_mask);
2295	}
2296
2297	return work_done;
2298}
2299
2300static inline void oom_timer_wrapper(struct timer_list *t)
2301{
2302	struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
2303
2304	napi_schedule(&mp->napi);
2305}
2306
2307static void port_start(struct mv643xx_eth_private *mp)
2308{
2309	struct net_device *dev = mp->dev;
2310	u32 pscr;
2311	int i;
2312
2313	/*
2314	 * Perform PHY reset, if there is a PHY.
2315	 */
2316	if (dev->phydev) {
2317		struct ethtool_link_ksettings cmd;
2318
2319		mv643xx_eth_get_link_ksettings(dev, &cmd);
2320		phy_init_hw(dev->phydev);
2321		mv643xx_eth_set_link_ksettings(
2322			dev, (const struct ethtool_link_ksettings *)&cmd);
2323		phy_start(dev->phydev);
2324	}
2325
2326	/*
2327	 * Configure basic link parameters.
2328	 */
2329	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2330
2331	pscr |= SERIAL_PORT_ENABLE;
2332	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2333
2334	pscr |= DO_NOT_FORCE_LINK_FAIL;
2335	if (!dev->phydev)
2336		pscr |= FORCE_LINK_PASS;
2337	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2338
2339	/*
2340	 * Configure TX path and queues.
2341	 */
2342	tx_set_rate(mp, 1000000000, 16777216);
2343	for (i = 0; i < mp->txq_count; i++) {
2344		struct tx_queue *txq = mp->txq + i;
2345
2346		txq_reset_hw_ptr(txq);
2347		txq_set_rate(txq, 1000000000, 16777216);
2348		txq_set_fixed_prio_mode(txq);
2349	}
2350
2351	/*
2352	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
2353	 * frames to RX queue #0, and include the pseudo-header when
2354	 * calculating receive checksums.
2355	 */
2356	mv643xx_eth_set_features(mp->dev, mp->dev->features);
2357
2358	/*
2359	 * Treat BPDUs as normal multicasts, and disable partition mode.
2360	 */
2361	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
2362
2363	/*
2364	 * Add configured unicast addresses to address filter table.
2365	 */
2366	mv643xx_eth_program_unicast_filter(mp->dev);
2367
2368	/*
2369	 * Enable the receive queues.
2370	 */
2371	for (i = 0; i < mp->rxq_count; i++) {
2372		struct rx_queue *rxq = mp->rxq + i;
2373		u32 addr;
2374
2375		addr = (u32)rxq->rx_desc_dma;
2376		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
2377		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
2378
2379		rxq_enable(rxq);
2380	}
2381}
2382
2383static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
2384{
2385	int skb_size;
2386
2387	/*
2388	 * Reserve 2+14 bytes for an ethernet header (the hardware
2389	 * automatically prepends 2 bytes of dummy data to each
2390	 * received packet), 16 bytes for up to four VLAN tags, and
2391	 * 4 bytes for the trailing FCS -- 36 bytes total.
2392	 */
2393	skb_size = mp->dev->mtu + 36;
2394
2395	/*
2396	 * Make sure that the skb size is a multiple of 8 bytes, as
2397	 * the lower three bits of the receive descriptor's buffer
2398	 * size field are ignored by the hardware.
2399	 */
2400	mp->skb_size = (skb_size + 7) & ~7;
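	/*
	 * For example, with the default 1500-byte MTU: 1500 + 36 = 1536,
	 * already a multiple of 8, so skb_size stays 1536 before the
	 * SKB_DMA_REALIGN padding below.
	 */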
2401
2402	/*
2403	 * If NET_SKB_PAD is smaller than a cache line,
2404	 * netdev_alloc_skb() will cause skb->data to be misaligned
2405	 * to a cache line boundary.  If this is the case, include
2406	 * some extra space to allow re-aligning the data area.
2407	 */
2408	mp->skb_size += SKB_DMA_REALIGN;
2409}
2410
2411static int mv643xx_eth_open(struct net_device *dev)
2412{
2413	struct mv643xx_eth_private *mp = netdev_priv(dev);
2414	int err;
2415	int i;
2416
2417	wrlp(mp, INT_CAUSE, 0);
2418	wrlp(mp, INT_CAUSE_EXT, 0);
2419	rdlp(mp, INT_CAUSE_EXT);
2420
2421	err = request_irq(dev->irq, mv643xx_eth_irq,
2422			  IRQF_SHARED, dev->name, dev);
2423	if (err) {
2424		netdev_err(dev, "can't assign irq\n");
2425		return -EAGAIN;
2426	}
2427
2428	mv643xx_eth_recalc_skb_size(mp);
2429
2430	napi_enable(&mp->napi);
2431
2432	mp->int_mask = INT_EXT;
2433
2434	for (i = 0; i < mp->rxq_count; i++) {
2435		err = rxq_init(mp, i);
2436		if (err) {
2437			while (--i >= 0)
2438				rxq_deinit(mp->rxq + i);
2439			goto out;
2440		}
2441
2442		rxq_refill(mp->rxq + i, INT_MAX);
2443		mp->int_mask |= INT_RX_0 << i;
2444	}
2445
2446	if (mp->oom) {
2447		mp->rx_oom.expires = jiffies + (HZ / 10);
2448		add_timer(&mp->rx_oom);
2449	}
2450
2451	for (i = 0; i < mp->txq_count; i++) {
2452		err = txq_init(mp, i);
2453		if (err) {
2454			while (--i >= 0)
2455				txq_deinit(mp->txq + i);
2456			goto out_free;
2457		}
2458		mp->int_mask |= INT_TX_END_0 << i;
2459	}
2460
2461	add_timer(&mp->mib_counters_timer);
2462	port_start(mp);
2463
2464	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
2465	wrlp(mp, INT_MASK, mp->int_mask);
2466
2467	return 0;
2468
2469
2470out_free:
2471	for (i = 0; i < mp->rxq_count; i++)
2472		rxq_deinit(mp->rxq + i);
2473out:
2474	free_irq(dev->irq, dev);
2475
2476	return err;
2477}
2478
2479static void port_reset(struct mv643xx_eth_private *mp)
2480{
2481	unsigned int data;
2482	int i;
2483
2484	for (i = 0; i < mp->rxq_count; i++)
2485		rxq_disable(mp->rxq + i);
2486	for (i = 0; i < mp->txq_count; i++)
2487		txq_disable(mp->txq + i);
2488
2489	while (1) {
2490		u32 ps = rdlp(mp, PORT_STATUS);
2491
2492		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
2493			break;
2494		udelay(10);
2495	}
2496
2497	/* Reset the Enable bit in the Configuration Register */
2498	data = rdlp(mp, PORT_SERIAL_CONTROL);
2499	data &= ~(SERIAL_PORT_ENABLE		|
2500		  DO_NOT_FORCE_LINK_FAIL	|
2501		  FORCE_LINK_PASS);
2502	wrlp(mp, PORT_SERIAL_CONTROL, data);
2503}
2504
2505static int mv643xx_eth_stop(struct net_device *dev)
2506{
2507	struct mv643xx_eth_private *mp = netdev_priv(dev);
2508	int i;
2509
2510	wrlp(mp, INT_MASK_EXT, 0x00000000);
2511	wrlp(mp, INT_MASK, 0x00000000);
2512	rdlp(mp, INT_MASK);
2513
2514	napi_disable(&mp->napi);
2515
2516	del_timer_sync(&mp->rx_oom);
2517
2518	netif_carrier_off(dev);
2519	if (dev->phydev)
2520		phy_stop(dev->phydev);
2521	free_irq(dev->irq, dev);
2522
2523	port_reset(mp);
2524	mv643xx_eth_get_stats(dev);
2525	mib_counters_update(mp);
2526	del_timer_sync(&mp->mib_counters_timer);
2527
2528	for (i = 0; i < mp->rxq_count; i++)
2529		rxq_deinit(mp->rxq + i);
2530	for (i = 0; i < mp->txq_count; i++)
2531		txq_deinit(mp->txq + i);
2532
2533	return 0;
2534}
2535
2536static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2537{
2538	int ret;
2539
2540	if (!dev->phydev)
2541		return -ENOTSUPP;
2542
2543	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
2544	if (!ret)
2545		mv643xx_eth_adjust_link(dev);
2546	return ret;
2547}
2548
2549static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
2550{
2551	struct mv643xx_eth_private *mp = netdev_priv(dev);
2552
2553	dev->mtu = new_mtu;
2554	mv643xx_eth_recalc_skb_size(mp);
2555	tx_set_rate(mp, 1000000000, 16777216);
2556
2557	if (!netif_running(dev))
2558		return 0;
2559
2560	/*
2561	 * Stop and then re-open the interface.  This allocates RX skbs
2562	 * sized for the new MTU.
2563	 * Note that the re-open may fail if the system is short on
2564	 * memory.
2565	 */
2566	mv643xx_eth_stop(dev);
2567	if (mv643xx_eth_open(dev)) {
2568		netdev_err(dev,
2569			   "fatal error on re-opening device after MTU change\n");
2570	}
2571
2572	return 0;
2573}
2574
2575static void tx_timeout_task(struct work_struct *ugly)
2576{
2577	struct mv643xx_eth_private *mp;
2578
2579	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
2580	if (netif_running(mp->dev)) {
2581		netif_tx_stop_all_queues(mp->dev);
2582		port_reset(mp);
2583		port_start(mp);
2584		netif_tx_wake_all_queues(mp->dev);
2585	}
2586}
2587
2588static void mv643xx_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
2589{
2590	struct mv643xx_eth_private *mp = netdev_priv(dev);
2591
2592	netdev_info(dev, "tx timeout\n");
2593
2594	schedule_work(&mp->tx_timeout_task);
2595}
2596
2597#ifdef CONFIG_NET_POLL_CONTROLLER
2598static void mv643xx_eth_netpoll(struct net_device *dev)
2599{
2600	struct mv643xx_eth_private *mp = netdev_priv(dev);
2601
2602	wrlp(mp, INT_MASK, 0x00000000);
2603	rdlp(mp, INT_MASK);
2604
2605	mv643xx_eth_irq(dev->irq, dev);
2606
2607	wrlp(mp, INT_MASK, mp->int_mask);
2608}
2609#endif
2610
2611
2612/* platform glue ************************************************************/
2613static void
2614mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
2615			      const struct mbus_dram_target_info *dram)
2616{
2617	void __iomem *base = msp->base;
2618	u32 win_enable;
2619	u32 win_protect;
2620	int i;
2621
2622	for (i = 0; i < 6; i++) {
2623		writel(0, base + WINDOW_BASE(i));
2624		writel(0, base + WINDOW_SIZE(i));
2625		if (i < 4)
2626			writel(0, base + WINDOW_REMAP_HIGH(i));
2627	}
2628
2629	win_enable = 0x3f;
2630	win_protect = 0;
2631
2632	for (i = 0; i < dram->num_cs; i++) {
2633		const struct mbus_dram_window *cs = dram->cs + i;
2634
2635		writel((cs->base & 0xffff0000) |
2636			(cs->mbus_attr << 8) |
2637			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
2638		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
2639
2640		win_enable &= ~(1 << i);
2641		win_protect |= 3 << (2 * i);
2642	}
2643
2644	writel(win_enable, base + WINDOW_BAR_ENABLE);
2645	msp->win_protect = win_protect;
2646}
2647
2648static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2649{
2650	/*
2651	 * Check whether we have a 14-bit coal limit field in bits
2652	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
2653	 * SDMA config register.
2654	 */
2655	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
2656	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
2657		msp->extended_rx_coal_limit = 1;
2658	else
2659		msp->extended_rx_coal_limit = 0;
2660
2661	/*
2662	 * Check whether the MAC supports TX rate control, and if
2663	 * yes, whether its associated registers are in the old or
2664	 * the new place.
2665	 */
2666	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
2667	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
2668		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
2669	} else {
2670		writel(7, msp->base + 0x0400 + TX_BW_RATE);
2671		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
2672			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
2673		else
2674			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
2675	}
2676}
2677
2678#if defined(CONFIG_OF)
2679static const struct of_device_id mv643xx_eth_shared_ids[] = {
2680	{ .compatible = "marvell,orion-eth", },
2681	{ .compatible = "marvell,kirkwood-eth", },
2682	{ }
2683};
2684MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
2685#endif
2686
2687#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60)
2688#define mv643xx_eth_property(_np, _name, _v)				\
2689	do {								\
2690		u32 tmp;						\
2691		if (!of_property_read_u32(_np, "marvell," _name, &tmp))	\
2692			_v = tmp;					\
2693	} while (0)
2694
2695static struct platform_device *port_platdev[3];
2696
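/*
 * Translate one port child node from the device tree into a
 * mv643xx_eth_platform_data and register it as a MV643XX_ETH_NAME
 * platform device.
 */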
2697static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
2698					  struct device_node *pnp)
2699{
2700	struct platform_device *ppdev;
2701	struct mv643xx_eth_platform_data ppd;
2702	struct resource res;
2703	const char *mac_addr;
2704	int ret;
2705	int dev_num = 0;
2706
2707	memset(&ppd, 0, sizeof(ppd));
2708	ppd.shared = pdev;
2709
2710	memset(&res, 0, sizeof(res));
2711	if (of_irq_to_resource(pnp, 0, &res) <= 0) {
2712		dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp);
2713		return -EINVAL;
2714	}
2715
2716	if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
2717		dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp);
2718		return -EINVAL;
2719	}
2720
2721	if (ppd.port_number >= 3) {
2722		dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp);
2723		return -EINVAL;
2724	}
2725
2726	while (dev_num < 3 && port_platdev[dev_num])
2727		dev_num++;
2728
2729	if (dev_num == 3) {
2730		dev_err(&pdev->dev, "too many ports registered\n");
2731		return -EINVAL;
2732	}
2733
2734	mac_addr = of_get_mac_address(pnp);
2735	if (!IS_ERR(mac_addr))
2736		ether_addr_copy(ppd.mac_addr, mac_addr);
2737
2738	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
2739	mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
2740	mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
2741	mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
2742	mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
2743	mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
2744
2745	ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
2746	if (!ppd.phy_node) {
2747		ppd.phy_addr = MV643XX_ETH_PHY_NONE;
2748		of_property_read_u32(pnp, "speed", &ppd.speed);
2749		of_property_read_u32(pnp, "duplex", &ppd.duplex);
2750	}
2751
2752	ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
2753	if (!ppdev)
2754		return -ENOMEM;
2755	ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2756	ppdev->dev.of_node = pnp;
2757
2758	ret = platform_device_add_resources(ppdev, &res, 1);
2759	if (ret)
2760		goto port_err;
2761
2762	ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
2763	if (ret)
2764		goto port_err;
2765
2766	ret = platform_device_add(ppdev);
2767	if (ret)
2768		goto port_err;
2769
2770	port_platdev[dev_num] = ppdev;
2771
2772	return 0;
2773
2774port_err:
2775	platform_device_put(ppdev);
2776	return ret;
2777}
2778
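/*
 * When probed from the device tree, pick up the shared properties and
 * register one port platform device per available child node.
 */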
2779static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
2780{
2781	struct mv643xx_eth_shared_platform_data *pd;
2782	struct device_node *pnp, *np = pdev->dev.of_node;
2783	int ret;
2784
2785	/* bail out if not registered from DT */
2786	if (!np)
2787		return 0;
2788
2789	pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
2790	if (!pd)
2791		return -ENOMEM;
2792	pdev->dev.platform_data = pd;
2793
2794	mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
2795
2796	for_each_available_child_of_node(np, pnp) {
2797		ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
2798		if (ret) {
2799			of_node_put(pnp);
2800			return ret;
2801		}
2802	}
2803	return 0;
2804}
2805
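/*
 * Undo mv643xx_eth_shared_of_probe(): delete the port platform devices
 * that were registered from the device tree.
 */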
2806static void mv643xx_eth_shared_of_remove(void)
2807{
2808	int n;
2809
2810	for (n = 0; n < 3; n++) {
2811		platform_device_del(port_platdev[n]);
2812		port_platdev[n] = NULL;
2813	}
2814}
2815#else
2816static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
2817{
2818	return 0;
2819}
2820
2821static inline void mv643xx_eth_shared_of_remove(void)
2822{
2823}
2824#endif
2825
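/*
 * Probe the shared (per-controller) part: map the common registers,
 * enable the optional clock, program the MBUS windows, register the
 * DT port devices and detect the hardware variant.
 */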
2826static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2827{
2828	static int mv643xx_eth_version_printed;
2829	struct mv643xx_eth_shared_platform_data *pd;
2830	struct mv643xx_eth_shared_private *msp;
2831	const struct mbus_dram_target_info *dram;
2832	struct resource *res;
2833	int ret;
2834
2835	if (!mv643xx_eth_version_printed++)
2836		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
2837			  mv643xx_eth_driver_version);
2838
2839	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2840	if (res == NULL)
2841		return -EINVAL;
2842
2843	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
2844	if (msp == NULL)
2845		return -ENOMEM;
2846	platform_set_drvdata(pdev, msp);
2847
2848	msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
2849	if (msp->base == NULL)
2850		return -ENOMEM;
2851
2852	msp->clk = devm_clk_get(&pdev->dev, NULL);
2853	if (!IS_ERR(msp->clk))
2854		clk_prepare_enable(msp->clk);
2855
2856	/*
2857	 * (Re-)program MBUS remapping windows if we are asked to.
2858	 */
2859	dram = mv_mbus_dram_info();
2860	if (dram)
2861		mv643xx_eth_conf_mbus_windows(msp, dram);
2862
2863	ret = mv643xx_eth_shared_of_probe(pdev);
2864	if (ret)
2865		goto err_put_clk;
2866	pd = dev_get_platdata(&pdev->dev);
2867
2868	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
2869					pd->tx_csum_limit : 9 * 1024;
2870	infer_hw_params(msp);
2871
2872	return 0;
2873
2874err_put_clk:
2875	if (!IS_ERR(msp->clk))
2876		clk_disable_unprepare(msp->clk);
2877	return ret;
2878}
2879
2880static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2881{
2882	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
2883
2884	mv643xx_eth_shared_of_remove();
2885	if (!IS_ERR(msp->clk))
2886		clk_disable_unprepare(msp->clk);
2887	return 0;
2888}
2889
2890static struct platform_driver mv643xx_eth_shared_driver = {
2891	.probe		= mv643xx_eth_shared_probe,
2892	.remove		= mv643xx_eth_shared_remove,
2893	.driver = {
2894		.name	= MV643XX_ETH_SHARED_NAME,
2895		.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
2896	},
2897};
2898
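/*
 * The shared PHY_ADDR register holds a 5-bit PHY address per port;
 * each port's field starts at bit (5 * port_num).
 */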
2899static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
2900{
2901	int addr_shift = 5 * mp->port_num;
2902	u32 data;
2903
2904	data = rdl(mp, PHY_ADDR);
2905	data &= ~(0x1f << addr_shift);
2906	data |= (phy_addr & 0x1f) << addr_shift;
2907	wrl(mp, PHY_ADDR, data);
2908}
2909
2910static int phy_addr_get(struct mv643xx_eth_private *mp)
2911{
2912	unsigned int data;
2913
2914	data = rdl(mp, PHY_ADDR);
2915
2916	return (data >> (5 * mp->port_num)) & 0x1f;
2917}
2918
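/*
 * Copy the platform data (MAC address, ring sizes, SRAM layout and
 * queue counts) into the private struct, applying defaults and
 * clamping the TX ring size to what the driver can handle.
 */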
2919static void set_params(struct mv643xx_eth_private *mp,
2920		       struct mv643xx_eth_platform_data *pd)
2921{
2922	struct net_device *dev = mp->dev;
2923	unsigned int tx_ring_size;
2924
2925	if (is_valid_ether_addr(pd->mac_addr))
2926		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
2927	else
2928		uc_addr_get(mp, dev->dev_addr);
2929
2930	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
2931	if (pd->rx_queue_size)
2932		mp->rx_ring_size = pd->rx_queue_size;
2933	mp->rx_desc_sram_addr = pd->rx_sram_addr;
2934	mp->rx_desc_sram_size = pd->rx_sram_size;
2935
2936	mp->rxq_count = pd->rx_queue_count ? : 1;
2937
2938	tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2939	if (pd->tx_queue_size)
2940		tx_ring_size = pd->tx_queue_size;
2941
2942	mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
2943				   MV643XX_MAX_SKB_DESCS * 2, 4096);
2944	if (mp->tx_ring_size != tx_ring_size)
2945		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2946			    mp->tx_ring_size, tx_ring_size);
2947
2948	mp->tx_desc_sram_addr = pd->tx_sram_addr;
2949	mp->tx_desc_sram_size = pd->tx_sram_size;
2950
2951	mp->txq_count = pd->tx_queue_count ? : 1;
2952}
2953
2954static int get_phy_mode(struct mv643xx_eth_private *mp)
2955{
2956	struct device *dev = mp->dev->dev.parent;
2957	phy_interface_t iface;
2958	int err;
2959
2960	if (dev->of_node)
2961		err = of_get_phy_mode(dev->of_node, &iface);
2962
2963	/* Historical default if unspecified. We could also read/write
2964	 * the interface state in the PSC1
2965	 */
2966	if (!dev->of_node || err)
2967		iface = PHY_INTERFACE_MODE_GMII;
2968	return iface;
2969}
2970
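/*
 * Connect to a PHY on the orion-mdio bus. With the default address,
 * scan all 32 addresses starting at the one latched in PHY_ADDR;
 * otherwise try only the given address.
 */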
2971static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2972				   int phy_addr)
2973{
2974	struct phy_device *phydev;
2975	int start;
2976	int num;
2977	int i;
2978	char phy_id[MII_BUS_ID_SIZE + 3];
2979
2980	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
2981		start = phy_addr_get(mp) & 0x1f;
2982		num = 32;
2983	} else {
2984		start = phy_addr & 0x1f;
2985		num = 1;
2986	}
2987
2988	/* Attempt to connect to the PHY using orion-mdio */
2989	phydev = ERR_PTR(-ENODEV);
2990	for (i = 0; i < num; i++) {
2991		int addr = (start + i) & 0x1f;
2992
2993		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
2994				"orion-mdio-mii", addr);
2995
2996		phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
2997				     get_phy_mode(mp));
2998		if (!IS_ERR(phydev)) {
2999			phy_addr_set(mp, addr);
3000			break;
3001		}
3002	}
3003
3004	return phydev;
3005}
3006
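/*
 * Set up the attached PHY: speed == 0 means autonegotiate everything,
 * otherwise force the given speed and duplex, then kick off
 * phy_start_aneg() to apply it.
 */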
3007static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
3008{
3009	struct net_device *dev = mp->dev;
3010	struct phy_device *phy = dev->phydev;
3011
3012	if (speed == 0) {
3013		phy->autoneg = AUTONEG_ENABLE;
3014		phy->speed = 0;
3015		phy->duplex = 0;
3016		linkmode_copy(phy->advertising, phy->supported);
3017		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
3018				 phy->advertising);
3019	} else {
3020		phy->autoneg = AUTONEG_DISABLE;
3021		linkmode_zero(phy->advertising);
3022		phy->speed = speed;
3023		phy->duplex = duplex;
3024	}
3025	phy_start_aneg(phy);
3026}
3027
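/*
 * Rebuild PORT_SERIAL_CONTROL from scratch: disable the port first,
 * and when no PHY is attached, force speed, duplex and flow control
 * instead of autonegotiating them.
 */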
3028static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
3029{
3030	struct net_device *dev = mp->dev;
3031	u32 pscr;
3032
3033	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
3034	if (pscr & SERIAL_PORT_ENABLE) {
3035		pscr &= ~SERIAL_PORT_ENABLE;
3036		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
3037	}
3038
3039	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
3040	if (!dev->phydev) {
3041		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
3042		if (speed == SPEED_1000)
3043			pscr |= SET_GMII_SPEED_TO_1000;
3044		else if (speed == SPEED_100)
3045			pscr |= SET_MII_SPEED_TO_100;
3046
3047		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
3048
3049		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
3050		if (duplex == DUPLEX_FULL)
3051			pscr |= SET_FULL_DUPLEX_MODE;
3052	}
3053
3054	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
3055}
3056
3057static const struct net_device_ops mv643xx_eth_netdev_ops = {
3058	.ndo_open		= mv643xx_eth_open,
3059	.ndo_stop		= mv643xx_eth_stop,
3060	.ndo_start_xmit		= mv643xx_eth_xmit,
3061	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
3062	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
3063	.ndo_validate_addr	= eth_validate_addr,
3064	.ndo_do_ioctl		= mv643xx_eth_ioctl,
3065	.ndo_change_mtu		= mv643xx_eth_change_mtu,
3066	.ndo_set_features	= mv643xx_eth_set_features,
3067	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
3068	.ndo_get_stats		= mv643xx_eth_get_stats,
3069#ifdef CONFIG_NET_POLL_CONTROLLER
3070	.ndo_poll_controller	= mv643xx_eth_netpoll,
3071#endif
3072};
3073
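/*
 * Per-port probe: pick up the shared resources, locate this port's
 * registers, apply the platform data, attach the PHY (from DT or by
 * scanning), set up NAPI, timers and netdev features, and finally
 * register the network device.
 */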
3074static int mv643xx_eth_probe(struct platform_device *pdev)
3075{
3076	struct mv643xx_eth_platform_data *pd;
3077	struct mv643xx_eth_private *mp;
3078	struct net_device *dev;
3079	struct phy_device *phydev = NULL;
3080	struct resource *res;
3081	int err;
3082
3083	pd = dev_get_platdata(&pdev->dev);
3084	if (pd == NULL) {
3085		dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
3086		return -ENODEV;
3087	}
3088
3089	if (pd->shared == NULL) {
3090		dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
3091		return -ENODEV;
3092	}
3093
3094	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
3095	if (!dev)
3096		return -ENOMEM;
3097
3098	SET_NETDEV_DEV(dev, &pdev->dev);
3099	mp = netdev_priv(dev);
3100	platform_set_drvdata(pdev, mp);
3101
3102	mp->shared = platform_get_drvdata(pd->shared);
3103	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
3104	mp->port_num = pd->port_number;
3105
3106	mp->dev = dev;
3107
3108	/* Kirkwood resets some registers on gated clocks. Especially
3109	 * CLK125_BYPASS_EN must be cleared but is not available on
3110	 * all other SoCs/System Controllers using this driver.
3111	 */
3112	if (of_device_is_compatible(pdev->dev.of_node,
3113				    "marvell,kirkwood-eth-port"))
3114		wrlp(mp, PORT_SERIAL_CONTROL1,
3115		     rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
3116
3117	/*
3118	 * Start with a default rate, and if there is a clock, allow
3119	 * it to override the default.
3120	 */
3121	mp->t_clk = 133000000;
3122	mp->clk = devm_clk_get(&pdev->dev, NULL);
3123	if (!IS_ERR(mp->clk)) {
3124		clk_prepare_enable(mp->clk);
3125		mp->t_clk = clk_get_rate(mp->clk);
3126	} else if (!IS_ERR(mp->shared->clk)) {
3127		mp->t_clk = clk_get_rate(mp->shared->clk);
3128	}
3129
3130	set_params(mp, pd);
3131	netif_set_real_num_tx_queues(dev, mp->txq_count);
3132	netif_set_real_num_rx_queues(dev, mp->rxq_count);
3133
3134	err = 0;
3135	if (pd->phy_node) {
3136		phydev = of_phy_connect(mp->dev, pd->phy_node,
3137					mv643xx_eth_adjust_link, 0,
3138					get_phy_mode(mp));
3139		if (!phydev)
3140			err = -ENODEV;
3141		else
3142			phy_addr_set(mp, phydev->mdio.addr);
3143	} else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
3144		phydev = phy_scan(mp, pd->phy_addr);
3145
3146		if (IS_ERR(phydev))
3147			err = PTR_ERR(phydev);
3148		else
3149			phy_init(mp, pd->speed, pd->duplex);
3150	}
3151	if (err == -ENODEV) {
3152		err = -EPROBE_DEFER;
3153		goto out;
3154	}
3155	if (err)
3156		goto out;
3157
3158	dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
3159
3160	init_pscr(mp, pd->speed, pd->duplex);
3161
3162
3163	mib_counters_clear(mp);
3164
3165	timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
3166	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
3167
3168	spin_lock_init(&mp->mib_counters_lock);
3169
3170	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
3171
3172	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
3173
3174	timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
3175
3176
3177	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
3178	BUG_ON(!res);
3179	dev->irq = res->start;
3180
3181	dev->netdev_ops = &mv643xx_eth_netdev_ops;
3182
3183	dev->watchdog_timeo = 2 * HZ;
3184	dev->base_addr = 0;
3185
3186	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3187	dev->vlan_features = dev->features;
3188
3189	dev->features |= NETIF_F_RXCSUM;
3190	dev->hw_features = dev->features;
3191
3192	dev->priv_flags |= IFF_UNICAST_FLT;
3193	dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
3194
3195	/* MTU range: 64 - 9500 */
3196	dev->min_mtu = 64;
3197	dev->max_mtu = 9500;
3198
3199	if (mp->shared->win_protect)
3200		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
3201
3202	netif_carrier_off(dev);
3203
3204	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
3205
3206	set_rx_coal(mp, 250);
3207	set_tx_coal(mp, 0);
3208
3209	err = register_netdev(dev);
3210	if (err)
3211		goto out;
3212
3213	netdev_notice(dev, "port %d with MAC address %pM\n",
3214		      mp->port_num, dev->dev_addr);
3215
3216	if (mp->tx_desc_sram_size > 0)
3217		netdev_notice(dev, "configured with sram\n");
3218
3219	return 0;
3220
3221out:
3222	if (!IS_ERR(mp->clk))
3223		clk_disable_unprepare(mp->clk);
3224	free_netdev(dev);
3225
3226	return err;
3227}
3228
3229static int mv643xx_eth_remove(struct platform_device *pdev)
3230{
3231	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
3232	struct net_device *dev = mp->dev;
3233
3234	unregister_netdev(mp->dev);
3235	if (dev->phydev)
3236		phy_disconnect(dev->phydev);
3237	cancel_work_sync(&mp->tx_timeout_task);
3238
3239	if (!IS_ERR(mp->clk))
3240		clk_disable_unprepare(mp->clk);
3241
3242	free_netdev(mp->dev);
3243
3244	return 0;
3245}
3246
3247static void mv643xx_eth_shutdown(struct platform_device *pdev)
3248{
3249	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
3250
3251	/* Mask all interrupts on ethernet port */
3252	wrlp(mp, INT_MASK, 0);
3253	rdlp(mp, INT_MASK);
3254
3255	if (netif_running(mp->dev))
3256		port_reset(mp);
3257}
3258
3259static struct platform_driver mv643xx_eth_driver = {
3260	.probe		= mv643xx_eth_probe,
3261	.remove		= mv643xx_eth_remove,
3262	.shutdown	= mv643xx_eth_shutdown,
3263	.driver = {
3264		.name	= MV643XX_ETH_NAME,
3265	},
3266};
3267
3268static struct platform_driver * const drivers[] = {
3269	&mv643xx_eth_shared_driver,
3270	&mv643xx_eth_driver,
3271};
3272
3273static int __init mv643xx_eth_init_module(void)
3274{
3275	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
3276}
3277module_init(mv643xx_eth_init_module);
3278
3279static void __exit mv643xx_eth_cleanup_module(void)
3280{
3281	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
3282}
3283module_exit(mv643xx_eth_cleanup_module);
3284
3285MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
3286	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
3287MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
3288MODULE_LICENSE("GPL");
3289MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
3290MODULE_ALIAS("platform:" MV643XX_ETH_NAME);