Linux v6.13.7 source: octeon_mgmt.c
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 2009-2012 Cavium, Inc
   7 */
   8
   9#include <linux/platform_device.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/etherdevice.h>
  12#include <linux/capability.h>
  13#include <linux/net_tstamp.h>
  14#include <linux/interrupt.h>
  15#include <linux/netdevice.h>
  16#include <linux/spinlock.h>
  17#include <linux/if_vlan.h>
  18#include <linux/of_mdio.h>
  19#include <linux/module.h>
  20#include <linux/of_net.h>
  21#include <linux/init.h>
  22#include <linux/slab.h>
  23#include <linux/phy.h>
  24#include <linux/io.h>
  25
  26#include <asm/octeon/octeon.h>
  27#include <asm/octeon/cvmx-mixx-defs.h>
  28#include <asm/octeon/cvmx-agl-defs.h>
  29
  30#define DRV_NAME "octeon_mgmt"
  31#define DRV_DESCRIPTION \
  32	"Cavium Networks Octeon MII (management) port Network Driver"
  33
  34#define OCTEON_MGMT_NAPI_WEIGHT 16
  35
  36/* Ring sizes that are powers of two allow for more efficient modulo
   37 * operations.
  38 */
  39#define OCTEON_MGMT_RX_RING_SIZE 512
  40#define OCTEON_MGMT_TX_RING_SIZE 128
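/*
 * For illustration: because both ring sizes above are powers of two, the
 * "% RING_SIZE" index advance used throughout this driver can be compiled
 * down to a simple mask; for the RX ring the two forms
 *
 *	next = (next + 1) % OCTEON_MGMT_RX_RING_SIZE;
 *	next = (next + 1) & (OCTEON_MGMT_RX_RING_SIZE - 1);
 *
 * produce the same wrap-around index whenever the size is a power of two.
 */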
  41
  42/* Allow 8 bytes for vlan and FCS. */
  43#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
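/*
 * For illustration: with ETH_HLEN = 14, ETH_FCS_LEN = 4 and VLAN_HLEN = 4,
 * OCTEON_MGMT_RX_HEADROOM works out to 22 bytes reserved on top of the MTU
 * in every receive buffer.
 */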
  44
  45union mgmt_port_ring_entry {
  46	u64 d64;
  47	struct {
  48#define RING_ENTRY_CODE_DONE 0xf
  49#define RING_ENTRY_CODE_MORE 0x10
  50#ifdef __BIG_ENDIAN_BITFIELD
  51		u64 reserved_62_63:2;
  52		/* Length of the buffer/packet in bytes */
  53		u64 len:14;
  54		/* For TX, signals that the packet should be timestamped */
  55		u64 tstamp:1;
  56		/* The RX error code */
  57		u64 code:7;
  58		/* Physical address of the buffer */
  59		u64 addr:40;
  60#else
  61		u64 addr:40;
  62		u64 code:7;
  63		u64 tstamp:1;
  64		u64 len:14;
  65		u64 reserved_62_63:2;
  66#endif
  67	} s;
  68};
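/*
 * For illustration, a minimal sketch of how a TX descriptor is composed
 * from this union (the length and address below are made-up values):
 *
 *	union mgmt_port_ring_entry re;
 *
 *	re.d64      = 0;
 *	re.s.len    = 64;		/. frame length in bytes ./
 *	re.s.tstamp = 0;		/. no hardware TX timestamp requested ./
 *	re.s.addr   = 0x12340000ull;	/. 40-bit physical buffer address ./
 *	tx_ring[i]  = re.d64;		/. each ring slot is one 64-bit word ./
 *
 * On receive the hardware writes back the same layout, with re.s.code set
 * to RING_ENTRY_CODE_DONE (or RING_ENTRY_CODE_MORE for split packets).
 */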
  69
  70#define MIX_ORING1	0x0
  71#define MIX_ORING2	0x8
  72#define MIX_IRING1	0x10
  73#define MIX_IRING2	0x18
  74#define MIX_CTL		0x20
  75#define MIX_IRHWM	0x28
  76#define MIX_IRCNT	0x30
  77#define MIX_ORHWM	0x38
  78#define MIX_ORCNT	0x40
  79#define MIX_ISR		0x48
  80#define MIX_INTENA	0x50
  81#define MIX_REMCNT	0x58
  82#define MIX_BIST	0x78
  83
  84#define AGL_GMX_PRT_CFG			0x10
  85#define AGL_GMX_RX_FRM_CTL		0x18
  86#define AGL_GMX_RX_FRM_MAX		0x30
  87#define AGL_GMX_RX_JABBER		0x38
  88#define AGL_GMX_RX_STATS_CTL		0x50
  89
  90#define AGL_GMX_RX_STATS_PKTS_DRP	0xb0
  91#define AGL_GMX_RX_STATS_OCTS_DRP	0xb8
  92#define AGL_GMX_RX_STATS_PKTS_BAD	0xc0
  93
  94#define AGL_GMX_RX_ADR_CTL		0x100
  95#define AGL_GMX_RX_ADR_CAM_EN		0x108
  96#define AGL_GMX_RX_ADR_CAM0		0x180
  97#define AGL_GMX_RX_ADR_CAM1		0x188
  98#define AGL_GMX_RX_ADR_CAM2		0x190
  99#define AGL_GMX_RX_ADR_CAM3		0x198
 100#define AGL_GMX_RX_ADR_CAM4		0x1a0
 101#define AGL_GMX_RX_ADR_CAM5		0x1a8
 102
 103#define AGL_GMX_TX_CLK			0x208
 104#define AGL_GMX_TX_STATS_CTL		0x268
 105#define AGL_GMX_TX_CTL			0x270
 106#define AGL_GMX_TX_STAT0		0x280
 107#define AGL_GMX_TX_STAT1		0x288
 108#define AGL_GMX_TX_STAT2		0x290
 109#define AGL_GMX_TX_STAT3		0x298
 110#define AGL_GMX_TX_STAT4		0x2a0
 111#define AGL_GMX_TX_STAT5		0x2a8
 112#define AGL_GMX_TX_STAT6		0x2b0
 113#define AGL_GMX_TX_STAT7		0x2b8
 114#define AGL_GMX_TX_STAT8		0x2c0
 115#define AGL_GMX_TX_STAT9		0x2c8
 116
 117struct octeon_mgmt {
 118	struct net_device *netdev;
 119	u64 mix;
 120	u64 agl;
 121	u64 agl_prt_ctl;
 122	int port;
 123	int irq;
 124	bool has_rx_tstamp;
 125	u64 *tx_ring;
 126	dma_addr_t tx_ring_handle;
 127	unsigned int tx_next;
 128	unsigned int tx_next_clean;
 129	unsigned int tx_current_fill;
 130	/* The tx_list lock also protects the ring related variables */
 131	struct sk_buff_head tx_list;
 132
 133	/* RX variables only touched in napi_poll.  No locking necessary. */
 134	u64 *rx_ring;
 135	dma_addr_t rx_ring_handle;
 136	unsigned int rx_next;
 137	unsigned int rx_next_fill;
 138	unsigned int rx_current_fill;
 139	struct sk_buff_head rx_list;
 140
 141	spinlock_t lock;
 142	unsigned int last_duplex;
 143	unsigned int last_link;
 144	unsigned int last_speed;
 145	struct device *dev;
 146	struct napi_struct napi;
 147	struct tasklet_struct tx_clean_tasklet;
 148	struct device_node *phy_np;
 149	resource_size_t mix_phys;
 150	resource_size_t mix_size;
 151	resource_size_t agl_phys;
 152	resource_size_t agl_size;
 153	resource_size_t agl_prt_ctl_phys;
 154	resource_size_t agl_prt_ctl_size;
 155};
 156
 157static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
 158{
 159	union cvmx_mixx_intena mix_intena;
 160	unsigned long flags;
 161
 162	spin_lock_irqsave(&p->lock, flags);
 163	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
 164	mix_intena.s.ithena = enable ? 1 : 0;
 165	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
 166	spin_unlock_irqrestore(&p->lock, flags);
 167}
 168
 169static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
 170{
 171	union cvmx_mixx_intena mix_intena;
 172	unsigned long flags;
 173
 174	spin_lock_irqsave(&p->lock, flags);
 175	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
 176	mix_intena.s.othena = enable ? 1 : 0;
 177	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
 178	spin_unlock_irqrestore(&p->lock, flags);
 179}
 180
 181static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
 182{
 183	octeon_mgmt_set_rx_irq(p, 1);
 184}
 185
 186static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
 187{
 188	octeon_mgmt_set_rx_irq(p, 0);
 189}
 190
 191static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
 192{
 193	octeon_mgmt_set_tx_irq(p, 1);
 194}
 195
 196static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
 197{
 198	octeon_mgmt_set_tx_irq(p, 0);
 199}
 200
 201static unsigned int ring_max_fill(unsigned int ring_size)
 202{
 203	return ring_size - 8;
 204}
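/*
 * For illustration: with the sizes defined above, ring_max_fill() caps the
 * number of outstanding entries at 504 for the RX ring and 120 for the TX
 * ring, i.e. the driver always leaves 8 slots of slack in each ring.
 */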
 205
 206static unsigned int ring_size_to_bytes(unsigned int ring_size)
 207{
 208	return ring_size * sizeof(union mgmt_port_ring_entry);
 209}
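/*
 * For illustration: each entry is a single u64, so
 * ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE) = 512 * 8 = 4096 bytes and
 * ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE) = 128 * 8 = 1024 bytes.
 */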
 210
 211static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
 212{
 213	struct octeon_mgmt *p = netdev_priv(netdev);
 214
 215	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
 216		unsigned int size;
 217		union mgmt_port_ring_entry re;
 218		struct sk_buff *skb;
 219
 220		/* CN56XX pass 1 needs 8 bytes of padding.  */
 221		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
 222
 223		skb = netdev_alloc_skb(netdev, size);
 224		if (!skb)
 225			break;
 226		skb_reserve(skb, NET_IP_ALIGN);
 227		__skb_queue_tail(&p->rx_list, skb);
 228
 229		re.d64 = 0;
 230		re.s.len = size;
 231		re.s.addr = dma_map_single(p->dev, skb->data,
 232					   size,
 233					   DMA_FROM_DEVICE);
 234
 235		/* Put it in the ring.  */
 236		p->rx_ring[p->rx_next_fill] = re.d64;
 237		/* Make sure there is no reorder of filling the ring and ringing
 238		 * the bell
 239		 */
 240		wmb();
 241
 242		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
 243					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
 244					   DMA_BIDIRECTIONAL);
 245		p->rx_next_fill =
 246			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
 247		p->rx_current_fill++;
 248		/* Ring the bell.  */
 249		cvmx_write_csr(p->mix + MIX_IRING2, 1);
 250	}
 251}
 252
 253static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
 254{
 255	union cvmx_mixx_orcnt mix_orcnt;
 256	union mgmt_port_ring_entry re;
 257	struct sk_buff *skb;
 258	int cleaned = 0;
 259	unsigned long flags;
 260
 261	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
 262	while (mix_orcnt.s.orcnt) {
 263		spin_lock_irqsave(&p->tx_list.lock, flags);
 264
 265		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
 266
 267		if (mix_orcnt.s.orcnt == 0) {
 268			spin_unlock_irqrestore(&p->tx_list.lock, flags);
 269			break;
 270		}
 271
 272		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
 273					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
 274					DMA_BIDIRECTIONAL);
 275
 276		re.d64 = p->tx_ring[p->tx_next_clean];
 277		p->tx_next_clean =
 278			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
 279		skb = __skb_dequeue(&p->tx_list);
 280
 281		mix_orcnt.u64 = 0;
 282		mix_orcnt.s.orcnt = 1;
 283
 284		/* Acknowledge to hardware that we have the buffer.  */
 285		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
 286		p->tx_current_fill--;
 287
 288		spin_unlock_irqrestore(&p->tx_list.lock, flags);
 289
 290		dma_unmap_single(p->dev, re.s.addr, re.s.len,
 291				 DMA_TO_DEVICE);
 292
 293		/* Read the hardware TX timestamp if one was recorded */
 294		if (unlikely(re.s.tstamp)) {
 295			struct skb_shared_hwtstamps ts;
 296			u64 ns;
 297
 298			memset(&ts, 0, sizeof(ts));
 299			/* Read the timestamp */
 300			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
 301			/* Remove the timestamp from the FIFO */
 302			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
 303			/* Tell the kernel about the timestamp */
 304			ts.hwtstamp = ns_to_ktime(ns);
 305			skb_tstamp_tx(skb, &ts);
 306		}
 307
 308		dev_kfree_skb_any(skb);
 309		cleaned++;
 310
 311		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
 312	}
 313
 314	if (cleaned && netif_queue_stopped(p->netdev))
 315		netif_wake_queue(p->netdev);
 316}
 317
 318static void octeon_mgmt_clean_tx_tasklet(struct tasklet_struct *t)
 319{
 320	struct octeon_mgmt *p = from_tasklet(p, t, tx_clean_tasklet);
 321	octeon_mgmt_clean_tx_buffers(p);
 322	octeon_mgmt_enable_tx_irq(p);
 323}
 324
 325static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
 326{
 327	struct octeon_mgmt *p = netdev_priv(netdev);
 328	unsigned long flags;
 329	u64 drop, bad;
 330
 331	/* These reads also clear the count registers.  */
 332	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
 333	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);
 334
 335	if (drop || bad) {
 336		/* Do an atomic update. */
 337		spin_lock_irqsave(&p->lock, flags);
 338		netdev->stats.rx_errors += bad;
 339		netdev->stats.rx_dropped += drop;
 340		spin_unlock_irqrestore(&p->lock, flags);
 341	}
 342}
 343
 344static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
 345{
 346	struct octeon_mgmt *p = netdev_priv(netdev);
 347	unsigned long flags;
 348
 349	union cvmx_agl_gmx_txx_stat0 s0;
 350	union cvmx_agl_gmx_txx_stat1 s1;
 351
 352	/* These reads also clear the count registers.  */
 353	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
 354	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);
 355
 356	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
 357		/* Do an atomic update. */
 358		spin_lock_irqsave(&p->lock, flags);
 359		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
 360		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
 361		spin_unlock_irqrestore(&p->lock, flags);
 362	}
 363}
 364
 365/*
 366 * Dequeue a receive skb and its corresponding ring entry.  The ring
 367 * entry is returned, *pskb is updated to point to the skb.
 368 */
 369static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
 370					 struct sk_buff **pskb)
 371{
 372	union mgmt_port_ring_entry re;
 373
 374	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
 375				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
 376				DMA_BIDIRECTIONAL);
 377
 378	re.d64 = p->rx_ring[p->rx_next];
 379	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
 380	p->rx_current_fill--;
 381	*pskb = __skb_dequeue(&p->rx_list);
 382
 383	dma_unmap_single(p->dev, re.s.addr,
 384			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
 385			 DMA_FROM_DEVICE);
 386
 387	return re.d64;
 388}
 389
 390
 391static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
 392{
 393	struct net_device *netdev = p->netdev;
 394	union cvmx_mixx_ircnt mix_ircnt;
 395	union mgmt_port_ring_entry re;
 396	struct sk_buff *skb;
 397	struct sk_buff *skb2;
 398	struct sk_buff *skb_new;
 399	union mgmt_port_ring_entry re2;
 400	int rc = 1;
 401
 402
 403	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
 404	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
 405		/* A good packet, send it up. */
 406		skb_put(skb, re.s.len);
 407good:
 408		/* Process the RX timestamp if it was recorded */
 409		if (p->has_rx_tstamp) {
 410			/* The first 8 bytes are the timestamp */
 411			u64 ns = *(u64 *)skb->data;
 412			struct skb_shared_hwtstamps *ts;
 413			ts = skb_hwtstamps(skb);
 414			ts->hwtstamp = ns_to_ktime(ns);
 415			__skb_pull(skb, 8);
 416		}
 417		skb->protocol = eth_type_trans(skb, netdev);
 418		netdev->stats.rx_packets++;
 419		netdev->stats.rx_bytes += skb->len;
 420		netif_receive_skb(skb);
 421		rc = 0;
 422	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
 423		/* Packet split across skbs.  This can happen if we
 424		 * increase the MTU.  Buffers that are already in the
 425		 * rx ring can then end up being too small.  As the rx
 426		 * ring is refilled, buffers sized for the new MTU
 427		 * will be used and we should go back to the normal
 428		 * non-split case.
 429		 */
 430		skb_put(skb, re.s.len);
 431		do {
 432			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
 433			if (re2.s.code != RING_ENTRY_CODE_MORE
 434				&& re2.s.code != RING_ENTRY_CODE_DONE)
 435				goto split_error;
 436			skb_put(skb2,  re2.s.len);
 437			skb_new = skb_copy_expand(skb, 0, skb2->len,
 438						  GFP_ATOMIC);
 439			if (!skb_new)
 440				goto split_error;
 441			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
 442					  skb2->len))
 443				goto split_error;
 444			skb_put(skb_new, skb2->len);
 445			dev_kfree_skb_any(skb);
 446			dev_kfree_skb_any(skb2);
 447			skb = skb_new;
 448		} while (re2.s.code == RING_ENTRY_CODE_MORE);
 449		goto good;
 450	} else {
 451		/* Some other error, discard it. */
 452		dev_kfree_skb_any(skb);
 453		/* Error statistics are accumulated in
 454		 * octeon_mgmt_update_rx_stats.
 455		 */
 456	}
 457	goto done;
 458split_error:
 459	/* Discard the whole mess. */
 460	dev_kfree_skb_any(skb);
 461	dev_kfree_skb_any(skb2);
 462	while (re2.s.code == RING_ENTRY_CODE_MORE) {
 463		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
 464		dev_kfree_skb_any(skb2);
 465	}
 466	netdev->stats.rx_errors++;
 467
 468done:
 469	/* Tell the hardware we processed a packet.  */
 470	mix_ircnt.u64 = 0;
 471	mix_ircnt.s.ircnt = 1;
 472	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
 473	return rc;
 474}
 475
 476static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
 477{
 478	unsigned int work_done = 0;
 479	union cvmx_mixx_ircnt mix_ircnt;
 480	int rc;
 481
 482	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
 483	while (work_done < budget && mix_ircnt.s.ircnt) {
 484
 485		rc = octeon_mgmt_receive_one(p);
 486		if (!rc)
 487			work_done++;
 488
 489		/* Check for more packets. */
 490		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
 491	}
 492
 493	octeon_mgmt_rx_fill_ring(p->netdev);
 494
 495	return work_done;
 496}
 497
 498static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
 499{
 500	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
 501	struct net_device *netdev = p->netdev;
 502	unsigned int work_done = 0;
 503
 504	work_done = octeon_mgmt_receive_packets(p, budget);
 505
 506	if (work_done < budget) {
 507		/* We stopped because no more packets were available. */
 508		napi_complete_done(napi, work_done);
 509		octeon_mgmt_enable_rx_irq(p);
 510	}
 511	octeon_mgmt_update_rx_stats(netdev);
 512
 513	return work_done;
 514}
 515
 516/* Reset the hardware to clean state.  */
 517static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
 518{
 519	union cvmx_mixx_ctl mix_ctl;
 520	union cvmx_mixx_bist mix_bist;
 521	union cvmx_agl_gmx_bist agl_gmx_bist;
 522
 523	mix_ctl.u64 = 0;
 524	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
 525	do {
 526		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
 527	} while (mix_ctl.s.busy);
 528	mix_ctl.s.reset = 1;
 529	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
 530	cvmx_read_csr(p->mix + MIX_CTL);
 531	octeon_io_clk_delay(64);
 532
 533	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
 534	if (mix_bist.u64)
 535		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
 536			(unsigned long long)mix_bist.u64);
 537
 538	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
 539	if (agl_gmx_bist.u64)
 540		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
 541			 (unsigned long long)agl_gmx_bist.u64);
 542}
 543
 544struct octeon_mgmt_cam_state {
 545	u64 cam[6];
 546	u64 cam_mask;
 547	int cam_index;
 548};
 549
 550static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
 551				      const unsigned char *addr)
 552{
 553	int i;
 554
 555	for (i = 0; i < 6; i++)
 556		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
 557	cs->cam_mask |= (1ULL << cs->cam_index);
 558	cs->cam_index++;
 559}
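/*
 * For illustration: entry N of the CAM occupies byte lane N of each of the
 * six CAM registers, one register per MAC-address octet.  Adding the
 * made-up address 02:00:5e:00:00:01 as entry 0 sets
 *
 *	cam[0] |= 0x02; cam[1] |= 0x00; cam[2] |= 0x5e;
 *	cam[3] |= 0x00; cam[4] |= 0x00; cam[5] |= 0x01;
 *	cam_mask |= 1ULL << 0;
 *
 * and a second address would land in byte lane 1 of each register and set
 * bit 1 of cam_mask, up to the 8 entries the hardware provides.
 */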
 560
 561static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
 562{
 563	struct octeon_mgmt *p = netdev_priv(netdev);
 564	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
 565	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
 566	unsigned long flags;
 567	unsigned int prev_packet_enable;
 568	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
 569	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
 570	struct octeon_mgmt_cam_state cam_state;
 571	struct netdev_hw_addr *ha;
 572	int available_cam_entries;
 573
 574	memset(&cam_state, 0, sizeof(cam_state));
 575
 576	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
 577		cam_mode = 0;
 578		available_cam_entries = 8;
 579	} else {
  580		/* One CAM entry is used for the primary address, leaving
  581		 * seven for the secondary addresses.
 582		 */
 583		available_cam_entries = 7 - netdev->uc.count;
 584	}
 585
 586	if (netdev->flags & IFF_MULTICAST) {
 587		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
 588		    netdev_mc_count(netdev) > available_cam_entries)
 589			multicast_mode = 2; /* 2 - Accept all multicast.  */
 590		else
 591			multicast_mode = 0; /* 0 - Use CAM.  */
 592	}
 593
 594	if (cam_mode == 1) {
 595		/* Add primary address. */
 596		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
 597		netdev_for_each_uc_addr(ha, netdev)
 598			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
 599	}
 600	if (multicast_mode == 0) {
 601		netdev_for_each_mc_addr(ha, netdev)
 602			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
 603	}
 604
 605	spin_lock_irqsave(&p->lock, flags);
 606
 607	/* Disable packet I/O. */
 608	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 609	prev_packet_enable = agl_gmx_prtx.s.en;
 610	agl_gmx_prtx.s.en = 0;
 611	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
 612
 613	adr_ctl.u64 = 0;
 614	adr_ctl.s.cam_mode = cam_mode;
 615	adr_ctl.s.mcst = multicast_mode;
 616	adr_ctl.s.bcst = 1;     /* Allow broadcast */
 617
 618	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);
 619
 620	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
 621	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
 622	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
 623	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
 624	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
 625	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
 626	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);
 627
 628	/* Restore packet I/O. */
 629	agl_gmx_prtx.s.en = prev_packet_enable;
 630	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
 631
 632	spin_unlock_irqrestore(&p->lock, flags);
 633}
 634
 635static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
 636{
 637	int r = eth_mac_addr(netdev, addr);
 638
 639	if (r)
 640		return r;
 641
 642	octeon_mgmt_set_rx_filtering(netdev);
 643
 644	return 0;
 645}
 646
 647static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
 648{
 649	struct octeon_mgmt *p = netdev_priv(netdev);
 650	int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 651
 652	WRITE_ONCE(netdev->mtu, new_mtu);
 653
 654	/* HW lifts the limit if the frame is VLAN tagged
  655	 * (+4 bytes for each tag, up to two tags)
 656	 */
 657	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
 658	/* Set the hardware to truncate packets larger than the MTU. The jabber
 659	 * register must be set to a multiple of 8 bytes, so round up. JABBER is
 660	 * an unconditional limit, so we need to account for two possible VLAN
 661	 * tags.
 662	 */
 663	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
 664		       (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
 665
 666	return 0;
 667}
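/*
 * For illustration, assuming a new_mtu of 1500:
 *
 *	max_packet = 1500 + ETH_HLEN + ETH_FCS_LEN = 1518
 *	AGL_GMX_RX_JABBER = (1518 + 7 + 2 * VLAN_HLEN) & 0xfff8
 *	                  = 1533 & 0xfff8 = 1528
 *
 * i.e. the jabber cut-off is max_packet plus room for two VLAN tags (1526)
 * rounded up to the next multiple of 8.
 */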
 668
 669static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
 670{
 671	struct net_device *netdev = dev_id;
 672	struct octeon_mgmt *p = netdev_priv(netdev);
 673	union cvmx_mixx_isr mixx_isr;
 674
 675	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);
 676
 677	/* Clear any pending interrupts */
 678	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
 679	cvmx_read_csr(p->mix + MIX_ISR);
 680
 681	if (mixx_isr.s.irthresh) {
 682		octeon_mgmt_disable_rx_irq(p);
 683		napi_schedule(&p->napi);
 684	}
 685	if (mixx_isr.s.orthresh) {
 686		octeon_mgmt_disable_tx_irq(p);
 687		tasklet_schedule(&p->tx_clean_tasklet);
 688	}
 689
 690	return IRQ_HANDLED;
 691}
 692
 693static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
 694				      struct ifreq *rq, int cmd)
 695{
 696	struct octeon_mgmt *p = netdev_priv(netdev);
 697	struct hwtstamp_config config;
 698	union cvmx_mio_ptp_clock_cfg ptp;
 699	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
 700	bool have_hw_timestamps = false;
 701
 702	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
 703		return -EFAULT;
 704
  705	/* Check the status of the hardware for timestamps */
 706	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
 707		/* Get the current state of the PTP clock */
 708		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
 709		if (!ptp.s.ext_clk_en) {
 710			/* The clock has not been configured to use an
 711			 * external source.  Program it to use the main clock
 712			 * reference.
 713			 */
 714			u64 clock_comp = (NSEC_PER_SEC << 32) /	octeon_get_io_clock_rate();
 715			if (!ptp.s.ptp_en)
 716				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
 717			netdev_info(netdev,
 718				    "PTP Clock using sclk reference @ %lldHz\n",
 719				    (NSEC_PER_SEC << 32) / clock_comp);
 720		} else {
 721			/* The clock is already programmed to use a GPIO */
 722			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
 723			netdev_info(netdev,
 724				    "PTP Clock using GPIO%d @ %lld Hz\n",
 725				    ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);
 726		}
 727
 728		/* Enable the clock if it wasn't done already */
 729		if (!ptp.s.ptp_en) {
 730			ptp.s.ptp_en = 1;
 731			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
 732		}
 733		have_hw_timestamps = true;
 734	}
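/*
 * For illustration, assuming (hypothetically) a 500 MHz I/O clock:
 *
 *	clock_comp = (NSEC_PER_SEC << 32) / 500000000 = 2 << 32 = 0x200000000
 *
 * i.e. the value written to CVMX_MIO_PTP_CLOCK_COMP is the number of
 * nanoseconds added to the PTP clock per sclk tick, in 32.32 fixed point
 * (2.0 ns per tick here), and the rate printed above is recovered as
 * (NSEC_PER_SEC << 32) / clock_comp.
 */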
 735
 736	if (!have_hw_timestamps)
 737		return -EINVAL;
 738
 739	switch (config.tx_type) {
 740	case HWTSTAMP_TX_OFF:
 741	case HWTSTAMP_TX_ON:
 742		break;
 743	default:
 744		return -ERANGE;
 745	}
 746
 747	switch (config.rx_filter) {
 748	case HWTSTAMP_FILTER_NONE:
 749		p->has_rx_tstamp = false;
 750		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
 751		rxx_frm_ctl.s.ptp_mode = 0;
 752		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
 753		break;
 754	case HWTSTAMP_FILTER_ALL:
 755	case HWTSTAMP_FILTER_SOME:
 756	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 757	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 758	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 759	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 760	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
 761	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 762	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
 763	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
 764	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
 765	case HWTSTAMP_FILTER_PTP_V2_EVENT:
 766	case HWTSTAMP_FILTER_PTP_V2_SYNC:
 767	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 768	case HWTSTAMP_FILTER_NTP_ALL:
 769		p->has_rx_tstamp = have_hw_timestamps;
 770		config.rx_filter = HWTSTAMP_FILTER_ALL;
 771		if (p->has_rx_tstamp) {
 772			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
 773			rxx_frm_ctl.s.ptp_mode = 1;
 774			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
 775		}
 776		break;
 777	default:
 778		return -ERANGE;
 779	}
 780
 781	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
 782		return -EFAULT;
 783
 784	return 0;
 785}
 786
 787static int octeon_mgmt_ioctl(struct net_device *netdev,
 788			     struct ifreq *rq, int cmd)
 789{
 790	switch (cmd) {
 791	case SIOCSHWTSTAMP:
 792		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
 793	default:
 794		return phy_do_ioctl(netdev, rq, cmd);
 795	}
 796}
 797
 798static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
 799{
 800	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 801
 802	/* Disable GMX before we make any changes. */
 803	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 804	prtx_cfg.s.en = 0;
 805	prtx_cfg.s.tx_en = 0;
 806	prtx_cfg.s.rx_en = 0;
 807	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
 808
 809	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
 810		int i;
 811		for (i = 0; i < 10; i++) {
 812			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 813			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
 814				break;
 815			mdelay(1);
 816			i++;
 817		}
 818	}
 819}
 820
 821static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
 822{
 823	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 824
 825	/* Restore the GMX enable state only if link is set */
 826	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 827	prtx_cfg.s.tx_en = 1;
 828	prtx_cfg.s.rx_en = 1;
 829	prtx_cfg.s.en = 1;
 830	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
 831}
 832
 833static void octeon_mgmt_update_link(struct octeon_mgmt *p)
 834{
 835	struct net_device *ndev = p->netdev;
 836	struct phy_device *phydev = ndev->phydev;
 837	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 838
 839	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 840
 841	if (!phydev->link)
 842		prtx_cfg.s.duplex = 1;
 843	else
 844		prtx_cfg.s.duplex = phydev->duplex;
 845
 846	switch (phydev->speed) {
 847	case 10:
 848		prtx_cfg.s.speed = 0;
 849		prtx_cfg.s.slottime = 0;
 850
 851		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
 852			prtx_cfg.s.burst = 1;
 853			prtx_cfg.s.speed_msb = 1;
 854		}
 855		break;
 856	case 100:
 857		prtx_cfg.s.speed = 0;
 858		prtx_cfg.s.slottime = 0;
 859
 860		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
 861			prtx_cfg.s.burst = 1;
 862			prtx_cfg.s.speed_msb = 0;
 863		}
 864		break;
 865	case 1000:
  866		/* 1000 Mbit/s is only supported on 6XXX chips */
 867		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
 868			prtx_cfg.s.speed = 1;
 869			prtx_cfg.s.speed_msb = 0;
 870			/* Only matters for half-duplex */
 871			prtx_cfg.s.slottime = 1;
 872			prtx_cfg.s.burst = phydev->duplex;
 873		}
 874		break;
 875	case 0:  /* No link */
 876	default:
 877		break;
 878	}
 879
 880	/* Write the new GMX setting with the port still disabled. */
 881	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
 882
 883	/* Read GMX CFG again to make sure the config is completed. */
 884	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 885
 886	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
 887		union cvmx_agl_gmx_txx_clk agl_clk;
 888		union cvmx_agl_prtx_ctl prtx_ctl;
 889
 890		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
 891		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
 892		/* MII (both speeds) and RGMII 1000 speed. */
 893		agl_clk.s.clk_cnt = 1;
 894		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
 895			if (phydev->speed == 10)
 896				agl_clk.s.clk_cnt = 50;
 897			else if (phydev->speed == 100)
 898				agl_clk.s.clk_cnt = 5;
 899		}
 900		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
 901	}
 902}
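/*
 * For illustration, assuming the AGL_GMX_TX_CLK divider programmed above
 * works from the 125 MHz reference used elsewhere in this driver: in RGMII
 * mode clk_cnt = 50 gives 125 MHz / 50 = 2.5 MHz for 10 Mbit/s links and
 * clk_cnt = 5 gives 125 MHz / 5 = 25 MHz for 100 Mbit/s, while clk_cnt = 1
 * (MII, and RGMII at 1000 Mbit/s) keeps the full reference rate.
 */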
 903
 904static void octeon_mgmt_adjust_link(struct net_device *netdev)
 905{
 906	struct octeon_mgmt *p = netdev_priv(netdev);
 907	struct phy_device *phydev = netdev->phydev;
 908	unsigned long flags;
 909	int link_changed = 0;
 910
 911	if (!phydev)
 912		return;
 913
 914	spin_lock_irqsave(&p->lock, flags);
 915
 916
 917	if (!phydev->link && p->last_link)
 918		link_changed = -1;
 919
 920	if (phydev->link &&
 921	    (p->last_duplex != phydev->duplex ||
 922	     p->last_link != phydev->link ||
 923	     p->last_speed != phydev->speed)) {
 924		octeon_mgmt_disable_link(p);
 925		link_changed = 1;
 926		octeon_mgmt_update_link(p);
 927		octeon_mgmt_enable_link(p);
 928	}
 929
 930	p->last_link = phydev->link;
 931	p->last_speed = phydev->speed;
 932	p->last_duplex = phydev->duplex;
 933
 934	spin_unlock_irqrestore(&p->lock, flags);
 935
 936	if (link_changed != 0) {
 937		if (link_changed > 0)
 938			netdev_info(netdev, "Link is up - %d/%s\n",
 939				    phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
 940		else
 941			netdev_info(netdev, "Link is down\n");
 942	}
 943}
 944
 945static int octeon_mgmt_init_phy(struct net_device *netdev)
 946{
 947	struct octeon_mgmt *p = netdev_priv(netdev);
 948	struct phy_device *phydev = NULL;
 949
 950	if (octeon_is_simulation() || p->phy_np == NULL) {
 951		/* No PHYs in the simulator. */
 952		netif_carrier_on(netdev);
 953		return 0;
 954	}
 955
 956	phydev = of_phy_connect(netdev, p->phy_np,
 957				octeon_mgmt_adjust_link, 0,
 958				PHY_INTERFACE_MODE_MII);
 959
 960	if (!phydev)
 961		return -EPROBE_DEFER;
 962
 963	return 0;
 964}
 965
 966static int octeon_mgmt_open(struct net_device *netdev)
 967{
 968	struct octeon_mgmt *p = netdev_priv(netdev);
 969	union cvmx_mixx_ctl mix_ctl;
 970	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
 971	union cvmx_mixx_oring1 oring1;
 972	union cvmx_mixx_iring1 iring1;
 973	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
 974	union cvmx_mixx_irhwm mix_irhwm;
 975	union cvmx_mixx_orhwm mix_orhwm;
 976	union cvmx_mixx_intena mix_intena;
 977	struct sockaddr sa;
 978
 979	/* Allocate ring buffers.  */
 980	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
 981			     GFP_KERNEL);
 982	if (!p->tx_ring)
 983		return -ENOMEM;
 984	p->tx_ring_handle =
 985		dma_map_single(p->dev, p->tx_ring,
 986			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
 987			       DMA_BIDIRECTIONAL);
 988	p->tx_next = 0;
 989	p->tx_next_clean = 0;
 990	p->tx_current_fill = 0;
 991
 992
 993	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
 994			     GFP_KERNEL);
 995	if (!p->rx_ring)
 996		goto err_nomem;
 997	p->rx_ring_handle =
 998		dma_map_single(p->dev, p->rx_ring,
 999			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1000			       DMA_BIDIRECTIONAL);
1001
1002	p->rx_next = 0;
1003	p->rx_next_fill = 0;
1004	p->rx_current_fill = 0;
1005
1006	octeon_mgmt_reset_hw(p);
1007
1008	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
1009
1010	/* Bring it out of reset if needed. */
1011	if (mix_ctl.s.reset) {
1012		mix_ctl.s.reset = 0;
1013		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
1014		do {
1015			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
1016		} while (mix_ctl.s.reset);
1017	}
1018
1019	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
1020		agl_gmx_inf_mode.u64 = 0;
1021		agl_gmx_inf_mode.s.en = 1;
1022		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
1023	}
1024	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
1025		|| OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
1026		/* Force compensation values, as they are not
1027		 * determined properly by HW
1028		 */
1029		union cvmx_agl_gmx_drv_ctl drv_ctl;
1030
1031		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
1032		if (p->port) {
1033			drv_ctl.s.byp_en1 = 1;
1034			drv_ctl.s.nctl1 = 6;
1035			drv_ctl.s.pctl1 = 6;
1036		} else {
1037			drv_ctl.s.byp_en = 1;
1038			drv_ctl.s.nctl = 6;
1039			drv_ctl.s.pctl = 6;
1040		}
1041		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
1042	}
1043
1044	oring1.u64 = 0;
1045	oring1.s.obase = p->tx_ring_handle >> 3;
1046	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
1047	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);
1048
1049	iring1.u64 = 0;
1050	iring1.s.ibase = p->rx_ring_handle >> 3;
1051	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
1052	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
1053
1054	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
1055	octeon_mgmt_set_mac_address(netdev, &sa);
1056
1057	octeon_mgmt_change_mtu(netdev, netdev->mtu);
1058
1059	/* Enable the port HW. Packets are not allowed until
1060	 * cvmx_mgmt_port_enable() is called.
1061	 */
1062	mix_ctl.u64 = 0;
1063	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
1064	mix_ctl.s.en = 1;           /* Enable the port */
1065	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
1066	/* MII CB-request FIFO programmable high watermark */
1067	mix_ctl.s.mrq_hwm = 1;
1068#ifdef __LITTLE_ENDIAN
1069	mix_ctl.s.lendian = 1;
1070#endif
1071	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
1072
1073	/* Read the PHY to find the mode of the interface. */
1074	if (octeon_mgmt_init_phy(netdev)) {
1075		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
1076		goto err_noirq;
1077	}
1078
1079	/* Set the mode of the interface, RGMII/MII. */
1080	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
1081		union cvmx_agl_prtx_ctl agl_prtx_ctl;
1082		int rgmii_mode =
1083			(linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
1084					   netdev->phydev->supported) |
1085			 linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1086					   netdev->phydev->supported)) != 0;
1087
1088		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1089		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
1090		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
1091
 1092		/* MII clock counts are based on the 125 MHz
 1093		 * reference, which has an 8 ns period. So our delays
1094		 * need to be multiplied by this factor.
1095		 */
1096#define NS_PER_PHY_CLK 8
1097
1098		/* Take the DLL and clock tree out of reset */
1099		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1100		agl_prtx_ctl.s.clkrst = 0;
1101		if (rgmii_mode) {
1102			agl_prtx_ctl.s.dllrst = 0;
1103			agl_prtx_ctl.s.clktx_byp = 0;
1104		}
1105		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
1106		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
1107
1108		/* Wait for the DLL to lock. External 125 MHz
1109		 * reference clock must be stable at this point.
1110		 */
1111		ndelay(256 * NS_PER_PHY_CLK);
1112
1113		/* Enable the interface */
1114		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1115		agl_prtx_ctl.s.enable = 1;
1116		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1117
1118		/* Read the value back to force the previous write */
1119		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1120
1121		/* Enable the compensation controller */
1122		agl_prtx_ctl.s.comp = 1;
1123		agl_prtx_ctl.s.drv_byp = 0;
1124		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
1125		/* Force write out before wait. */
1126		cvmx_read_csr(p->agl_prt_ctl);
1127
 1128		/* Wait for the compensation state to lock. */
1129		ndelay(1040 * NS_PER_PHY_CLK);
1130
1131		/* Default Interframe Gaps are too small.  Recommended
 1132		 * workaround is:
1133		 *
1134		 * AGL_GMX_TX_IFG[IFG1]=14
1135		 * AGL_GMX_TX_IFG[IFG2]=10
1136		 */
1137		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
1138	}
1139
1140	octeon_mgmt_rx_fill_ring(netdev);
1141
1142	/* Clear statistics. */
1143	/* Clear on read. */
1144	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
1145	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
1146	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
1147
1148	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
1149	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
1150	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
1151
1152	/* Clear any pending interrupts */
1153	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
1154
1155	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
1156			netdev)) {
1157		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
1158		goto err_noirq;
1159	}
1160
1161	/* Interrupt every single RX packet */
1162	mix_irhwm.u64 = 0;
1163	mix_irhwm.s.irhwm = 0;
1164	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
1165
1166	/* Interrupt when we have 1 or more packets to clean.  */
1167	mix_orhwm.u64 = 0;
1168	mix_orhwm.s.orhwm = 0;
1169	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
1170
1171	/* Enable receive and transmit interrupts */
1172	mix_intena.u64 = 0;
1173	mix_intena.s.ithena = 1;
1174	mix_intena.s.othena = 1;
1175	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
1176
1177	/* Enable packet I/O. */
1178
1179	rxx_frm_ctl.u64 = 0;
1180	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
1181	rxx_frm_ctl.s.pre_align = 1;
1182	/* When set, disables the length check for non-min sized pkts
1183	 * with padding in the client data.
1184	 */
1185	rxx_frm_ctl.s.pad_len = 1;
1186	/* When set, disables the length check for VLAN pkts */
1187	rxx_frm_ctl.s.vlan_len = 1;
 1188	/* When set, PREAMBLE checking is less strict */
1189	rxx_frm_ctl.s.pre_free = 1;
1190	/* Control Pause Frames can match station SMAC */
1191	rxx_frm_ctl.s.ctl_smac = 0;
 1192	/* Control Pause Frames can match the globally assigned multicast address */
1193	rxx_frm_ctl.s.ctl_mcst = 1;
1194	/* Forward pause information to TX block */
1195	rxx_frm_ctl.s.ctl_bck = 1;
1196	/* Drop Control Pause Frames */
1197	rxx_frm_ctl.s.ctl_drp = 1;
1198	/* Strip off the preamble */
1199	rxx_frm_ctl.s.pre_strp = 1;
1200	/* This port is configured to send PREAMBLE+SFD to begin every
1201	 * frame.  GMX checks that the PREAMBLE is sent correctly.
1202	 */
1203	rxx_frm_ctl.s.pre_chk = 1;
1204	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
1205
1206	/* Configure the port duplex, speed and enables */
1207	octeon_mgmt_disable_link(p);
1208	if (netdev->phydev)
1209		octeon_mgmt_update_link(p);
1210	octeon_mgmt_enable_link(p);
1211
1212	p->last_link = 0;
1213	p->last_speed = 0;
 1214	/* The PHY is not present in the simulator. The carrier was enabled
 1215	 * while initializing the PHY for the simulator, so leave it enabled.
1216	 */
1217	if (netdev->phydev) {
1218		netif_carrier_off(netdev);
1219		phy_start(netdev->phydev);
1220	}
1221
1222	netif_wake_queue(netdev);
1223	napi_enable(&p->napi);
1224
1225	return 0;
1226err_noirq:
1227	octeon_mgmt_reset_hw(p);
1228	dma_unmap_single(p->dev, p->rx_ring_handle,
1229			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1230			 DMA_BIDIRECTIONAL);
1231	kfree(p->rx_ring);
1232err_nomem:
1233	dma_unmap_single(p->dev, p->tx_ring_handle,
1234			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1235			 DMA_BIDIRECTIONAL);
1236	kfree(p->tx_ring);
1237	return -ENOMEM;
1238}
1239
1240static int octeon_mgmt_stop(struct net_device *netdev)
1241{
1242	struct octeon_mgmt *p = netdev_priv(netdev);
1243
1244	napi_disable(&p->napi);
1245	netif_stop_queue(netdev);
1246
1247	if (netdev->phydev) {
1248		phy_stop(netdev->phydev);
1249		phy_disconnect(netdev->phydev);
1250	}
1251
1252	netif_carrier_off(netdev);
1253
1254	octeon_mgmt_reset_hw(p);
1255
1256	free_irq(p->irq, netdev);
1257
1258	/* dma_unmap is a nop on Octeon, so just free everything.  */
1259	skb_queue_purge(&p->tx_list);
1260	skb_queue_purge(&p->rx_list);
1261
1262	dma_unmap_single(p->dev, p->rx_ring_handle,
1263			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1264			 DMA_BIDIRECTIONAL);
1265	kfree(p->rx_ring);
1266
1267	dma_unmap_single(p->dev, p->tx_ring_handle,
1268			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1269			 DMA_BIDIRECTIONAL);
1270	kfree(p->tx_ring);
1271
1272	return 0;
1273}
1274
1275static netdev_tx_t
1276octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
1277{
1278	struct octeon_mgmt *p = netdev_priv(netdev);
1279	union mgmt_port_ring_entry re;
1280	unsigned long flags;
1281	netdev_tx_t rv = NETDEV_TX_BUSY;
1282
1283	re.d64 = 0;
1284	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
1285	re.s.len = skb->len;
1286	re.s.addr = dma_map_single(p->dev, skb->data,
1287				   skb->len,
1288				   DMA_TO_DEVICE);
1289
1290	spin_lock_irqsave(&p->tx_list.lock, flags);
1291
1292	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
1293		spin_unlock_irqrestore(&p->tx_list.lock, flags);
1294		netif_stop_queue(netdev);
1295		spin_lock_irqsave(&p->tx_list.lock, flags);
1296	}
1297
1298	if (unlikely(p->tx_current_fill >=
1299		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
1300		spin_unlock_irqrestore(&p->tx_list.lock, flags);
1301		dma_unmap_single(p->dev, re.s.addr, re.s.len,
1302				 DMA_TO_DEVICE);
1303		goto out;
1304	}
1305
1306	__skb_queue_tail(&p->tx_list, skb);
1307
1308	/* Put it in the ring.  */
1309	p->tx_ring[p->tx_next] = re.d64;
1310	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
1311	p->tx_current_fill++;
1312
1313	spin_unlock_irqrestore(&p->tx_list.lock, flags);
1314
1315	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
1316				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1317				   DMA_BIDIRECTIONAL);
1318
1319	netdev->stats.tx_packets++;
1320	netdev->stats.tx_bytes += skb->len;
1321
1322	/* Ring the bell.  */
1323	cvmx_write_csr(p->mix + MIX_ORING2, 1);
1324
1325	netif_trans_update(netdev);
1326	rv = NETDEV_TX_OK;
1327out:
1328	octeon_mgmt_update_tx_stats(netdev);
1329	return rv;
1330}
1331
1332#ifdef CONFIG_NET_POLL_CONTROLLER
1333static void octeon_mgmt_poll_controller(struct net_device *netdev)
1334{
1335	struct octeon_mgmt *p = netdev_priv(netdev);
1336
1337	octeon_mgmt_receive_packets(p, 16);
1338	octeon_mgmt_update_rx_stats(netdev);
1339}
1340#endif
1341
1342static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1343				    struct ethtool_drvinfo *info)
1344{
1345	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1346}
1347
1348static int octeon_mgmt_nway_reset(struct net_device *dev)
1349{
1350	if (!capable(CAP_NET_ADMIN))
1351		return -EPERM;
1352
1353	if (dev->phydev)
1354		return phy_start_aneg(dev->phydev);
1355
1356	return -EOPNOTSUPP;
1357}
1358
1359static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1360	.get_drvinfo = octeon_mgmt_get_drvinfo,
1361	.nway_reset = octeon_mgmt_nway_reset,
1362	.get_link = ethtool_op_get_link,
1363	.get_link_ksettings = phy_ethtool_get_link_ksettings,
1364	.set_link_ksettings = phy_ethtool_set_link_ksettings,
1365};
1366
1367static const struct net_device_ops octeon_mgmt_ops = {
1368	.ndo_open =			octeon_mgmt_open,
1369	.ndo_stop =			octeon_mgmt_stop,
1370	.ndo_start_xmit =		octeon_mgmt_xmit,
1371	.ndo_set_rx_mode =		octeon_mgmt_set_rx_filtering,
1372	.ndo_set_mac_address =		octeon_mgmt_set_mac_address,
1373	.ndo_eth_ioctl =			octeon_mgmt_ioctl,
1374	.ndo_change_mtu =		octeon_mgmt_change_mtu,
1375#ifdef CONFIG_NET_POLL_CONTROLLER
1376	.ndo_poll_controller =		octeon_mgmt_poll_controller,
1377#endif
1378};
1379
1380static int octeon_mgmt_probe(struct platform_device *pdev)
1381{
1382	struct net_device *netdev;
1383	struct octeon_mgmt *p;
1384	const __be32 *data;
1385	struct resource *res_mix;
1386	struct resource *res_agl;
1387	struct resource *res_agl_prt_ctl;
1388	int len;
1389	int result;
1390
1391	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
1392	if (netdev == NULL)
1393		return -ENOMEM;
1394
1395	SET_NETDEV_DEV(netdev, &pdev->dev);
1396
1397	platform_set_drvdata(pdev, netdev);
1398	p = netdev_priv(netdev);
1399	netif_napi_add_weight(netdev, &p->napi, octeon_mgmt_napi_poll,
1400			      OCTEON_MGMT_NAPI_WEIGHT);
1401
1402	p->netdev = netdev;
1403	p->dev = &pdev->dev;
1404	p->has_rx_tstamp = false;
1405
1406	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
1407	if (data && len == sizeof(*data)) {
1408		p->port = be32_to_cpup(data);
1409	} else {
1410		dev_err(&pdev->dev, "no 'cell-index' property\n");
1411		result = -ENXIO;
1412		goto err;
1413	}
1414
1415	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
1416
1417	result = platform_get_irq(pdev, 0);
1418	if (result < 0)
1419		goto err;
1420
1421	p->irq = result;
1422
1423	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1424	if (res_mix == NULL) {
1425		dev_err(&pdev->dev, "no 'reg' resource\n");
1426		result = -ENXIO;
1427		goto err;
1428	}
1429
1430	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1431	if (res_agl == NULL) {
1432		dev_err(&pdev->dev, "no 'reg' resource\n");
1433		result = -ENXIO;
1434		goto err;
1435	}
1436
1437	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1438	if (res_agl_prt_ctl == NULL) {
1439		dev_err(&pdev->dev, "no 'reg' resource\n");
1440		result = -ENXIO;
1441		goto err;
1442	}
1443
1444	p->mix_phys = res_mix->start;
1445	p->mix_size = resource_size(res_mix);
1446	p->agl_phys = res_agl->start;
1447	p->agl_size = resource_size(res_agl);
1448	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
1449	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
1450
1451
1452	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
1453				     res_mix->name)) {
1454		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1455			res_mix->name);
1456		result = -ENXIO;
1457		goto err;
1458	}
1459
1460	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
1461				     res_agl->name)) {
1462		result = -ENXIO;
1463		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1464			res_agl->name);
1465		goto err;
1466	}
1467
1468	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
1469				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
1470		result = -ENXIO;
1471		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1472			res_agl_prt_ctl->name);
1473		goto err;
1474	}
1475
1476	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
1477	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
1478	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
1479					   p->agl_prt_ctl_size);
1480	if (!p->mix || !p->agl || !p->agl_prt_ctl) {
1481		dev_err(&pdev->dev, "failed to map I/O memory\n");
1482		result = -ENOMEM;
1483		goto err;
1484	}
1485
1486	spin_lock_init(&p->lock);
1487
1488	skb_queue_head_init(&p->tx_list);
1489	skb_queue_head_init(&p->rx_list);
1490	tasklet_setup(&p->tx_clean_tasklet,
1491		      octeon_mgmt_clean_tx_tasklet);
1492
1493	netdev->priv_flags |= IFF_UNICAST_FLT;
1494
1495	netdev->netdev_ops = &octeon_mgmt_ops;
1496	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1497
1498	netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
1499	netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
1500
1501	result = of_get_ethdev_address(pdev->dev.of_node, netdev);
1502	if (result)
1503		eth_hw_addr_random(netdev);
1504
1505	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1506
1507	result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1508	if (result)
1509		goto err;
1510
1511	netif_carrier_off(netdev);
1512	result = register_netdev(netdev);
1513	if (result)
1514		goto err;
1515
1516	return 0;
1517
1518err:
1519	of_node_put(p->phy_np);
1520	free_netdev(netdev);
1521	return result;
1522}
1523
1524static void octeon_mgmt_remove(struct platform_device *pdev)
1525{
1526	struct net_device *netdev = platform_get_drvdata(pdev);
1527	struct octeon_mgmt *p = netdev_priv(netdev);
1528
1529	unregister_netdev(netdev);
1530	of_node_put(p->phy_np);
1531	free_netdev(netdev);
1532}
1533
1534static const struct of_device_id octeon_mgmt_match[] = {
1535	{
1536		.compatible = "cavium,octeon-5750-mix",
1537	},
1538	{},
1539};
1540MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
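/*
 * For illustration, a device tree node this driver could bind against
 * might look roughly like the sketch below (the unit address, "reg"
 * ranges, interrupt numbers and PHY label are placeholders, not values
 * taken from a real board file):
 *
 *	mix0: ethernet@... {
 *		compatible = "cavium,octeon-5750-mix";
 *		reg = <...>, <...>, <...>, <...>;
 *		cell-index = <0>;
 *		interrupts = <...>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		phy-handle = <&phy0>;
 *	};
 *
 * octeon_mgmt_probe() above reads "cell-index", memory resources 0, 1 and
 * 3, interrupt 0, the MAC address and the "phy-handle" phandle from such a
 * node.
 */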
1541
1542static struct platform_driver octeon_mgmt_driver = {
1543	.driver = {
1544		.name		= "octeon_mgmt",
1545		.of_match_table = octeon_mgmt_match,
1546	},
1547	.probe		= octeon_mgmt_probe,
1548	.remove		= octeon_mgmt_remove,
1549};
1550
1551module_platform_driver(octeon_mgmt_driver);
1552
1553MODULE_SOFTDEP("pre: mdio-cavium");
1554MODULE_DESCRIPTION(DRV_DESCRIPTION);
1555MODULE_AUTHOR("David Daney");
1556MODULE_LICENSE("GPL");
 407			/* The first 8 bytes are the timestamp */
 408			u64 ns = *(u64 *)skb->data;
 409			struct skb_shared_hwtstamps *ts;
 410			ts = skb_hwtstamps(skb);
 411			ts->hwtstamp = ns_to_ktime(ns);
 412			__skb_pull(skb, 8);
 413		}
 414		skb->protocol = eth_type_trans(skb, netdev);
 415		netdev->stats.rx_packets++;
 416		netdev->stats.rx_bytes += skb->len;
 417		netif_receive_skb(skb);
 418		rc = 0;
 419	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
 420		/* Packet split across skbs.  This can happen if we
 421		 * increase the MTU.  Buffers that are already in the
 422		 * rx ring can then end up being too small.  As the rx
 423		 * ring is refilled, buffers sized for the new MTU
 424		 * will be used and we should go back to the normal
 425		 * non-split case.
 426		 */
 427		skb_put(skb, re.s.len);
 428		do {
 429			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
 430			if (re2.s.code != RING_ENTRY_CODE_MORE
 431				&& re2.s.code != RING_ENTRY_CODE_DONE)
 432				goto split_error;
 433			skb_put(skb2, re2.s.len);
 434			skb_new = skb_copy_expand(skb, 0, skb2->len,
 435						  GFP_ATOMIC);
 436			if (!skb_new)
 437				goto split_error;
 438			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
 439					  skb2->len))
 440				goto split_error;
 441			skb_put(skb_new, skb2->len);
 442			dev_kfree_skb_any(skb);
 443			dev_kfree_skb_any(skb2);
 444			skb = skb_new;
 445		} while (re2.s.code == RING_ENTRY_CODE_MORE);
 446		goto good;
 447	} else {
 448		/* Some other error, discard it. */
 449		dev_kfree_skb_any(skb);
 450		/* Error statistics are accumulated in
 451		 * octeon_mgmt_update_rx_stats.
 452		 */
 453	}
 454	goto done;
 455split_error:
 456	/* Discard the whole mess. */
 457	dev_kfree_skb_any(skb);
 458	dev_kfree_skb_any(skb2);
 459	while (re2.s.code == RING_ENTRY_CODE_MORE) {
 460		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
 461		dev_kfree_skb_any(skb2);
 462	}
 463	netdev->stats.rx_errors++;
 464
 465done:
 466	/* Tell the hardware we processed a packet.  */
 467	mix_ircnt.u64 = 0;
 468	mix_ircnt.s.ircnt = 1;
 469	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
 470	return rc;
 471}
 472
 473static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
 474{
 475	unsigned int work_done = 0;
 476	union cvmx_mixx_ircnt mix_ircnt;
 477	int rc;
 478
 479	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
 480	while (work_done < budget && mix_ircnt.s.ircnt) {
 481
 482		rc = octeon_mgmt_receive_one(p);
 483		if (!rc)
 484			work_done++;
 485
 486		/* Check for more packets. */
 487		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
 488	}
 489
 490	octeon_mgmt_rx_fill_ring(p->netdev);
 491
 492	return work_done;
 493}
 494
 495static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
 496{
 497	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
 498	struct net_device *netdev = p->netdev;
 499	unsigned int work_done = 0;
 500
 501	work_done = octeon_mgmt_receive_packets(p, budget);
 502
 503	if (work_done < budget) {
 504		/* We stopped because no more packets were available. */
 505		napi_complete(napi);
 506		octeon_mgmt_enable_rx_irq(p);
 507	}
 508	octeon_mgmt_update_rx_stats(netdev);
 509
 510	return work_done;
 511}
 512
 513/* Reset the hardware to clean state.  */
 514static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
 515{
 516	union cvmx_mixx_ctl mix_ctl;
 517	union cvmx_mixx_bist mix_bist;
 518	union cvmx_agl_gmx_bist agl_gmx_bist;
 519
 520	mix_ctl.u64 = 0;
 521	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
 522	do {
 523		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
 524	} while (mix_ctl.s.busy);
 525	mix_ctl.s.reset = 1;
 526	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
 527	cvmx_read_csr(p->mix + MIX_CTL);
 528	octeon_io_clk_delay(64);
 529
 530	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
 531	if (mix_bist.u64)
 532		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
 533			(unsigned long long)mix_bist.u64);
 534
 535	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
 536	if (agl_gmx_bist.u64)
 537		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
 538			 (unsigned long long)agl_gmx_bist.u64);
 539}
 540
 541struct octeon_mgmt_cam_state {
 542	u64 cam[6];
 543	u64 cam_mask;
 544	int cam_index;
 545};
 546
 547static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
 548				      unsigned char *addr)
 549{
 550	int i;
 551
 552	for (i = 0; i < 6; i++)
 553		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
 554	cs->cam_mask |= (1ULL << cs->cam_index);
 555	cs->cam_index++;
 556}
 557
 558static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
 559{
 560	struct octeon_mgmt *p = netdev_priv(netdev);
 561	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
 562	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
 563	unsigned long flags;
 564	unsigned int prev_packet_enable;
 565	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
 566	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
 567	struct octeon_mgmt_cam_state cam_state;
 568	struct netdev_hw_addr *ha;
 569	int available_cam_entries;
 570
 571	memset(&cam_state, 0, sizeof(cam_state));
 572
 573	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
 574		cam_mode = 0;
 575		available_cam_entries = 8;
 576	} else {
 577		/* One CAM entry for the primary address, leaves seven
 578		 * for the secondary addresses.
 579		 */
 580		available_cam_entries = 7 - netdev->uc.count;
 581	}
 582
 583	if (netdev->flags & IFF_MULTICAST) {
 584		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
 585		    netdev_mc_count(netdev) > available_cam_entries)
 586			multicast_mode = 2; /* 2 - Accept all multicast.  */
 587		else
 588			multicast_mode = 0; /* 0 - Use CAM.  */
 589	}
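	/* The CAM has eight entries.  For example, with two secondary
	 * unicast addresses the primary address plus those two use three
	 * entries, leaving 7 - 2 = 5 for multicast; more multicast addresses
	 * than that (or IFF_ALLMULTI) falls back to accept-all-multicast,
	 * and IFF_PROMISC or more than 7 unicast addresses bypasses the CAM
	 * entirely (cam_mode = 0).
	 */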
 590
 591	if (cam_mode == 1) {
 592		/* Add primary address. */
 593		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
 594		netdev_for_each_uc_addr(ha, netdev)
 595			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
 596	}
 597	if (multicast_mode == 0) {
 598		netdev_for_each_mc_addr(ha, netdev)
 599			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
 600	}
 601
 602	spin_lock_irqsave(&p->lock, flags);
 603
 604	/* Disable packet I/O. */
 605	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 606	prev_packet_enable = agl_gmx_prtx.s.en;
 607	agl_gmx_prtx.s.en = 0;
 608	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
 609
 610	adr_ctl.u64 = 0;
 611	adr_ctl.s.cam_mode = cam_mode;
 612	adr_ctl.s.mcst = multicast_mode;
 613	adr_ctl.s.bcst = 1;     /* Allow broadcast */
 614
 615	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);
 616
 617	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
 618	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
 619	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
 620	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
 621	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
 622	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
 623	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);
 624
 625	/* Restore packet I/O. */
 626	agl_gmx_prtx.s.en = prev_packet_enable;
 627	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
 628
 629	spin_unlock_irqrestore(&p->lock, flags);
 630}
 631
 632static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
 633{
 634	int r = eth_mac_addr(netdev, addr);
 635
 636	if (r)
 637		return r;
 638
 639	octeon_mgmt_set_rx_filtering(netdev);
 640
 641	return 0;
 642}
 643
 644static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
 645{
 646	struct octeon_mgmt *p = netdev_priv(netdev);
 647	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
 648
 649	/* Limit the MTU to make sure the ethernet packets are between
 650	 * 64 bytes and 16383 bytes.
 651	 */
 652	if (size_without_fcs < 64 || size_without_fcs > 16383) {
 653		dev_warn(p->dev, "MTU must be between %d and %d.\n",
 654			 64 - OCTEON_MGMT_RX_HEADROOM,
 655			 16383 - OCTEON_MGMT_RX_HEADROOM);
 656		return -EINVAL;
 657	}
 658
 659	netdev->mtu = new_mtu;
 660
 661	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
 662	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
 663		       (size_without_fcs + 7) & 0xfff8);
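	/* For example, new_mtu = 1500 gives size_without_fcs = 1500 + 22 =
	 * 1522, so FRM_MAX is set to 1522 and JABBER to (1522 + 7) & 0xfff8
	 * = 1528, i.e. rounded up to a multiple of 8.  The 64..16383 check
	 * above therefore limits the MTU to roughly 42..16361 bytes.
	 */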
 664
 665	return 0;
 666}
 667
 668static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
 669{
 670	struct net_device *netdev = dev_id;
 671	struct octeon_mgmt *p = netdev_priv(netdev);
 672	union cvmx_mixx_isr mixx_isr;
 673
 674	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);
 675
 676	/* Clear any pending interrupts */
 677	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
 678	cvmx_read_csr(p->mix + MIX_ISR);
 679
 680	if (mixx_isr.s.irthresh) {
 681		octeon_mgmt_disable_rx_irq(p);
 682		napi_schedule(&p->napi);
 683	}
 684	if (mixx_isr.s.orthresh) {
 685		octeon_mgmt_disable_tx_irq(p);
 686		tasklet_schedule(&p->tx_clean_tasklet);
 687	}
 688
 689	return IRQ_HANDLED;
 690}
 691
 692static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
 693				      struct ifreq *rq, int cmd)
 694{
 695	struct octeon_mgmt *p = netdev_priv(netdev);
 696	struct hwtstamp_config config;
 697	union cvmx_mio_ptp_clock_cfg ptp;
 698	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
 699	bool have_hw_timestamps = false;
 700
 701	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
 702		return -EFAULT;
 703
 704	if (config.flags) /* reserved for future extensions */
 705		return -EINVAL;
 706
 707	/* Check whether the hardware supports timestamps */
 708	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
 709		/* Get the current state of the PTP clock */
 710		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
 711		if (!ptp.s.ext_clk_en) {
 712			/* The clock has not been configured to use an
 713			 * external source.  Program it to use the main clock
 714			 * reference.
 715			 */
 716			u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
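			/* CLOCK_COMP appears to hold nanoseconds per PTP
			 * tick as a 32.32 fixed-point value, hence
			 * (NSEC_PER_SEC << 32) / rate.  For example, a
			 * (hypothetical) 800 MHz I/O clock gives 1.25 ns
			 * per tick, i.e. 0x140000000.
			 */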
 717			if (!ptp.s.ptp_en)
 718				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
 719			pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
 720				(NSEC_PER_SEC << 32) / clock_comp);
 721		} else {
 722			/* The clock is already programmed to use a GPIO */
 723			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
 724			pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
 725				ptp.s.ext_clk_in,
 726				(NSEC_PER_SEC << 32) / clock_comp);
 727		}
 728
 729		/* Enable the clock if it wasn't done already */
 730		if (!ptp.s.ptp_en) {
 731			ptp.s.ptp_en = 1;
 732			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
 733		}
 734		have_hw_timestamps = true;
 735	}
 736
 737	if (!have_hw_timestamps)
 738		return -EINVAL;
 739
 740	switch (config.tx_type) {
 741	case HWTSTAMP_TX_OFF:
 742	case HWTSTAMP_TX_ON:
 743		break;
 744	default:
 745		return -ERANGE;
 746	}
 747
 748	switch (config.rx_filter) {
 749	case HWTSTAMP_FILTER_NONE:
 750		p->has_rx_tstamp = false;
 751		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
 752		rxx_frm_ctl.s.ptp_mode = 0;
 753		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
 754		break;
 755	case HWTSTAMP_FILTER_ALL:
 756	case HWTSTAMP_FILTER_SOME:
 757	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 758	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 759	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 760	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 761	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
 762	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 763	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
 764	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
 765	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
 766	case HWTSTAMP_FILTER_PTP_V2_EVENT:
 767	case HWTSTAMP_FILTER_PTP_V2_SYNC:
 768	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 769		p->has_rx_tstamp = have_hw_timestamps;
 770		config.rx_filter = HWTSTAMP_FILTER_ALL;
 771		if (p->has_rx_tstamp) {
 772			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
 773			rxx_frm_ctl.s.ptp_mode = 1;
 774			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
 775		}
 776		break;
 777	default:
 778		return -ERANGE;
 779	}
 780
 781	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
 782		return -EFAULT;
 783
 784	return 0;
 785}
 786
 787static int octeon_mgmt_ioctl(struct net_device *netdev,
 788			     struct ifreq *rq, int cmd)
 789{
 790	struct octeon_mgmt *p = netdev_priv(netdev);
 791
 792	switch (cmd) {
 793	case SIOCSHWTSTAMP:
 794		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
 795	default:
 796		if (p->phydev)
 797			return phy_mii_ioctl(p->phydev, rq, cmd);
 798		return -EINVAL;
 799	}
 800}
 801
 802static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
 803{
 804	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 805
 806	/* Disable GMX before we make any changes. */
 807	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 808	prtx_cfg.s.en = 0;
 809	prtx_cfg.s.tx_en = 0;
 810	prtx_cfg.s.rx_en = 0;
 811	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
 812
 813	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
 814		int i;
 815		for (i = 0; i < 10; i++) {
 816			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 817			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
 818				break;
 819			mdelay(1);
 821		}
 822	}
 823}
 824
 825static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
 826{
 827	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 828
 829	/* Restore the GMX enable state only if link is set */
 830	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 831	prtx_cfg.s.tx_en = 1;
 832	prtx_cfg.s.rx_en = 1;
 833	prtx_cfg.s.en = 1;
 834	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
 835}
 836
 837static void octeon_mgmt_update_link(struct octeon_mgmt *p)
 838{
 839	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 840
 841	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 842
 843	if (!p->phydev->link)
 844		prtx_cfg.s.duplex = 1;
 845	else
 846		prtx_cfg.s.duplex = p->phydev->duplex;
 847
 848	switch (p->phydev->speed) {
 849	case 10:
 850		prtx_cfg.s.speed = 0;
 851		prtx_cfg.s.slottime = 0;
 852
 853		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
 854			prtx_cfg.s.burst = 1;
 855			prtx_cfg.s.speed_msb = 1;
 856		}
 857		break;
 858	case 100:
 859		prtx_cfg.s.speed = 0;
 860		prtx_cfg.s.slottime = 0;
 861
 862		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
 863			prtx_cfg.s.burst = 1;
 864			prtx_cfg.s.speed_msb = 0;
 865		}
 866		break;
 867	case 1000:
 868		/* 1000 Mbps is only supported on 6XXX chips */
 869		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
 870			prtx_cfg.s.speed = 1;
 871			prtx_cfg.s.speed_msb = 0;
 872			/* Only matters for half-duplex */
 873			prtx_cfg.s.slottime = 1;
 874			prtx_cfg.s.burst = p->phydev->duplex;
 875		}
 876		break;
 877	case 0:  /* No link */
 878	default:
 879		break;
 880	}
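	/* Summarizing the settings above for CN6XXX: {speed_msb, speed} is
	 * {1, 0} at 10 Mbps, {0, 0} at 100 Mbps and {0, 1} at 1000 Mbps.
	 */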
 881
 882	/* Write the new GMX setting with the port still disabled. */
 883	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
 884
 885	/* Read GMX CFG again to make sure the config is completed. */
 886	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 887
 888	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
 889		union cvmx_agl_gmx_txx_clk agl_clk;
 890		union cvmx_agl_prtx_ctl prtx_ctl;
 891
 892		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
 893		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
 894		/* MII (both speeds) and RGMII 1000 speed. */
 895		agl_clk.s.clk_cnt = 1;
 896		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
 897			if (p->phydev->speed == 10)
 898				agl_clk.s.clk_cnt = 50;
 899			else if (p->phydev->speed == 100)
 900				agl_clk.s.clk_cnt = 5;
 901		}
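		/* Assuming clk_cnt divides the 125 MHz reference, these
		 * values give the usual RGMII TX clocks: 125 / 50 = 2.5 MHz
		 * at 10 Mbps, 125 / 5 = 25 MHz at 100 Mbps and 125 / 1 =
		 * 125 MHz at 1000 Mbps (and in MII mode).
		 */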
 902		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
 903	}
 904}
 905
 906static void octeon_mgmt_adjust_link(struct net_device *netdev)
 907{
 908	struct octeon_mgmt *p = netdev_priv(netdev);
 909	unsigned long flags;
 910	int link_changed = 0;
 911
 912	if (!p->phydev)
 913		return;
 914
 915	spin_lock_irqsave(&p->lock, flags);
 916
 917
 918	if (!p->phydev->link && p->last_link)
 919		link_changed = -1;
 920
 921	if (p->phydev->link
 922	    && (p->last_duplex != p->phydev->duplex
 923		|| p->last_link != p->phydev->link
 924		|| p->last_speed != p->phydev->speed)) {
 925		octeon_mgmt_disable_link(p);
 926		link_changed = 1;
 927		octeon_mgmt_update_link(p);
 928		octeon_mgmt_enable_link(p);
 929	}
 930
 931	p->last_link = p->phydev->link;
 932	p->last_speed = p->phydev->speed;
 933	p->last_duplex = p->phydev->duplex;
 934
 935	spin_unlock_irqrestore(&p->lock, flags);
 936
 937	if (link_changed != 0) {
 938		if (link_changed > 0) {
 939			pr_info("%s: Link is up - %d/%s\n", netdev->name,
 940				p->phydev->speed,
 941				DUPLEX_FULL == p->phydev->duplex ?
 942				"Full" : "Half");
 943		} else {
 944			pr_info("%s: Link is down\n", netdev->name);
 945		}
 946	}
 947}
 948
 949static int octeon_mgmt_init_phy(struct net_device *netdev)
 950{
 951	struct octeon_mgmt *p = netdev_priv(netdev);
 952
 953	if (octeon_is_simulation() || p->phy_np == NULL) {
 954		/* No PHYs in the simulator. */
 955		netif_carrier_on(netdev);
 956		return 0;
 957	}
 958
 959	p->phydev = of_phy_connect(netdev, p->phy_np,
 960				   octeon_mgmt_adjust_link, 0,
 961				   PHY_INTERFACE_MODE_MII);
 962
 963	if (!p->phydev)
 964		return -ENODEV;
 965
 966	return 0;
 967}
 968
 969static int octeon_mgmt_open(struct net_device *netdev)
 970{
 971	struct octeon_mgmt *p = netdev_priv(netdev);
 972	union cvmx_mixx_ctl mix_ctl;
 973	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
 974	union cvmx_mixx_oring1 oring1;
 975	union cvmx_mixx_iring1 iring1;
 976	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
 977	union cvmx_mixx_irhwm mix_irhwm;
 978	union cvmx_mixx_orhwm mix_orhwm;
 979	union cvmx_mixx_intena mix_intena;
 980	struct sockaddr sa;
 981
 982	/* Allocate ring buffers.  */
 983	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
 984			     GFP_KERNEL);
 985	if (!p->tx_ring)
 986		return -ENOMEM;
 987	p->tx_ring_handle =
 988		dma_map_single(p->dev, p->tx_ring,
 989			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
 990			       DMA_BIDIRECTIONAL);
 991	p->tx_next = 0;
 992	p->tx_next_clean = 0;
 993	p->tx_current_fill = 0;
 994
 995
 996	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
 997			     GFP_KERNEL);
 998	if (!p->rx_ring)
 999		goto err_nomem;
1000	p->rx_ring_handle =
1001		dma_map_single(p->dev, p->rx_ring,
1002			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1003			       DMA_BIDIRECTIONAL);
1004
1005	p->rx_next = 0;
1006	p->rx_next_fill = 0;
1007	p->rx_current_fill = 0;
1008
1009	octeon_mgmt_reset_hw(p);
1010
1011	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
1012
1013	/* Bring it out of reset if needed. */
1014	if (mix_ctl.s.reset) {
1015		mix_ctl.s.reset = 0;
1016		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
1017		do {
1018			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
1019		} while (mix_ctl.s.reset);
1020	}
1021
1022	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
1023		agl_gmx_inf_mode.u64 = 0;
1024		agl_gmx_inf_mode.s.en = 1;
1025		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
1026	}
1027	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
1028		|| OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
1029		/* Force compensation values, as they are not
1030		 * determined properly by HW
1031		 */
1032		union cvmx_agl_gmx_drv_ctl drv_ctl;
1033
1034		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
1035		if (p->port) {
1036			drv_ctl.s.byp_en1 = 1;
1037			drv_ctl.s.nctl1 = 6;
1038			drv_ctl.s.pctl1 = 6;
1039		} else {
1040			drv_ctl.s.byp_en = 1;
1041			drv_ctl.s.nctl = 6;
1042			drv_ctl.s.pctl = 6;
1043		}
1044		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
1045	}
1046
1047	oring1.u64 = 0;
1048	oring1.s.obase = p->tx_ring_handle >> 3;
1049	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
1050	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);
1051
1052	iring1.u64 = 0;
1053	iring1.s.ibase = p->rx_ring_handle >> 3;
1054	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
1055	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
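	/* The ring base addresses are apparently programmed in units of
	 * 8-byte words, hence the >> 3; kzalloc() returns memory that is at
	 * least 8-byte aligned, so no low bits are lost.
	 */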
1056
1057	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
1058	octeon_mgmt_set_mac_address(netdev, &sa);
1059
1060	octeon_mgmt_change_mtu(netdev, netdev->mtu);
1061
1062	/* Enable the port HW. Packets are not allowed until
1063	 * cvmx_mgmt_port_enable() is called.
1064	 */
1065	mix_ctl.u64 = 0;
1066	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
1067	mix_ctl.s.en = 1;           /* Enable the port */
1068	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
1069	/* MII CB-request FIFO programmable high watermark */
1070	mix_ctl.s.mrq_hwm = 1;
1071#ifdef __LITTLE_ENDIAN
1072	mix_ctl.s.lendian = 1;
1073#endif
1074	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
1075
1076	/* Read the PHY to find the mode of the interface. */
1077	if (octeon_mgmt_init_phy(netdev)) {
1078		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
1079		goto err_noirq;
1080	}
1081
1082	/* Set the mode of the interface, RGMII/MII. */
1083	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
1084		union cvmx_agl_prtx_ctl agl_prtx_ctl;
1085		int rgmii_mode = (p->phydev->supported &
1086				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
1087
1088		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1089		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
1090		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
1091
1092		/* MII clock counts are based on the 125 MHz
1093		 * reference, which has an 8 ns period, so our delays
1094		 * need to be multiplied by this factor.
1095		 */
1096#define NS_PER_PHY_CLK 8
1097
1098		/* Take the DLL and clock tree out of reset */
1099		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1100		agl_prtx_ctl.s.clkrst = 0;
1101		if (rgmii_mode) {
1102			agl_prtx_ctl.s.dllrst = 0;
1103			agl_prtx_ctl.s.clktx_byp = 0;
1104		}
1105		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
1106		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
1107
1108		/* Wait for the DLL to lock. External 125 MHz
1109		 * reference clock must be stable at this point.
1110		 */
1111		ndelay(256 * NS_PER_PHY_CLK);
1112
1113		/* Enable the interface */
1114		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1115		agl_prtx_ctl.s.enable = 1;
1116		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1117
1118		/* Read the value back to force the previous write */
1119		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1120
1121		/* Enable the compensation controller */
1122		agl_prtx_ctl.s.comp = 1;
1123		agl_prtx_ctl.s.drv_byp = 0;
1124		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
1125		/* Force write out before wait. */
1126		cvmx_read_csr(p->agl_prt_ctl);
1127
1128		/* Wait for the compensation state to lock. */
1129		ndelay(1040 * NS_PER_PHY_CLK);
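		/* With NS_PER_PHY_CLK = 8 the two waits above come to
		 * 256 * 8 = 2048 ns for the DLL lock and 1040 * 8 = 8320 ns
		 * for the compensation state.
		 */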
1130
1131		/* The default interframe gaps are too small.  The
1132		 * recommended workaround is:
1133		 *
1134		 * AGL_GMX_TX_IFG[IFG1]=14
1135		 * AGL_GMX_TX_IFG[IFG2]=10
1136		 */
1137		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
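		/* 0xae presumably encodes IFG1 = 0xe = 14 in bits 3:0 and
		 * IFG2 = 0xa = 10 in bits 7:4, matching the values quoted
		 * above.
		 */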
1138	}
1139
1140	octeon_mgmt_rx_fill_ring(netdev);
1141
1142	/* Clear statistics. */
1143	/* Clear on read. */
1144	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
1145	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
1146	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
1147
1148	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
1149	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
1150	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
1151
1152	/* Clear any pending interrupts */
1153	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
1154
1155	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
1156			netdev)) {
1157		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
1158		goto err_noirq;
1159	}
1160
1161	/* Interrupt on every single RX packet */
1162	mix_irhwm.u64 = 0;
1163	mix_irhwm.s.irhwm = 0;
1164	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
1165
1166	/* Interrupt when we have 1 or more packets to clean.  */
1167	mix_orhwm.u64 = 0;
1168	mix_orhwm.s.orhwm = 0;
1169	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
1170
1171	/* Enable receive and transmit interrupts */
1172	mix_intena.u64 = 0;
1173	mix_intena.s.ithena = 1;
1174	mix_intena.s.othena = 1;
1175	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
1176
1177	/* Enable packet I/O. */
1178
1179	rxx_frm_ctl.u64 = 0;
1180	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
1181	rxx_frm_ctl.s.pre_align = 1;
1182	/* When set, disables the length check for non-min sized pkts
1183	 * with padding in the client data.
1184	 */
1185	rxx_frm_ctl.s.pad_len = 1;
1186	/* When set, disables the length check for VLAN pkts */
1187	rxx_frm_ctl.s.vlan_len = 1;
1188	/* When set, PREAMBLE checking is less strict */
1189	rxx_frm_ctl.s.pre_free = 1;
1190	/* Control Pause Frames can match station SMAC */
1191	rxx_frm_ctl.s.ctl_smac = 0;
1192	/* Control Pause Frames can match the globally assigned multicast address */
1193	rxx_frm_ctl.s.ctl_mcst = 1;
1194	/* Forward pause information to TX block */
1195	rxx_frm_ctl.s.ctl_bck = 1;
1196	/* Drop Control Pause Frames */
1197	rxx_frm_ctl.s.ctl_drp = 1;
1198	/* Strip off the preamble */
1199	rxx_frm_ctl.s.pre_strp = 1;
1200	/* This port is configured to send PREAMBLE+SFD to begin every
1201	 * frame.  GMX checks that the PREAMBLE is sent correctly.
1202	 */
1203	rxx_frm_ctl.s.pre_chk = 1;
1204	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
1205
1206	/* Configure the port duplex, speed and enables */
1207	octeon_mgmt_disable_link(p);
1208	if (p->phydev)
1209		octeon_mgmt_update_link(p);
1210	octeon_mgmt_enable_link(p);
1211
1212	p->last_link = 0;
1213	p->last_speed = 0;
1214	/* The PHY is not present in the simulator.  The carrier was
1215	 * enabled while initializing the PHY for the simulator; leave it enabled.
1216	 */
1217	if (p->phydev) {
1218		netif_carrier_off(netdev);
1219		phy_start_aneg(p->phydev);
1220	}
1221
1222	netif_wake_queue(netdev);
1223	napi_enable(&p->napi);
1224
1225	return 0;
1226err_noirq:
1227	octeon_mgmt_reset_hw(p);
1228	dma_unmap_single(p->dev, p->rx_ring_handle,
1229			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1230			 DMA_BIDIRECTIONAL);
1231	kfree(p->rx_ring);
1232err_nomem:
1233	dma_unmap_single(p->dev, p->tx_ring_handle,
1234			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1235			 DMA_BIDIRECTIONAL);
1236	kfree(p->tx_ring);
1237	return -ENOMEM;
1238}
1239
1240static int octeon_mgmt_stop(struct net_device *netdev)
1241{
1242	struct octeon_mgmt *p = netdev_priv(netdev);
1243
1244	napi_disable(&p->napi);
1245	netif_stop_queue(netdev);
1246
1247	if (p->phydev)
1248		phy_disconnect(p->phydev);
1249	p->phydev = NULL;
1250
1251	netif_carrier_off(netdev);
1252
1253	octeon_mgmt_reset_hw(p);
1254
1255	free_irq(p->irq, netdev);
1256
1257	/* dma_unmap is a nop on Octeon, so just free everything.  */
1258	skb_queue_purge(&p->tx_list);
1259	skb_queue_purge(&p->rx_list);
1260
1261	dma_unmap_single(p->dev, p->rx_ring_handle,
1262			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1263			 DMA_BIDIRECTIONAL);
1264	kfree(p->rx_ring);
1265
1266	dma_unmap_single(p->dev, p->tx_ring_handle,
1267			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1268			 DMA_BIDIRECTIONAL);
1269	kfree(p->tx_ring);
1270
1271	return 0;
1272}
1273
1274static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
1275{
1276	struct octeon_mgmt *p = netdev_priv(netdev);
1277	union mgmt_port_ring_entry re;
1278	unsigned long flags;
1279	int rv = NETDEV_TX_BUSY;
1280
1281	re.d64 = 0;
1282	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
1283	re.s.len = skb->len;
1284	re.s.addr = dma_map_single(p->dev, skb->data,
1285				   skb->len,
1286				   DMA_TO_DEVICE);
1287
1288	spin_lock_irqsave(&p->tx_list.lock, flags);
1289
1290	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
1291		spin_unlock_irqrestore(&p->tx_list.lock, flags);
1292		netif_stop_queue(netdev);
1293		spin_lock_irqsave(&p->tx_list.lock, flags);
1294	}
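	/* The queue is stopped just before the ring is completely full and
	 * the fill level is then re-checked with the lock re-taken: the
	 * TX-clean path may have freed entries (and will wake the queue)
	 * while the lock was dropped.  Only if the ring is still full below
	 * is the frame unmapped and NETDEV_TX_BUSY returned.
	 */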
1295
1296	if (unlikely(p->tx_current_fill >=
1297		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
1298		spin_unlock_irqrestore(&p->tx_list.lock, flags);
1299		dma_unmap_single(p->dev, re.s.addr, re.s.len,
1300				 DMA_TO_DEVICE);
1301		goto out;
1302	}
1303
1304	__skb_queue_tail(&p->tx_list, skb);
1305
1306	/* Put it in the ring.  */
1307	p->tx_ring[p->tx_next] = re.d64;
1308	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
1309	p->tx_current_fill++;
1310
1311	spin_unlock_irqrestore(&p->tx_list.lock, flags);
1312
1313	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
1314				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1315				   DMA_BIDIRECTIONAL);
1316
1317	netdev->stats.tx_packets++;
1318	netdev->stats.tx_bytes += skb->len;
1319
1320	/* Ring the bell.  */
1321	cvmx_write_csr(p->mix + MIX_ORING2, 1);
1322
1323	netdev->trans_start = jiffies;
1324	rv = NETDEV_TX_OK;
1325out:
1326	octeon_mgmt_update_tx_stats(netdev);
1327	return rv;
1328}
1329
1330#ifdef CONFIG_NET_POLL_CONTROLLER
1331static void octeon_mgmt_poll_controller(struct net_device *netdev)
1332{
1333	struct octeon_mgmt *p = netdev_priv(netdev);
1334
1335	octeon_mgmt_receive_packets(p, 16);
1336	octeon_mgmt_update_rx_stats(netdev);
1337}
1338#endif
1339
1340static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1341				    struct ethtool_drvinfo *info)
1342{
1343	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1344	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1345	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
1346	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
1347}
1348
1349static int octeon_mgmt_get_settings(struct net_device *netdev,
1350				    struct ethtool_cmd *cmd)
1351{
1352	struct octeon_mgmt *p = netdev_priv(netdev);
1353
1354	if (p->phydev)
1355		return phy_ethtool_gset(p->phydev, cmd);
1356
1357	return -EOPNOTSUPP;
1358}
1359
1360static int octeon_mgmt_set_settings(struct net_device *netdev,
1361				    struct ethtool_cmd *cmd)
1362{
1363	struct octeon_mgmt *p = netdev_priv(netdev);
1364
1365	if (!capable(CAP_NET_ADMIN))
1366		return -EPERM;
1367
1368	if (p->phydev)
1369		return phy_ethtool_sset(p->phydev, cmd);
1370
1371	return -EOPNOTSUPP;
1372}
1373
1374static int octeon_mgmt_nway_reset(struct net_device *dev)
1375{
1376	struct octeon_mgmt *p = netdev_priv(dev);
1377
1378	if (!capable(CAP_NET_ADMIN))
1379		return -EPERM;
1380
1381	if (p->phydev)
1382		return phy_start_aneg(p->phydev);
1383
1384	return -EOPNOTSUPP;
1385}
1386
1387static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1388	.get_drvinfo = octeon_mgmt_get_drvinfo,
1389	.get_settings = octeon_mgmt_get_settings,
1390	.set_settings = octeon_mgmt_set_settings,
1391	.nway_reset = octeon_mgmt_nway_reset,
1392	.get_link = ethtool_op_get_link,
1393};
1394
1395static const struct net_device_ops octeon_mgmt_ops = {
1396	.ndo_open =			octeon_mgmt_open,
1397	.ndo_stop =			octeon_mgmt_stop,
1398	.ndo_start_xmit =		octeon_mgmt_xmit,
1399	.ndo_set_rx_mode =		octeon_mgmt_set_rx_filtering,
1400	.ndo_set_mac_address =		octeon_mgmt_set_mac_address,
1401	.ndo_do_ioctl =			octeon_mgmt_ioctl,
1402	.ndo_change_mtu =		octeon_mgmt_change_mtu,
1403#ifdef CONFIG_NET_POLL_CONTROLLER
1404	.ndo_poll_controller =		octeon_mgmt_poll_controller,
1405#endif
1406};
1407
1408static int octeon_mgmt_probe(struct platform_device *pdev)
1409{
1410	struct net_device *netdev;
1411	struct octeon_mgmt *p;
1412	const __be32 *data;
1413	const u8 *mac;
1414	struct resource *res_mix;
1415	struct resource *res_agl;
1416	struct resource *res_agl_prt_ctl;
1417	int len;
1418	int result;
1419
1420	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
1421	if (netdev == NULL)
1422		return -ENOMEM;
1423
1424	SET_NETDEV_DEV(netdev, &pdev->dev);
1425
1426	platform_set_drvdata(pdev, netdev);
1427	p = netdev_priv(netdev);
1428	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
1429		       OCTEON_MGMT_NAPI_WEIGHT);
1430
1431	p->netdev = netdev;
1432	p->dev = &pdev->dev;
1433	p->has_rx_tstamp = false;
1434
1435	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
1436	if (data && len == sizeof(*data)) {
1437		p->port = be32_to_cpup(data);
1438	} else {
1439		dev_err(&pdev->dev, "no 'cell-index' property\n");
1440		result = -ENXIO;
1441		goto err;
1442	}
1443
1444	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
1445
1446	result = platform_get_irq(pdev, 0);
1447	if (result < 0)
1448		goto err;
1449
1450	p->irq = result;
1451
1452	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1453	if (res_mix == NULL) {
1454		dev_err(&pdev->dev, "no 'reg' resource\n");
1455		result = -ENXIO;
1456		goto err;
1457	}
1458
1459	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1460	if (res_agl == NULL) {
1461		dev_err(&pdev->dev, "no 'reg' resource\n");
1462		result = -ENXIO;
1463		goto err;
1464	}
1465
1466	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1467	if (res_agl_prt_ctl == NULL) {
1468		dev_err(&pdev->dev, "no 'reg' resource\n");
1469		result = -ENXIO;
1470		goto err;
1471	}
1472
1473	p->mix_phys = res_mix->start;
1474	p->mix_size = resource_size(res_mix);
1475	p->agl_phys = res_agl->start;
1476	p->agl_size = resource_size(res_agl);
1477	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
1478	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
1479
1480
1481	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
1482				     res_mix->name)) {
1483		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1484			res_mix->name);
1485		result = -ENXIO;
1486		goto err;
1487	}
1488
1489	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
1490				     res_agl->name)) {
1491		result = -ENXIO;
1492		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1493			res_agl->name);
1494		goto err;
1495	}
1496
1497	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
1498				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
1499		result = -ENXIO;
1500		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1501			res_agl_prt_ctl->name);
1502		goto err;
1503	}
1504
1505	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
1506	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
1507	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
1508					   p->agl_prt_ctl_size);
1509	spin_lock_init(&p->lock);
1510
1511	skb_queue_head_init(&p->tx_list);
1512	skb_queue_head_init(&p->rx_list);
1513	tasklet_init(&p->tx_clean_tasklet,
1514		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
1515
1516	netdev->priv_flags |= IFF_UNICAST_FLT;
1517
1518	netdev->netdev_ops = &octeon_mgmt_ops;
1519	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1520
1521	mac = of_get_mac_address(pdev->dev.of_node);
1522
1523	if (mac)
1524		memcpy(netdev->dev_addr, mac, ETH_ALEN);
1525	else
1526		eth_hw_addr_random(netdev);
1527
1528	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1529
1530	result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1531	if (result)
1532		goto err;
1533
1534	netif_carrier_off(netdev);
1535	result = register_netdev(netdev);
1536	if (result)
1537		goto err;
1538
1539	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
1540	return 0;
1541
1542err:
1543	free_netdev(netdev);
1544	return result;
1545}
1546
1547static int octeon_mgmt_remove(struct platform_device *pdev)
1548{
1549	struct net_device *netdev = platform_get_drvdata(pdev);
1550
1551	unregister_netdev(netdev);
1552	free_netdev(netdev);
1553	return 0;
1554}
1555
1556static const struct of_device_id octeon_mgmt_match[] = {
1557	{
1558		.compatible = "cavium,octeon-5750-mix",
1559	},
1560	{},
1561};
1562MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
1563
1564static struct platform_driver octeon_mgmt_driver = {
1565	.driver = {
1566		.name		= "octeon_mgmt",
1567		.of_match_table = octeon_mgmt_match,
1568	},
1569	.probe		= octeon_mgmt_probe,
1570	.remove		= octeon_mgmt_remove,
1571};
1572
1573extern void octeon_mdiobus_force_mod_depencency(void);
1574
1575static int __init octeon_mgmt_mod_init(void)
1576{
1577	/* Force our mdiobus driver module to be loaded first. */
1578	octeon_mdiobus_force_mod_depencency();
1579	return platform_driver_register(&octeon_mgmt_driver);
1580}
1581
1582static void __exit octeon_mgmt_mod_exit(void)
1583{
1584	platform_driver_unregister(&octeon_mgmt_driver);
1585}
1586
1587module_init(octeon_mgmt_mod_init);
1588module_exit(octeon_mgmt_mod_exit);
1589
1590MODULE_DESCRIPTION(DRV_DESCRIPTION);
1591MODULE_AUTHOR("David Daney");
1592MODULE_LICENSE("GPL");
1593MODULE_VERSION(DRV_VERSION);