   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 2009 Cavium Networks
   7 */
   8
   9#include <linux/capability.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/init.h>
  12#include <linux/module.h>
  13#include <linux/interrupt.h>
  14#include <linux/platform_device.h>
  15#include <linux/netdevice.h>
  16#include <linux/etherdevice.h>
  17#include <linux/if.h>
  18#include <linux/if_vlan.h>
  19#include <linux/slab.h>
  20#include <linux/phy.h>
  21#include <linux/spinlock.h>
  22
  23#include <asm/octeon/octeon.h>
  24#include <asm/octeon/cvmx-mixx-defs.h>
  25#include <asm/octeon/cvmx-agl-defs.h>
  26
  27#define DRV_NAME "octeon_mgmt"
  28#define DRV_VERSION "2.0"
  29#define DRV_DESCRIPTION \
  30	"Cavium Networks Octeon MII (management) port Network Driver"
  31
  32#define OCTEON_MGMT_NAPI_WEIGHT 16
  33
  34/*
  35 * Ring sizes that are powers of two allow for more efficient modulo
  36 * operations.
  37 */
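/*
 * For example, because the sizes below are powers of two, the index
 * wrap-around
 *	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
 * can be compiled down to a simple mask:
 *	p->rx_next = (p->rx_next + 1) & (OCTEON_MGMT_RX_RING_SIZE - 1);
 */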
  38#define OCTEON_MGMT_RX_RING_SIZE 512
  39#define OCTEON_MGMT_TX_RING_SIZE 128
  40
  41/* Headroom for the Ethernet header plus 8 bytes of VLAN tag and FCS. */
  42#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
  43
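/*
 * A single 64-bit descriptor as consumed/produced by the MIX DMA
 * engine.  With the big-endian bitfield layout used on Octeon the
 * fields land at [63:62] reserved, [61:48] len, [47] tstamp,
 * [46:40] code and [39:0] addr.
 */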
  44union mgmt_port_ring_entry {
  45	u64 d64;
  46	struct {
  47		u64    reserved_62_63:2;
  48		/* Length of the buffer/packet in bytes */
  49		u64    len:14;
  50		/* For TX, signals that the packet should be timestamped */
  51		u64    tstamp:1;
  52		/* The RX error code */
  53		u64    code:7;
  54#define RING_ENTRY_CODE_DONE 0xf
  55#define RING_ENTRY_CODE_MORE 0x10
  56		/* Physical address of the buffer */
  57		u64    addr:40;
  58	} s;
  59};
  60
  61struct octeon_mgmt {
  62	struct net_device *netdev;
  63	int port;
  64	int irq;
  65	u64 *tx_ring;
  66	dma_addr_t tx_ring_handle;
  67	unsigned int tx_next;
  68	unsigned int tx_next_clean;
  69	unsigned int tx_current_fill;
  70	/* The tx_list lock also protects the ring related variables */
  71	struct sk_buff_head tx_list;
  72
  73	/* RX variables only touched in napi_poll.  No locking necessary. */
  74	u64 *rx_ring;
  75	dma_addr_t rx_ring_handle;
  76	unsigned int rx_next;
  77	unsigned int rx_next_fill;
  78	unsigned int rx_current_fill;
  79	struct sk_buff_head rx_list;
  80
  81	spinlock_t lock;
  82	unsigned int last_duplex;
  83	unsigned int last_link;
  84	struct device *dev;
  85	struct napi_struct napi;
  86	struct tasklet_struct tx_clean_tasklet;
  87	struct phy_device *phydev;
  88};
  89
  90static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
  91{
  92	int port = p->port;
  93	union cvmx_mixx_intena mix_intena;
  94	unsigned long flags;
  95
  96	spin_lock_irqsave(&p->lock, flags);
  97	mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
  98	mix_intena.s.ithena = enable ? 1 : 0;
  99	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
 100	spin_unlock_irqrestore(&p->lock, flags);
 101}
 102
 103static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
 104{
 105	int port = p->port;
 106	union cvmx_mixx_intena mix_intena;
 107	unsigned long flags;
 108
 109	spin_lock_irqsave(&p->lock, flags);
 110	mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
 111	mix_intena.s.othena = enable ? 1 : 0;
 112	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
 113	spin_unlock_irqrestore(&p->lock, flags);
 114}
 115
 116static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
 117{
 118	octeon_mgmt_set_rx_irq(p, 1);
 119}
 120
 121static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
 122{
 123	octeon_mgmt_set_rx_irq(p, 0);
 124}
 125
 126static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
 127{
 128	octeon_mgmt_set_tx_irq(p, 1);
 129}
 130
 131static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
 132{
 133	octeon_mgmt_set_tx_irq(p, 0);
 134}
 135
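/*
 * Never fill a ring completely; at least eight entries are always
 * left unused.
 */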
 136static unsigned int ring_max_fill(unsigned int ring_size)
 137{
 138	return ring_size - 8;
 139}
 140
 141static unsigned int ring_size_to_bytes(unsigned int ring_size)
 142{
 143	return ring_size * sizeof(union mgmt_port_ring_entry);
 144}
 145
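/*
 * Refill the RX descriptor ring: allocate skbs sized for the current
 * MTU, DMA-map them, queue them on rx_list in descriptor order and
 * ring the MIXX_IRING2 doorbell once for each buffer added.
 */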
 146static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
 147{
 148	struct octeon_mgmt *p = netdev_priv(netdev);
 149	int port = p->port;
 150
 151	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
 152		unsigned int size;
 153		union mgmt_port_ring_entry re;
 154		struct sk_buff *skb;
 155
 156		/* CN56XX pass 1 needs 8 bytes of padding.  */
 157		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
 158
 159		skb = netdev_alloc_skb(netdev, size);
 160		if (!skb)
 161			break;
 162		skb_reserve(skb, NET_IP_ALIGN);
 163		__skb_queue_tail(&p->rx_list, skb);
 164
 165		re.d64 = 0;
 166		re.s.len = size;
 167		re.s.addr = dma_map_single(p->dev, skb->data,
 168					   size,
 169					   DMA_FROM_DEVICE);
 170
 171		/* Put it in the ring.  */
 172		p->rx_ring[p->rx_next_fill] = re.d64;
 173		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
 174					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
 175					   DMA_BIDIRECTIONAL);
 176		p->rx_next_fill =
 177			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
 178		p->rx_current_fill++;
 179		/* Ring the bell.  */
 180		cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
 181	}
 182}
 183
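/*
 * Reclaim completed TX buffers.  MIXX_ORCNT reports how many
 * descriptors the hardware has finished with; each one is acknowledged
 * back to ORCNT, its skb unmapped and freed, and the queue is woken
 * if it had been stopped.
 */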
 184static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
 185{
 186	int port = p->port;
 187	union cvmx_mixx_orcnt mix_orcnt;
 188	union mgmt_port_ring_entry re;
 189	struct sk_buff *skb;
 190	int cleaned = 0;
 191	unsigned long flags;
 192
 193	mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
 194	while (mix_orcnt.s.orcnt) {
 195		spin_lock_irqsave(&p->tx_list.lock, flags);
 196
 197		mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
 198
 199		if (mix_orcnt.s.orcnt == 0) {
 200			spin_unlock_irqrestore(&p->tx_list.lock, flags);
 201			break;
 202		}
 203
 204		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
 205					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
 206					DMA_BIDIRECTIONAL);
 207
 208		re.d64 = p->tx_ring[p->tx_next_clean];
 209		p->tx_next_clean =
 210			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
 211		skb = __skb_dequeue(&p->tx_list);
 212
 213		mix_orcnt.u64 = 0;
 214		mix_orcnt.s.orcnt = 1;
 215
 216		/* Acknowledge to hardware that we have the buffer.  */
 217		cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
 218		p->tx_current_fill--;
 219
 220		spin_unlock_irqrestore(&p->tx_list.lock, flags);
 221
 222		dma_unmap_single(p->dev, re.s.addr, re.s.len,
 223				 DMA_TO_DEVICE);
 224		dev_kfree_skb_any(skb);
 225		cleaned++;
 226
 227		mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
 228	}
 229
 230	if (cleaned && netif_queue_stopped(p->netdev))
 231		netif_wake_queue(p->netdev);
 232}
 233
 234static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
 235{
 236	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
 237	octeon_mgmt_clean_tx_buffers(p);
 238	octeon_mgmt_enable_tx_irq(p);
 239}
 240
 241static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
 242{
 243	struct octeon_mgmt *p = netdev_priv(netdev);
 244	int port = p->port;
 245	unsigned long flags;
 246	u64 drop, bad;
 247
 248	/* These reads also clear the count registers.  */
 249	drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
 250	bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));
 251
 252	if (drop || bad) {
 253		/* Do an atomic update. */
 254		spin_lock_irqsave(&p->lock, flags);
 255		netdev->stats.rx_errors += bad;
 256		netdev->stats.rx_dropped += drop;
 257		spin_unlock_irqrestore(&p->lock, flags);
 258	}
 259}
 260
 261static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
 262{
 263	struct octeon_mgmt *p = netdev_priv(netdev);
 264	int port = p->port;
 265	unsigned long flags;
 266
 267	union cvmx_agl_gmx_txx_stat0 s0;
 268	union cvmx_agl_gmx_txx_stat1 s1;
 269
 270	/* These reads also clear the count registers.  */
 271	s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
 272	s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));
 273
 274	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
 275		/* Do an atomic update. */
 276		spin_lock_irqsave(&p->lock, flags);
 277		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
 278		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
 279		spin_unlock_irqrestore(&p->lock, flags);
 280	}
 281}
 282
 283/*
 284 * Dequeue a receive skb and its corresponding ring entry.  The ring
 285 * entry is returned, *pskb is updated to point to the skb.
 286 */
 287static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
 288					 struct sk_buff **pskb)
 289{
 290	union mgmt_port_ring_entry re;
 291
 292	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
 293				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
 294				DMA_BIDIRECTIONAL);
 295
 296	re.d64 = p->rx_ring[p->rx_next];
 297	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
 298	p->rx_current_fill--;
 299	*pskb = __skb_dequeue(&p->rx_list);
 300
 301	dma_unmap_single(p->dev, re.s.addr,
 302			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
 303			 DMA_FROM_DEVICE);
 304
 305	return re.d64;
 306}
 307
 308
 309static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
 310{
 311	int port = p->port;
 312	struct net_device *netdev = p->netdev;
 313	union cvmx_mixx_ircnt mix_ircnt;
 314	union mgmt_port_ring_entry re;
 315	struct sk_buff *skb;
 316	struct sk_buff *skb2;
 317	struct sk_buff *skb_new;
 318	union mgmt_port_ring_entry re2;
 319	int rc = 1;
 320
 321
 322	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
 323	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
 324		/* A good packet, send it up. */
 325		skb_put(skb, re.s.len);
 326good:
 327		skb->protocol = eth_type_trans(skb, netdev);
 328		netdev->stats.rx_packets++;
 329		netdev->stats.rx_bytes += skb->len;
 330		netif_receive_skb(skb);
 331		rc = 0;
 332	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
 333		/*
 334		 * Packet split across skbs.  This can happen if we
 335		 * increase the MTU.  Buffers that are already in the
 336		 * rx ring can then end up being too small.  As the rx
 337		 * ring is refilled, buffers sized for the new MTU
 338		 * will be used and we should go back to the normal
 339		 * non-split case.
 340		 */
 341		skb_put(skb, re.s.len);
 342		do {
 343			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
 344			if (re2.s.code != RING_ENTRY_CODE_MORE
 345				&& re2.s.code != RING_ENTRY_CODE_DONE)
 346				goto split_error;
  347			skb_put(skb2, re2.s.len);
 348			skb_new = skb_copy_expand(skb, 0, skb2->len,
 349						  GFP_ATOMIC);
 350			if (!skb_new)
 351				goto split_error;
 352			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
 353					  skb2->len))
 354				goto split_error;
 355			skb_put(skb_new, skb2->len);
 356			dev_kfree_skb_any(skb);
 357			dev_kfree_skb_any(skb2);
 358			skb = skb_new;
 359		} while (re2.s.code == RING_ENTRY_CODE_MORE);
 360		goto good;
 361	} else {
 362		/* Some other error, discard it. */
 363		dev_kfree_skb_any(skb);
 364		/*
 365		 * Error statistics are accumulated in
 366		 * octeon_mgmt_update_rx_stats.
 367		 */
 368	}
 369	goto done;
 370split_error:
 371	/* Discard the whole mess. */
 372	dev_kfree_skb_any(skb);
 373	dev_kfree_skb_any(skb2);
 374	while (re2.s.code == RING_ENTRY_CODE_MORE) {
 375		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
 376		dev_kfree_skb_any(skb2);
 377	}
 378	netdev->stats.rx_errors++;
 379
 380done:
 381	/* Tell the hardware we processed a packet.  */
 382	mix_ircnt.u64 = 0;
 383	mix_ircnt.s.ircnt = 1;
 384	cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
 385	return rc;
 386}
 387
 388static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
 389{
 390	int port = p->port;
 391	unsigned int work_done = 0;
 392	union cvmx_mixx_ircnt mix_ircnt;
 393	int rc;
 394
 395	mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
 396	while (work_done < budget && mix_ircnt.s.ircnt) {
 397
 398		rc = octeon_mgmt_receive_one(p);
 399		if (!rc)
 400			work_done++;
 401
 402		/* Check for more packets. */
 403		mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
 404	}
 405
 406	octeon_mgmt_rx_fill_ring(p->netdev);
 407
 408	return work_done;
 409}
 410
 411static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
 412{
 413	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
 414	struct net_device *netdev = p->netdev;
 415	unsigned int work_done = 0;
 416
 417	work_done = octeon_mgmt_receive_packets(p, budget);
 418
 419	if (work_done < budget) {
 420		/* We stopped because no more packets were available. */
 421		napi_complete(napi);
 422		octeon_mgmt_enable_rx_irq(p);
 423	}
 424	octeon_mgmt_update_rx_stats(netdev);
 425
 426	return work_done;
 427}
 428
 429/* Reset the hardware to clean state.  */
 430static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
 431{
 432	union cvmx_mixx_ctl mix_ctl;
 433	union cvmx_mixx_bist mix_bist;
 434	union cvmx_agl_gmx_bist agl_gmx_bist;
 435
 436	mix_ctl.u64 = 0;
 437	cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
 438	do {
 439		mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
 440	} while (mix_ctl.s.busy);
 441	mix_ctl.s.reset = 1;
 442	cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
 443	cvmx_read_csr(CVMX_MIXX_CTL(p->port));
 444	cvmx_wait(64);
 445
 446	mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
 447	if (mix_bist.u64)
 448		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
 449			(unsigned long long)mix_bist.u64);
 450
 451	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
 452	if (agl_gmx_bist.u64)
 453		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
 454			 (unsigned long long)agl_gmx_bist.u64);
 455}
 456
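/*
 * Staging area for the AGL RX address CAM.  Byte i of every CAM entry
 * is packed into the single 64-bit CAM<i> register (one byte per
 * entry, up to eight entries), and cam_mask enables the entries that
 * were actually filled in.
 */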
 457struct octeon_mgmt_cam_state {
 458	u64 cam[6];
 459	u64 cam_mask;
 460	int cam_index;
 461};
 462
 463static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
 464				      unsigned char *addr)
 465{
 466	int i;
 467
 468	for (i = 0; i < 6; i++)
 469		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
 470	cs->cam_mask |= (1ULL << cs->cam_index);
 471	cs->cam_index++;
 472}
 473
 474static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
 475{
 476	struct octeon_mgmt *p = netdev_priv(netdev);
 477	int port = p->port;
 478	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
 479	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
 480	unsigned long flags;
 481	unsigned int prev_packet_enable;
 482	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
 483	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
 484	struct octeon_mgmt_cam_state cam_state;
 485	struct netdev_hw_addr *ha;
 486	int available_cam_entries;
 487
 488	memset(&cam_state, 0, sizeof(cam_state));
 489
 490	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
 491		cam_mode = 0;
 492		available_cam_entries = 8;
 493	} else {
 494		/*
  495		 * One CAM entry is used for the primary address, leaving
  496		 * seven for the secondary addresses.
 497		 */
 498		available_cam_entries = 7 - netdev->uc.count;
 499	}
 500
 501	if (netdev->flags & IFF_MULTICAST) {
 502		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
 503		    netdev_mc_count(netdev) > available_cam_entries)
 504			multicast_mode = 2; /* 2 - Accept all multicast.  */
 505		else
 506			multicast_mode = 0; /* 0 - Use CAM.  */
 507	}
 508
 509	if (cam_mode == 1) {
 510		/* Add primary address. */
 511		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
 512		netdev_for_each_uc_addr(ha, netdev)
 513			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
 514	}
 515	if (multicast_mode == 0) {
 516		netdev_for_each_mc_addr(ha, netdev)
 517			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
 518	}
 519
 520	spin_lock_irqsave(&p->lock, flags);
 521
 522	/* Disable packet I/O. */
 523	agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
 524	prev_packet_enable = agl_gmx_prtx.s.en;
 525	agl_gmx_prtx.s.en = 0;
 526	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
 527
 528	adr_ctl.u64 = 0;
 529	adr_ctl.s.cam_mode = cam_mode;
 530	adr_ctl.s.mcst = multicast_mode;
 531	adr_ctl.s.bcst = 1;     /* Allow broadcast */
 532
 533	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);
 534
 535	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
 536	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
 537	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
 538	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
 539	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
 540	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
 541	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);
 542
 543	/* Restore packet I/O. */
 544	agl_gmx_prtx.s.en = prev_packet_enable;
 545	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
 546
 547	spin_unlock_irqrestore(&p->lock, flags);
 548}
 549
 550static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
 551{
 552	struct sockaddr *sa = addr;
 553
 554	if (!is_valid_ether_addr(sa->sa_data))
 555		return -EADDRNOTAVAIL;
 556
 557	memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
 558
 559	octeon_mgmt_set_rx_filtering(netdev);
 560
 561	return 0;
 562}
 563
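/*
 * The hardware length checks are derived from the MTU:
 * AGL_GMX_RXX_FRM_MAX is set to MTU plus the header/VLAN/FCS headroom,
 * and the jabber threshold is that value rounded up to a multiple of 8.
 */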
 564static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
 565{
 566	struct octeon_mgmt *p = netdev_priv(netdev);
 567	int port = p->port;
 568	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
 569
 570	/*
 571	 * Limit the MTU to make sure the ethernet packets are between
 572	 * 64 bytes and 16383 bytes.
 573	 */
 574	if (size_without_fcs < 64 || size_without_fcs > 16383) {
 575		dev_warn(p->dev, "MTU must be between %d and %d.\n",
 576			 64 - OCTEON_MGMT_RX_HEADROOM,
 577			 16383 - OCTEON_MGMT_RX_HEADROOM);
 578		return -EINVAL;
 579	}
 580
 581	netdev->mtu = new_mtu;
 582
 583	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
 584	cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
 585		       (size_without_fcs + 7) & 0xfff8);
 586
 587	return 0;
 588}
 589
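/*
 * MIX interrupt handler.  An RX threshold interrupt masks further RX
 * interrupts and hands the work to NAPI; a TX threshold interrupt
 * masks TX interrupts and schedules the TX-clean tasklet.
 */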
 590static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
 591{
 592	struct net_device *netdev = dev_id;
 593	struct octeon_mgmt *p = netdev_priv(netdev);
 594	int port = p->port;
 595	union cvmx_mixx_isr mixx_isr;
 596
 597	mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
 598
 599	/* Clear any pending interrupts */
 600	cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64);
 601	cvmx_read_csr(CVMX_MIXX_ISR(port));
 602
 603	if (mixx_isr.s.irthresh) {
 604		octeon_mgmt_disable_rx_irq(p);
 605		napi_schedule(&p->napi);
 606	}
 607	if (mixx_isr.s.orthresh) {
 608		octeon_mgmt_disable_tx_irq(p);
 609		tasklet_schedule(&p->tx_clean_tasklet);
 610	}
 611
 612	return IRQ_HANDLED;
 613}
 614
 615static int octeon_mgmt_ioctl(struct net_device *netdev,
 616			     struct ifreq *rq, int cmd)
 617{
 618	struct octeon_mgmt *p = netdev_priv(netdev);
 619
 620	if (!netif_running(netdev))
 621		return -EINVAL;
 622
 623	if (!p->phydev)
 624		return -EINVAL;
 625
 626	return phy_mii_ioctl(p->phydev, rq, cmd);
 627}
 628
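/*
 * phylib link-change callback: mirror the PHY duplex setting into
 * AGL_GMX_PRTX_CFG and update the netif carrier state.
 */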
 629static void octeon_mgmt_adjust_link(struct net_device *netdev)
 630{
 631	struct octeon_mgmt *p = netdev_priv(netdev);
 632	int port = p->port;
 633	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 634	unsigned long flags;
 635	int link_changed = 0;
 636
 637	spin_lock_irqsave(&p->lock, flags);
 638	if (p->phydev->link) {
 639		if (!p->last_link)
 640			link_changed = 1;
 641		if (p->last_duplex != p->phydev->duplex) {
 642			p->last_duplex = p->phydev->duplex;
 643			prtx_cfg.u64 =
 644				cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
 645			prtx_cfg.s.duplex = p->phydev->duplex;
 646			cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
 647				       prtx_cfg.u64);
 648		}
 649	} else {
 650		if (p->last_link)
 651			link_changed = -1;
 652	}
 653	p->last_link = p->phydev->link;
 654	spin_unlock_irqrestore(&p->lock, flags);
 655
 656	if (link_changed != 0) {
 657		if (link_changed > 0) {
 658			netif_carrier_on(netdev);
 659			pr_info("%s: Link is up - %d/%s\n", netdev->name,
 660				p->phydev->speed,
 661				DUPLEX_FULL == p->phydev->duplex ?
 662				"Full" : "Half");
 663		} else {
 664			netif_carrier_off(netdev);
 665			pr_info("%s: Link is down\n", netdev->name);
 666		}
 667	}
 668}
 669
 670static int octeon_mgmt_init_phy(struct net_device *netdev)
 671{
 672	struct octeon_mgmt *p = netdev_priv(netdev);
 673	char phy_id[MII_BUS_ID_SIZE + 3];
 674
 675	if (octeon_is_simulation()) {
 676		/* No PHYs in the simulator. */
 677		netif_carrier_on(netdev);
 678		return 0;
 679	}
 680
 681	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "mdio-octeon-0", p->port);
 682
 683	p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
 684				PHY_INTERFACE_MODE_MII);
 685
 686	if (IS_ERR(p->phydev)) {
 687		p->phydev = NULL;
 688		return -1;
 689	}
 690
 691	phy_start_aneg(p->phydev);
 692
 693	return 0;
 694}
 695
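/*
 * ndo_open: allocate and DMA-map the TX/RX descriptor rings, bring the
 * MIX/AGL blocks out of reset, program the ring bases, MAC address,
 * MTU and RX frame control, request the MIX IRQ, set the interrupt
 * watermarks (every received packet; one or more completed TX
 * descriptors), connect the PHY and enable NAPI.
 */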
 696static int octeon_mgmt_open(struct net_device *netdev)
 697{
 698	struct octeon_mgmt *p = netdev_priv(netdev);
 699	int port = p->port;
 700	union cvmx_mixx_ctl mix_ctl;
 701	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
 702	union cvmx_mixx_oring1 oring1;
 703	union cvmx_mixx_iring1 iring1;
 704	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 705	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
 706	union cvmx_mixx_irhwm mix_irhwm;
 707	union cvmx_mixx_orhwm mix_orhwm;
 708	union cvmx_mixx_intena mix_intena;
 709	struct sockaddr sa;
 710
 711	/* Allocate ring buffers.  */
 712	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
 713			     GFP_KERNEL);
 714	if (!p->tx_ring)
 715		return -ENOMEM;
 716	p->tx_ring_handle =
 717		dma_map_single(p->dev, p->tx_ring,
 718			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
 719			       DMA_BIDIRECTIONAL);
 720	p->tx_next = 0;
 721	p->tx_next_clean = 0;
 722	p->tx_current_fill = 0;
 723
 724
 725	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
 726			     GFP_KERNEL);
 727	if (!p->rx_ring)
 728		goto err_nomem;
 729	p->rx_ring_handle =
 730		dma_map_single(p->dev, p->rx_ring,
 731			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
 732			       DMA_BIDIRECTIONAL);
 733
 734	p->rx_next = 0;
 735	p->rx_next_fill = 0;
 736	p->rx_current_fill = 0;
 737
 738	octeon_mgmt_reset_hw(p);
 739
 740	mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
 741
 742	/* Bring it out of reset if needed. */
 743	if (mix_ctl.s.reset) {
 744		mix_ctl.s.reset = 0;
 745		cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
 746		do {
 747			mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
 748		} while (mix_ctl.s.reset);
 749	}
 750
 751	agl_gmx_inf_mode.u64 = 0;
 752	agl_gmx_inf_mode.s.en = 1;
 753	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
 754
 755	oring1.u64 = 0;
 756	oring1.s.obase = p->tx_ring_handle >> 3;
 757	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
 758	cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);
 759
 760	iring1.u64 = 0;
 761	iring1.s.ibase = p->rx_ring_handle >> 3;
 762	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
 763	cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
 764
 765	/* Disable packet I/O. */
 766	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
 767	prtx_cfg.s.en = 0;
 768	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
 769
 770	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
 771	octeon_mgmt_set_mac_address(netdev, &sa);
 772
 773	octeon_mgmt_change_mtu(netdev, netdev->mtu);
 774
 775	/*
  776	 * Enable the port HW.  Packet I/O stays disabled here; the AGL
  777	 * port is configured and enabled near the end of this function.
 778	 */
 779	mix_ctl.u64 = 0;
 780	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
 781	mix_ctl.s.en = 1;           /* Enable the port */
 782	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
 783	/* MII CB-request FIFO programmable high watermark */
 784	mix_ctl.s.mrq_hwm = 1;
 785	cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
 786
 787	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
 788	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
 789		/*
 790		 * Force compensation values, as they are not
 791		 * determined properly by HW
 792		 */
 793		union cvmx_agl_gmx_drv_ctl drv_ctl;
 794
 795		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
 796		if (port) {
 797			drv_ctl.s.byp_en1 = 1;
 798			drv_ctl.s.nctl1 = 6;
 799			drv_ctl.s.pctl1 = 6;
 800		} else {
 801			drv_ctl.s.byp_en = 1;
 802			drv_ctl.s.nctl = 6;
 803			drv_ctl.s.pctl = 6;
 804		}
 805		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
 806	}
 807
 808	octeon_mgmt_rx_fill_ring(netdev);
 809
 810	/* Clear statistics. */
 811	/* Clear on read. */
 812	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
 813	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
 814	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);
 815
 816	cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
 817	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
 818	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);
 819
 820	/* Clear any pending interrupts */
 821	cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));
 822
 823	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
 824			netdev)) {
 825		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
 826		goto err_noirq;
 827	}
 828
 829	/* Interrupt every single RX packet */
 830	mix_irhwm.u64 = 0;
 831	mix_irhwm.s.irhwm = 0;
 832	cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
 833
 834	/* Interrupt when we have 1 or more packets to clean.  */
 835	mix_orhwm.u64 = 0;
 836	mix_orhwm.s.orhwm = 1;
 837	cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
 838
 839	/* Enable receive and transmit interrupts */
 840	mix_intena.u64 = 0;
 841	mix_intena.s.ithena = 1;
 842	mix_intena.s.othena = 1;
 843	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
 844
 845
 846	/* Enable packet I/O. */
 847
 848	rxx_frm_ctl.u64 = 0;
 849	rxx_frm_ctl.s.pre_align = 1;
 850	/*
 851	 * When set, disables the length check for non-min sized pkts
 852	 * with padding in the client data.
 853	 */
 854	rxx_frm_ctl.s.pad_len = 1;
 855	/* When set, disables the length check for VLAN pkts */
 856	rxx_frm_ctl.s.vlan_len = 1;
  857	/* When set, PREAMBLE checking is less strict */
 858	rxx_frm_ctl.s.pre_free = 1;
 859	/* Control Pause Frames can match station SMAC */
 860	rxx_frm_ctl.s.ctl_smac = 0;
  861	/* Control Pause Frames can match globally assigned Multicast address */
 862	rxx_frm_ctl.s.ctl_mcst = 1;
 863	/* Forward pause information to TX block */
 864	rxx_frm_ctl.s.ctl_bck = 1;
 865	/* Drop Control Pause Frames */
 866	rxx_frm_ctl.s.ctl_drp = 1;
 867	/* Strip off the preamble */
 868	rxx_frm_ctl.s.pre_strp = 1;
 869	/*
 870	 * This port is configured to send PREAMBLE+SFD to begin every
 871	 * frame.  GMX checks that the PREAMBLE is sent correctly.
 872	 */
 873	rxx_frm_ctl.s.pre_chk = 1;
 874	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
 875
 876	/* Enable the AGL block */
 877	agl_gmx_inf_mode.u64 = 0;
 878	agl_gmx_inf_mode.s.en = 1;
 879	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
 880
 881	/* Configure the port duplex and enables */
 882	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
 883	prtx_cfg.s.tx_en = 1;
 884	prtx_cfg.s.rx_en = 1;
 885	prtx_cfg.s.en = 1;
 886	p->last_duplex = 1;
 887	prtx_cfg.s.duplex = p->last_duplex;
 888	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
 889
 890	p->last_link = 0;
 891	netif_carrier_off(netdev);
 892
 893	if (octeon_mgmt_init_phy(netdev)) {
 894		dev_err(p->dev, "Cannot initialize PHY.\n");
 895		goto err_noirq;
 896	}
 897
 898	netif_wake_queue(netdev);
 899	napi_enable(&p->napi);
 900
 901	return 0;
 902err_noirq:
 903	octeon_mgmt_reset_hw(p);
 904	dma_unmap_single(p->dev, p->rx_ring_handle,
 905			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
 906			 DMA_BIDIRECTIONAL);
 907	kfree(p->rx_ring);
 908err_nomem:
 909	dma_unmap_single(p->dev, p->tx_ring_handle,
 910			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
 911			 DMA_BIDIRECTIONAL);
 912	kfree(p->tx_ring);
 913	return -ENOMEM;
 914}
 915
 916static int octeon_mgmt_stop(struct net_device *netdev)
 917{
 918	struct octeon_mgmt *p = netdev_priv(netdev);
 919
 920	napi_disable(&p->napi);
 921	netif_stop_queue(netdev);
 922
 923	if (p->phydev)
 924		phy_disconnect(p->phydev);
 925
 926	netif_carrier_off(netdev);
 927
 928	octeon_mgmt_reset_hw(p);
 929
 930	free_irq(p->irq, netdev);
 931
 932	/* dma_unmap is a nop on Octeon, so just free everything.  */
 933	skb_queue_purge(&p->tx_list);
 934	skb_queue_purge(&p->rx_list);
 935
 936	dma_unmap_single(p->dev, p->rx_ring_handle,
 937			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
 938			 DMA_BIDIRECTIONAL);
 939	kfree(p->rx_ring);
 940
 941	dma_unmap_single(p->dev, p->tx_ring_handle,
 942			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
 943			 DMA_BIDIRECTIONAL);
 944	kfree(p->tx_ring);
 945
 946	return 0;
 947}
 948
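/*
 * ndo_start_xmit: DMA-map the skb, append a descriptor to the TX ring
 * and ring the MIXX_ORING2 doorbell.  When the ring is nearly full the
 * queue is stopped and the fill level re-checked under tx_list.lock;
 * if the ring really is full the mapping is undone and NETDEV_TX_BUSY
 * is returned.
 */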
 949static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
 950{
 951	struct octeon_mgmt *p = netdev_priv(netdev);
 952	int port = p->port;
 953	union mgmt_port_ring_entry re;
 954	unsigned long flags;
 955	int rv = NETDEV_TX_BUSY;
 956
 957	re.d64 = 0;
 958	re.s.len = skb->len;
 959	re.s.addr = dma_map_single(p->dev, skb->data,
 960				   skb->len,
 961				   DMA_TO_DEVICE);
 962
 963	spin_lock_irqsave(&p->tx_list.lock, flags);
 964
 965	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
 966		spin_unlock_irqrestore(&p->tx_list.lock, flags);
 967		netif_stop_queue(netdev);
 968		spin_lock_irqsave(&p->tx_list.lock, flags);
 969	}
 970
 971	if (unlikely(p->tx_current_fill >=
 972		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
 973		spin_unlock_irqrestore(&p->tx_list.lock, flags);
 974		dma_unmap_single(p->dev, re.s.addr, re.s.len,
 975				 DMA_TO_DEVICE);
 976		goto out;
 977	}
 978
 979	__skb_queue_tail(&p->tx_list, skb);
 980
 981	/* Put it in the ring.  */
 982	p->tx_ring[p->tx_next] = re.d64;
 983	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
 984	p->tx_current_fill++;
 985
 986	spin_unlock_irqrestore(&p->tx_list.lock, flags);
 987
 988	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
 989				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
 990				   DMA_BIDIRECTIONAL);
 991
 992	netdev->stats.tx_packets++;
 993	netdev->stats.tx_bytes += skb->len;
 994
 995	/* Ring the bell.  */
 996	cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
 997
 998	rv = NETDEV_TX_OK;
 999out:
1000	octeon_mgmt_update_tx_stats(netdev);
1001	return rv;
1002}
1003
1004#ifdef CONFIG_NET_POLL_CONTROLLER
1005static void octeon_mgmt_poll_controller(struct net_device *netdev)
1006{
1007	struct octeon_mgmt *p = netdev_priv(netdev);
1008
1009	octeon_mgmt_receive_packets(p, 16);
1010	octeon_mgmt_update_rx_stats(netdev);
1011}
1012#endif
1013
1014static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1015				    struct ethtool_drvinfo *info)
1016{
1017	strncpy(info->driver, DRV_NAME, sizeof(info->driver));
1018	strncpy(info->version, DRV_VERSION, sizeof(info->version));
1019	strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
1020	strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
1021	info->n_stats = 0;
1022	info->testinfo_len = 0;
1023	info->regdump_len = 0;
1024	info->eedump_len = 0;
1025}
1026
1027static int octeon_mgmt_get_settings(struct net_device *netdev,
1028				    struct ethtool_cmd *cmd)
1029{
1030	struct octeon_mgmt *p = netdev_priv(netdev);
1031
1032	if (p->phydev)
1033		return phy_ethtool_gset(p->phydev, cmd);
1034
1035	return -EINVAL;
1036}
1037
1038static int octeon_mgmt_set_settings(struct net_device *netdev,
1039				    struct ethtool_cmd *cmd)
1040{
1041	struct octeon_mgmt *p = netdev_priv(netdev);
1042
1043	if (!capable(CAP_NET_ADMIN))
1044		return -EPERM;
1045
1046	if (p->phydev)
1047		return phy_ethtool_sset(p->phydev, cmd);
1048
1049	return -EINVAL;
1050}
1051
1052static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1053	.get_drvinfo = octeon_mgmt_get_drvinfo,
1054	.get_link = ethtool_op_get_link,
1055	.get_settings = octeon_mgmt_get_settings,
1056	.set_settings = octeon_mgmt_set_settings
1057};
1058
1059static const struct net_device_ops octeon_mgmt_ops = {
1060	.ndo_open =			octeon_mgmt_open,
1061	.ndo_stop =			octeon_mgmt_stop,
1062	.ndo_start_xmit =		octeon_mgmt_xmit,
1063	.ndo_set_rx_mode = 		octeon_mgmt_set_rx_filtering,
1064	.ndo_set_mac_address =		octeon_mgmt_set_mac_address,
1065	.ndo_do_ioctl = 		octeon_mgmt_ioctl,
1066	.ndo_change_mtu =		octeon_mgmt_change_mtu,
1067#ifdef CONFIG_NET_POLL_CONTROLLER
1068	.ndo_poll_controller =		octeon_mgmt_poll_controller,
1069#endif
1070};
1071
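/*
 * pdev->id selects the management port.  The MAC address is taken from
 * the bootloader-supplied base MAC, with the port number added to the
 * final octet.
 */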
1072static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
1073{
1074	struct resource *res_irq;
1075	struct net_device *netdev;
1076	struct octeon_mgmt *p;
1077	int i;
1078
1079	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
1080	if (netdev == NULL)
1081		return -ENOMEM;
1082
1083	dev_set_drvdata(&pdev->dev, netdev);
1084	p = netdev_priv(netdev);
1085	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
1086		       OCTEON_MGMT_NAPI_WEIGHT);
1087
1088	p->netdev = netdev;
1089	p->dev = &pdev->dev;
1090
1091	p->port = pdev->id;
1092	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
1093
1094	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1095	if (!res_irq)
1096		goto err;
1097
1098	p->irq = res_irq->start;
1099	spin_lock_init(&p->lock);
1100
1101	skb_queue_head_init(&p->tx_list);
1102	skb_queue_head_init(&p->rx_list);
1103	tasklet_init(&p->tx_clean_tasklet,
1104		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
1105
1106	netdev->priv_flags |= IFF_UNICAST_FLT;
1107
1108	netdev->netdev_ops = &octeon_mgmt_ops;
1109	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1110
1111	/* The mgmt ports get the first N MACs.  */
1112	for (i = 0; i < 6; i++)
1113		netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
1114	netdev->dev_addr[5] += p->port;
1115
1116	if (p->port >= octeon_bootinfo->mac_addr_count)
1117		dev_err(&pdev->dev,
1118			"Error %s: Using MAC outside of the assigned range: %pM\n",
1119			netdev->name, netdev->dev_addr);
1120
1121	if (register_netdev(netdev))
1122		goto err;
1123
1124	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
1125	return 0;
1126err:
1127	free_netdev(netdev);
1128	return -ENOENT;
1129}
1130
1131static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
1132{
1133	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
1134
1135	unregister_netdev(netdev);
1136	free_netdev(netdev);
1137	return 0;
1138}
1139
1140static struct platform_driver octeon_mgmt_driver = {
1141	.driver = {
1142		.name		= "octeon_mgmt",
1143		.owner		= THIS_MODULE,
1144	},
1145	.probe		= octeon_mgmt_probe,
1146	.remove		= __devexit_p(octeon_mgmt_remove),
1147};
1148
1149extern void octeon_mdiobus_force_mod_depencency(void);
1150
1151static int __init octeon_mgmt_mod_init(void)
1152{
1153	/* Force our mdiobus driver module to be loaded first. */
1154	octeon_mdiobus_force_mod_depencency();
1155	return platform_driver_register(&octeon_mgmt_driver);
1156}
1157
1158static void __exit octeon_mgmt_mod_exit(void)
1159{
1160	platform_driver_unregister(&octeon_mgmt_driver);
1161}
1162
1163module_init(octeon_mgmt_mod_init);
1164module_exit(octeon_mgmt_mod_exit);
1165
1166MODULE_DESCRIPTION(DRV_DESCRIPTION);
1167MODULE_AUTHOR("David Daney");
1168MODULE_LICENSE("GPL");
1169MODULE_VERSION(DRV_VERSION);