Linux Audio

Check our new training course

Loading...
Note: File does not exist in v4.17.
   1/*****************************************************************************
   2 *                                                                           *
   3 * File: cxgb2.c                                                             *
   4 * $Revision: 1.25 $                                                         *
   5 * $Date: 2005/06/22 00:43:25 $                                              *
   6 * Description:                                                              *
   7 *  Chelsio 10Gb Ethernet Driver.                                            *
   8 *                                                                           *
   9 * This program is free software; you can redistribute it and/or modify      *
  10 * it under the terms of the GNU General Public License, version 2, as       *
  11 * published by the Free Software Foundation.                                *
  12 *                                                                           *
  13 * You should have received a copy of the GNU General Public License along   *
  14 * with this program; if not, write to the Free Software Foundation, Inc.,   *
  15 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
  16 *                                                                           *
  17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
  19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
  20 *                                                                           *
  21 * http://www.chelsio.com                                                    *
  22 *                                                                           *
  23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
  24 * All rights reserved.                                                      *
  25 *                                                                           *
  26 * Maintainers: maintainers@chelsio.com                                      *
  27 *                                                                           *
  28 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
  29 *          Tina Yang               <tainay@chelsio.com>                     *
  30 *          Felix Marti             <felix@chelsio.com>                      *
  31 *          Scott Bardone           <sbardone@chelsio.com>                   *
  32 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
  33 *          Frank DiMambro          <frank@chelsio.com>                      *
  34 *                                                                           *
  35 * History:                                                                  *
  36 *                                                                           *
  37 ****************************************************************************/
  38
  39#include "common.h"
  40#include <linux/module.h>
  41#include <linux/init.h>
  42#include <linux/pci.h>
  43#include <linux/netdevice.h>
  44#include <linux/etherdevice.h>
  45#include <linux/if_vlan.h>
  46#include <linux/mii.h>
  47#include <linux/sockios.h>
  48#include <linux/dma-mapping.h>
  49#include <asm/uaccess.h>
  50
  51#include "cpl5_cmd.h"
  52#include "regs.h"
  53#include "gmac.h"
  54#include "cphy.h"
  55#include "sge.h"
  56#include "tp.h"
  57#include "espi.h"
  58#include "elmer0.h"
  59
  60#include <linux/workqueue.h>
  61
  62static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
  63{
  64	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
  65}
  66
/* Cancel a pending periodic MAC statistics refresh, if one is queued. */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}
  71
/* Ring-size bounds used when validating ethtool ring-parameter requests. */
#define MAX_CMDQ_ENTRIES	16384
#define MAX_CMDQ1_ENTRIES	1024
#define MAX_RX_BUFFERS		16384
#define MAX_RX_JUMBO_BUFFERS	16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MAX_TX_BUFFERS		1460U
#define MIN_FL_ENTRIES		32

/* Default netif message-enable bitmap for newly probed adapters. */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

/* Clock selector arguments for t1_clock(). */
#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

/* Set non-zero to force legacy (INTx) interrupts instead of MSI. */
static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

/* PCI bus speed strings (MHz), indexed by the detected speed code. */
static const char pci_speed[][4] = {
	"33", "66", "100", "133"
};
 117
 118/*
 119 * Setup MAC to receive the types of packets we want.
 120 */
 121static void t1_set_rxmode(struct net_device *dev)
 122{
 123	struct adapter *adapter = dev->ml_priv;
 124	struct cmac *mac = adapter->port[dev->if_port].mac;
 125	struct t1_rx_mode rm;
 126
 127	rm.dev = dev;
 128	mac->ops->set_rx_mode(mac, &rm);
 129}
 130
 131static void link_report(struct port_info *p)
 132{
 133	if (!netif_carrier_ok(p->dev))
 134		printk(KERN_INFO "%s: link down\n", p->dev->name);
 135	else {
 136		const char *s = "10Mbps";
 137
 138		switch (p->link_config.speed) {
 139			case SPEED_10000: s = "10Gbps"; break;
 140			case SPEED_1000:  s = "1000Mbps"; break;
 141			case SPEED_100:   s = "100Mbps"; break;
 142		}
 143
 144		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
 145		       p->dev->name, s,
 146		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
 147	}
 148}
 149
 150void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
 151			int speed, int duplex, int pause)
 152{
 153	struct port_info *p = &adapter->port[port_id];
 154
 155	if (link_stat != netif_carrier_ok(p->dev)) {
 156		if (link_stat)
 157			netif_carrier_on(p->dev);
 158		else
 159			netif_carrier_off(p->dev);
 160		link_report(p);
 161
 162		/* multi-ports: inform toe */
 163		if ((speed > 0) && (adapter->params.nports > 1)) {
 164			unsigned int sched_speed = 10;
 165			switch (speed) {
 166			case SPEED_1000:
 167				sched_speed = 1000;
 168				break;
 169			case SPEED_100:
 170				sched_speed = 100;
 171				break;
 172			case SPEED_10:
 173				sched_speed = 10;
 174				break;
 175			}
 176			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
 177		}
 178	}
 179}
 180
 181static void link_start(struct port_info *p)
 182{
 183	struct cmac *mac = p->mac;
 184
 185	mac->ops->reset(mac);
 186	if (mac->ops->macaddress_set)
 187		mac->ops->macaddress_set(mac, p->dev->dev_addr);
 188	t1_set_rxmode(p->dev);
 189	t1_link_start(p->phy, mac, &p->link_config);
 190	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 191}
 192
 193static void enable_hw_csum(struct adapter *adapter)
 194{
 195	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
 196		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
 197	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
 198}
 199
/*
 * Things to do upon first use of a card.
 * Bring the hardware modules up (once per adapter lifetime), clear and
 * re-arm interrupts, then start the SGE.  Returns 0 or a negative errno.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	/* One-time hardware initialisation, done on the very first open. */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	/* Prefer MSI unless disabled by module parameter or unavailable. */
	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		/* Undo the MSI enable if the IRQ could not be claimed. */
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}
 235
 236/*
 237 * Release resources when all the ports have been stopped.
 238 */
 239static void cxgb_down(struct adapter *adapter)
 240{
 241	t1_sge_stop(adapter->sge);
 242	t1_interrupts_disable(adapter);
 243	free_irq(adapter->pdev->irq, adapter);
 244	if (adapter->params.has_msi)
 245		pci_disable_msi(adapter->pdev);
 246}
 247
 248static int cxgb_open(struct net_device *dev)
 249{
 250	int err;
 251	struct adapter *adapter = dev->ml_priv;
 252	int other_ports = adapter->open_device_map & PORT_MASK;
 253
 254	napi_enable(&adapter->napi);
 255	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
 256		napi_disable(&adapter->napi);
 257		return err;
 258	}
 259
 260	__set_bit(dev->if_port, &adapter->open_device_map);
 261	link_start(&adapter->port[dev->if_port]);
 262	netif_start_queue(dev);
 263	if (!other_ports && adapter->params.stats_update_period)
 264		schedule_mac_stats_update(adapter,
 265					  adapter->params.stats_update_period);
 266
 267	t1_vlan_mode(adapter, dev->features);
 268	return 0;
 269}
 270
/*
 * ndo_stop: quiesce the port's queue and MAC, drop it from the open-port
 * map, stop the stats work with the last active port, and tear the
 * adapter down once no ports remain open.
 */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_clear_bit();
		/*
		 * Taking and releasing work_lock lets a mac_stats_task
		 * instance that already passed its open_device_map check
		 * finish (and possibly reschedule) before we cancel the
		 * work below, so no refresh can sneak in afterwards.
		 */
		spin_lock(&adapter->work_lock);   /* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
 296
 297static struct net_device_stats *t1_get_stats(struct net_device *dev)
 298{
 299	struct adapter *adapter = dev->ml_priv;
 300	struct port_info *p = &adapter->port[dev->if_port];
 301	struct net_device_stats *ns = &p->netstats;
 302	const struct cmac_statistics *pstats;
 303
 304	/* Do a full update of the MAC stats */
 305	pstats = p->mac->ops->statistics_update(p->mac,
 306						MAC_STATS_UPDATE_FULL);
 307
 308	ns->tx_packets = pstats->TxUnicastFramesOK +
 309		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
 310
 311	ns->rx_packets = pstats->RxUnicastFramesOK +
 312		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
 313
 314	ns->tx_bytes = pstats->TxOctetsOK;
 315	ns->rx_bytes = pstats->RxOctetsOK;
 316
 317	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
 318		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
 319	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
 320		pstats->RxFCSErrors + pstats->RxAlignErrors +
 321		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
 322		pstats->RxSymbolErrors + pstats->RxRuntErrors;
 323
 324	ns->multicast  = pstats->RxMulticastFramesOK;
 325	ns->collisions = pstats->TxTotalCollisions;
 326
 327	/* detailed rx_errors */
 328	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
 329		pstats->RxJabberErrors;
 330	ns->rx_over_errors   = 0;
 331	ns->rx_crc_errors    = pstats->RxFCSErrors;
 332	ns->rx_frame_errors  = pstats->RxAlignErrors;
 333	ns->rx_fifo_errors   = 0;
 334	ns->rx_missed_errors = 0;
 335
 336	/* detailed tx_errors */
 337	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
 338	ns->tx_carrier_errors   = 0;
 339	ns->tx_fifo_errors      = pstats->TxUnderrun;
 340	ns->tx_heartbeat_errors = 0;
 341	ns->tx_window_errors    = pstats->TxLateCollisions;
 342	return ns;
 343}
 344
 345static u32 get_msglevel(struct net_device *dev)
 346{
 347	struct adapter *adapter = dev->ml_priv;
 348
 349	return adapter->msg_enable;
 350}
 351
 352static void set_msglevel(struct net_device *dev, u32 val)
 353{
 354	struct adapter *adapter = dev->ml_priv;
 355
 356	adapter->msg_enable = val;
 357}
 358
/*
 * ethtool statistics names.  The order here must match exactly the
 * order in which get_stats() writes the corresponding values.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	/* MAC TX counters */
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	/* MAC RX counters */
	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* Port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	/* ESPI stats (only filled in when the adapter has an ESPI block) */
	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};
 425
/* Size in bytes of the register dump produced by get_regs(). */
#define T2_REGMAP_SIZE (3 * 1024)

/* ethtool: report the register-dump buffer size. */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
 432
 433static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 434{
 435	struct adapter *adapter = dev->ml_priv;
 436
 437	strcpy(info->driver, DRV_NAME);
 438	strcpy(info->version, DRV_VERSION);
 439	strcpy(info->fw_version, "N/A");
 440	strcpy(info->bus_info, pci_name(adapter->pdev));
 441}
 442
 443static int get_sset_count(struct net_device *dev, int sset)
 444{
 445	switch (sset) {
 446	case ETH_SS_STATS:
 447		return ARRAY_SIZE(stats_strings);
 448	default:
 449		return -EOPNOTSUPP;
 450	}
 451}
 452
 453static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
 454{
 455	if (stringset == ETH_SS_STATS)
 456		memcpy(data, stats_strings, sizeof(stats_strings));
 457}
 458
/*
 * ethtool get_ethtool_stats: write one u64 per entry of stats_strings,
 * in exactly that table's order.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	/* MAC TX counters */
	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	/* MAC RX counters */
	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	/* SGE per-port stats */
	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	/* SGE interrupt counters */
	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	/*
	 * NOTE(review): when there is no ESPI block the last six slots
	 * named in stats_strings are left unwritten -- confirm the
	 * caller zeroes the buffer, or these report stale data.
	 */
	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}
 540
 541static inline void reg_block_dump(struct adapter *ap, void *buf,
 542				  unsigned int start, unsigned int end)
 543{
 544	u32 *p = buf + start;
 545
 546	for ( ; start <= end; start += sizeof(u32))
 547		*p++ = readl(ap->regs + start);
 548}
 549
/*
 * ethtool register dump: zero a T2_REGMAP_SIZE buffer, then copy in the
 * selected register ranges (SGE, MC3/MC4, TPI, TP, RAT, CSPI, ESPI,
 * ULP, PL and MC5 blocks, bounded by the A_* offsets below) at their
 * native offsets.
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
 572
 573static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 574{
 575	struct adapter *adapter = dev->ml_priv;
 576	struct port_info *p = &adapter->port[dev->if_port];
 577
 578	cmd->supported = p->link_config.supported;
 579	cmd->advertising = p->link_config.advertising;
 580
 581	if (netif_carrier_ok(dev)) {
 582		ethtool_cmd_speed_set(cmd, p->link_config.speed);
 583		cmd->duplex = p->link_config.duplex;
 584	} else {
 585		ethtool_cmd_speed_set(cmd, -1);
 586		cmd->duplex = -1;
 587	}
 588
 589	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
 590	cmd->phy_address = p->phy->mdio.prtad;
 591	cmd->transceiver = XCVR_EXTERNAL;
 592	cmd->autoneg = p->link_config.autoneg;
 593	cmd->maxtxpkt = 0;
 594	cmd->maxrxpkt = 0;
 595	return 0;
 596}
 597
 598static int speed_duplex_to_caps(int speed, int duplex)
 599{
 600	int cap = 0;
 601
 602	switch (speed) {
 603	case SPEED_10:
 604		if (duplex == DUPLEX_FULL)
 605			cap = SUPPORTED_10baseT_Full;
 606		else
 607			cap = SUPPORTED_10baseT_Half;
 608		break;
 609	case SPEED_100:
 610		if (duplex == DUPLEX_FULL)
 611			cap = SUPPORTED_100baseT_Full;
 612		else
 613			cap = SUPPORTED_100baseT_Half;
 614		break;
 615	case SPEED_1000:
 616		if (duplex == DUPLEX_FULL)
 617			cap = SUPPORTED_1000baseT_Full;
 618		else
 619			cap = SUPPORTED_1000baseT_Half;
 620		break;
 621	case SPEED_10000:
 622		if (duplex == DUPLEX_FULL)
 623			cap = SUPPORTED_10000baseT_Full;
 624	}
 625	return cap;
 626}
 627
/* All link modes the driver is willing to pass through to the PHY. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

/*
 * ethtool set_settings: apply a new speed/duplex/autoneg configuration.
 * Ports without autoneg support reject all changes.  A forced-speed
 * request must match a supported capability, and SPEED_1000 is refused
 * even when supported (NOTE(review): presumably because gigabit must be
 * autonegotiated on this hardware -- confirm).  Restarts the link if
 * the interface is running.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;             /* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		u32 speed = ethtool_cmd_speed(cmd);
		int cap = speed_duplex_to_caps(speed, cmd->duplex);

		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		/* More than one mode requested: advertise all we support. */
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
 667
 668static void get_pauseparam(struct net_device *dev,
 669			   struct ethtool_pauseparam *epause)
 670{
 671	struct adapter *adapter = dev->ml_priv;
 672	struct port_info *p = &adapter->port[dev->if_port];
 673
 674	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
 675	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
 676	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
 677}
 678
/*
 * ethtool set_pauseparam: configure flow control.  Pause autoneg is
 * only valid on autoneg-capable ports and is folded into the next link
 * negotiation; in forced mode the MAC is reprogrammed immediately.
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		/* Renegotiate so the peer learns the new pause settings. */
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		/* Forced mode: apply RX/TX pause to the MAC right away. */
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}
 708
 709static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 710{
 711	struct adapter *adapter = dev->ml_priv;
 712	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
 713
 714	e->rx_max_pending = MAX_RX_BUFFERS;
 715	e->rx_mini_max_pending = 0;
 716	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
 717	e->tx_max_pending = MAX_CMDQ_ENTRIES;
 718
 719	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
 720	e->rx_mini_pending = 0;
 721	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
 722	e->tx_pending = adapter->params.sge.cmdQ_size[0];
 723}
 724
 725static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 726{
 727	struct adapter *adapter = dev->ml_priv;
 728	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
 729
 730	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
 731	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
 732	    e->tx_pending > MAX_CMDQ_ENTRIES ||
 733	    e->rx_pending < MIN_FL_ENTRIES ||
 734	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
 735	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
 736		return -EINVAL;
 737
 738	if (adapter->flags & FULL_INIT_DONE)
 739		return -EBUSY;
 740
 741	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
 742	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
 743	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
 744	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
 745		MAX_CMDQ1_ENTRIES : e->tx_pending;
 746	return 0;
 747}
 748
 749static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 750{
 751	struct adapter *adapter = dev->ml_priv;
 752
 753	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
 754	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
 755	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
 756	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
 757	return 0;
 758}
 759
 760static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 761{
 762	struct adapter *adapter = dev->ml_priv;
 763
 764	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
 765	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
 766	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
 767	return 0;
 768}
 769
 770static int get_eeprom_len(struct net_device *dev)
 771{
 772	struct adapter *adapter = dev->ml_priv;
 773
 774	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
 775}
 776
/* Magic value identifying the adapter family in ethtool EEPROM dumps. */
#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

/*
 * ethtool get_eeprom: read whole aligned 32-bit words covering the
 * requested range into a bounce buffer, then copy out the exact bytes.
 * NOTE(review): there is no local bounds check on e->offset/e->len --
 * presumably the ethtool core clamps them to get_eeprom_len(); confirm.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	/* Round the start down to a word boundary and read word by word. */
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}
 793
/* ethtool entry points shared by all ports of a T1/T2 adapter. */
static const struct ethtool_ops t1_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_sset_count	   = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
};
 815
 816static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 817{
 818	struct adapter *adapter = dev->ml_priv;
 819	struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
 820
 821	return mdio_mii_ioctl(mdio, if_mii(req), cmd);
 822}
 823
 824static int t1_change_mtu(struct net_device *dev, int new_mtu)
 825{
 826	int ret;
 827	struct adapter *adapter = dev->ml_priv;
 828	struct cmac *mac = adapter->port[dev->if_port].mac;
 829
 830	if (!mac->ops->set_mtu)
 831		return -EOPNOTSUPP;
 832	if (new_mtu < 68)
 833		return -EINVAL;
 834	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
 835		return ret;
 836	dev->mtu = new_mtu;
 837	return 0;
 838}
 839
 840static int t1_set_mac_addr(struct net_device *dev, void *p)
 841{
 842	struct adapter *adapter = dev->ml_priv;
 843	struct cmac *mac = adapter->port[dev->if_port].mac;
 844	struct sockaddr *addr = p;
 845
 846	if (!mac->ops->macaddress_set)
 847		return -EOPNOTSUPP;
 848
 849	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 850	mac->ops->macaddress_set(mac, dev->dev_addr);
 851	return 0;
 852}
 853
 854static u32 t1_fix_features(struct net_device *dev, u32 features)
 855{
 856	/*
 857	 * Since there is no support for separate rx/tx vlan accel
 858	 * enable/disable make sure tx flag is always in same state as rx.
 859	 */
 860	if (features & NETIF_F_HW_VLAN_RX)
 861		features |= NETIF_F_HW_VLAN_TX;
 862	else
 863		features &= ~NETIF_F_HW_VLAN_TX;
 864
 865	return features;
 866}
 867
 868static int t1_set_features(struct net_device *dev, u32 features)
 869{
 870	u32 changed = dev->features ^ features;
 871	struct adapter *adapter = dev->ml_priv;
 872
 873	if (changed & NETIF_F_HW_VLAN_RX)
 874		t1_vlan_mode(adapter, features);
 875
 876	return 0;
 877}
 878#ifdef CONFIG_NET_POLL_CONTROLLER
 879static void t1_netpoll(struct net_device *dev)
 880{
 881	unsigned long flags;
 882	struct adapter *adapter = dev->ml_priv;
 883
 884	local_irq_save(flags);
 885	t1_interrupt(adapter->pdev->irq, adapter);
 886	local_irq_restore(flags);
 887}
 888#endif
 889
/*
 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(struct work_struct *work)
{
	int i;
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	/* Fast-path counter refresh for every port that is currently up. */
	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	/*
	 * work_lock pairs with cxgb_close(), which clears the port bit and
	 * then takes/releases this lock before cancelling the work, so a
	 * reschedule decision made here cannot race past the cancel.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}
 915
/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	/*
	 * Write the EXT bit to PL_CAUSE before unmasking so a cause that
	 * was just handled above does not immediately refire
	 * (NOTE(review): presumably write-to-clear -- see regs.h).
	 */
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}
 934
/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 *
	 * NOTE(review): slow_intr_mask is modified here without taking
	 * async_lock (which ext_intr_task holds when it restores the
	 * bit) -- presumably safe in this IRQ context; confirm.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}
 950
 951void t1_fatal_err(struct adapter *adapter)
 952{
 953	if (adapter->flags & FULL_INIT_DONE) {
 954		t1_sge_stop(adapter->sge);
 955		t1_interrupts_disable(adapter);
 956	}
 957	pr_alert("%s: encountered fatal error, operation suspended\n",
 958		 adapter->name);
 959}
 960
/* net_device method table shared by all ports of a T1/T2 adapter. */
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t1_start_xmit,
	.ndo_get_stats		= t1_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= t1_set_rxmode,
	.ndo_do_ioctl		= t1_ioctl,
	.ndo_change_mtu		= t1_change_mtu,
	.ndo_set_mac_address	= t1_set_mac_addr,
	.ndo_fix_features	= t1_fix_features,
	.ndo_set_features	= t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= t1_netpoll,
#endif
};
 977
/*
 * PCI probe handler: bring up one adapter.  Enables the PCI device,
 * configures the DMA mask, maps BAR0, allocates one net_device per
 * port (the adapter private state is carved out of the first one),
 * initializes locks and slow-path work items, and registers as many
 * ports as possible — the probe only fails if no port registers.
 * On error, resources are unwound via the goto-cleanup labels.
 */
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	/* Print the driver banner only on the first probe. */
	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
		       DRV_VERSION);
		++version_printed;
	}

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* BAR0 must be a memory-mapped region (it is ioremap'd below). */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	/* Prefer a 64-bit DMA mask; otherwise fall back to 32-bit. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("%s: unable to obtain 64-bit DMA for "
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		/* Only the first netdev carries the adapter private area;
		 * subsequent ports allocate a zero-sized private area. */
		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		/* One-time adapter setup, done while handling port 0. */
		if (!adapter) {
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	  /* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		/* Per-port netdev setup. */
		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM | NETIF_F_LLTX;

		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
			netdev->features |=
				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
			netdev->hw_features |= NETIF_F_HW_VLAN_RX;

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				netdev->hw_features |= NETIF_F_TSO;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		/* Reserve header room for the CPL TX command (the larger
		 * LSO variant when TSO is enabled). */
		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			pr_warning("%s: cannot register net device %s, skipping\n",
				   pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	/* err here holds the last register_netdev() failure code. */
	if (!adapter->registered_device_map) {
		pr_err("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
	       bi->desc, adapter->params.chip_revision,
	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
	       adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		/* Free in reverse order; never-allocated slots are NULL
		 * because alloc_etherdev() zeroes the private area. */
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1181
1182static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1183{
1184	int data;
1185	int i;
1186	u32 val;
1187
1188	enum {
1189		S_CLOCK = 1 << 3,
1190		S_DATA = 1 << 4
1191	};
1192
1193	for (i = (nbits - 1); i > -1; i--) {
1194
1195		udelay(50);
1196
1197		data = ((bitdata >> i) & 0x1);
1198		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1199
1200		if (data)
1201			val |= S_DATA;
1202		else
1203			val &= ~S_DATA;
1204
1205		udelay(50);
1206
1207		/* Set SCLOCK low */
1208		val &= ~S_CLOCK;
1209		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1210
1211		udelay(50);
1212
1213		/* Write SCLOCK high */
1214		val |= S_CLOCK;
1215		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1216
1217	}
1218}
1219
/*
 * Switch the T1B core and memory clock synthesizers between powersave
 * (LCLOCK) and full-speed (HCLOCK) settings by serially programming
 * them through the ELMER0 GPO pins (see bit_bang()).
 *
 * mode bit 1 means "show current mode" (no change); mode bit 0 selects
 * the target clock mode.  Returns 0 on success, -ENODEV for non-T1B
 * chips, -EALREADY if the ASIC is already running in the requested
 * mode.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	/* Pick the M divider values for the requested mode. */
	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core: pulse S_LOAD_CORE to latch the new values. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory: pulse S_LOAD_MEM to latch the new values. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
1329
1330static inline void t1_sw_reset(struct pci_dev *pdev)
1331{
1332	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1333	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1334}
1335
/*
 * PCI remove handler: undo everything init_one() set up.  Unregisters
 * the successfully registered ports, frees the software modules,
 * unmaps the registers, frees every netdev, releases PCI resources,
 * and finally soft-resets the chip.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	/* i is left at the port count by for_each_port above, so this
	 * walks the ports in reverse, freeing each netdev (including the
	 * one holding the adapter private area, last). */
	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	t1_sw_reset(pdev);
}
1360
/* PCI driver glue binding probe/remove to the Chelsio T1 device table. */
static struct pci_driver driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};
1367
/* Module entry point: register the PCI driver. */
static int __init t1_init_module(void)
{
	return pci_register_driver(&driver);
}

/* Module exit point: unregister the PCI driver (triggers remove_one). */
static void __exit t1_cleanup_module(void)
{
	pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);