/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

static void t3_port_intr_clear(struct adapter *adapter, int idx);

/**
 *	t3_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
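
/*
 * Example (illustrative sketch, not part of the original driver): a caller
 * that needs to poll a BUSY-style bit can use t3_wait_op_done_val()
 * directly.  Here A_SF_OP/F_BUSY (the serial-flash command register used
 * later in this file) stand in for any register/bit pair; polarity 0 means
 * "wait until the bit clears":
 *
 *	u32 doneval;
 *
 *	if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS,
 *				10, &doneval))
 *		return -EAGAIN;	/ * gave up after SF_ATTEMPTS polls * /
 *	/ * doneval holds the register value observed at completion * /
 */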

/**
 *	t3_write_regs - write a bunch of registers
 *	@adapter: the adapter to program
 *	@p: an array of register address/register value pairs
 *	@n: the number of address/value pairs
 *	@offset: register address offset
 *
 *	Takes an array of register address/register value pairs and writes each
 *	value to the corresponding register.  Register addresses are adjusted
 *	by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 *	t3_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
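
/*
 * Example (sketch): t3_set_reg_field() is a read-modify-write helper, so
 * updating one field while leaving the rest of a register intact is a
 * single call.  The MDIO code below switches the MI1 ST field this way:
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
 *
 * i.e. @mask selects the ST field and @val supplies its new contents.
 */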

/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
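
/*
 * Example (hypothetical sketch): dumping the first two 64-bit words of an
 * MC7 instance through the backdoor; "mc7" stands for any of the adapter's
 * MC7 memories.
 *
 *	u64 words[2];
 *
 *	if (t3_mc7_bd_read(mc7, 0, ARRAY_SIZE(words), words) == 0)
 *		pr_debug("MC7 %s: %016llx %016llx\n", mc7->name,
 *			 words[0], words[1]);
 */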

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
		       u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		ret = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	.read = t3_mi1_read,
	.write = t3_mi1_write,
	.mode_support = MDIO_SUPPORTS_C22
};
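
/*
 * Example (sketch): these ops are normally invoked through the mdio layer,
 * but a direct call is equivalent to a clause 22 BMSR read (mmd_addr is
 * ignored for clause 22):
 *
 *	int bmsr = t3_mi1_read(dev, phy_addr, MDIO_DEVAD_NONE, MII_BMSR);
 *	if (bmsr < 0)
 *		return bmsr;	/ * e.g. -EAGAIN if MI1 stayed busy * /
 *
 * A negative return is an error; otherwise the low 16 bits hold the
 * register value.
 */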

/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the MDIO_LOCK held.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			ret = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
			 u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	.read = mi1_ext_read,
	.write = mi1_ext_write,
	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};

/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = t3_mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = t3_mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
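
/*
 * Example (sketch): t3_phy_reset() below uses this helper to clear the
 * low-power bit while setting the reset bit in one read-modify-write:
 *
 *	t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
 *			    MDIO_CTRL1_RESET);
 */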

/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
				  MDIO_CTRL1_RESET);
	if (err || !wait)
		return err;

	do {
		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
		if (err)
			return err;
		ctl &= MDIO_CTRL1_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}
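
/*
 * Example (sketch): advertising 100/full and 1000/full plus symmetric
 * pause uses the ethtool ADVERTISED_* bitmap:
 *
 *	err = t3_phy_advertise(phy, ADVERTISED_100baseT_Full |
 *			       ADVERTISED_1000baseT_Full | ADVERTISED_Pause);
 */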

/**
 *	t3_phy_advertise_fiber - set fiber PHY advertisement register
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a fiber PHY's advertisement register to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}

/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed
 *	@duplex: requested PHY duplex
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
}
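
/*
 * Example (sketch): forcing 100 Mb/s full duplex, which also disables
 * autonegotiation since the requested speed is not GigE:
 *
 *	err = t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
 *
 * Passing a negative speed or duplex leaves that setting unchanged.
 */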

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			     MDIO_PMA_LASI_LSALARM);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
			       &status);

	if (err)
		return err;
	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
}

static const struct adapter_info t3_adap_info[] = {
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{1, 1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},
	{},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310" },
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
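
/*
 * For reference, each VPD_ENTRY(name, len) above expands to the standard
 * VPD keyword layout: a 2-byte keyword, a length byte and the payload.
 * E.g. VPD_ENTRY(sn, SERNUM_LEN) becomes:
 *
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[SERNUM_LEN];
 *
 * which is why get_vpd_params() below reads fields such as vpd.sn_data
 * and vpd.cclk_len.
 */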

#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	u32 data = enable ? 0xc : 0;
	int ret;

	/* EEPROM_STAT_ADDR is outside VPD area, use pci_write_vpd_any() */
	ret = pci_write_vpd_any(adapter->pdev, EEPROM_STAT_ADDR, sizeof(u32),
				&data);

	return ret < 0 ? ret : 0;
}

static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val)
{
	char tok[256];

	memcpy(tok, s, len);
	tok[len] = 0;
	return kstrtouint(strim(tok), base, val);
}

static int vpdstrtou16(char *s, u8 len, unsigned int base, u16 *val)
{
	char tok[256];

	memcpy(tok, s, len);
	tok[len] = 0;
	return kstrtou16(strim(tok), base, val);
}
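
/*
 * Example (sketch): VPD fields are fixed-width ASCII, so parsing the core
 * clock from the structure above looks like:
 *
 *	unsigned int cclk;
 *	int err = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &cclk);
 *
 * strim() drops the space padding before the kstrtouint() conversion.
 */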

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	struct t3_vpd vpd;
	u8 base_val = 0;
	int addr, ret;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
	if (ret < 0)
		return ret;
	addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : 0;

	ret = pci_read_vpd(adapter->pdev, addr, sizeof(vpd), &vpd);
	if (ret < 0)
		return ret;

	ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
	if (ret)
		return ret;
	ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
	if (ret)
		return ret;
	ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
	if (ret)
		return ret;
	ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
	if (ret)
		return ret;
	ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
	if (ret)
		return ret;
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
		ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
				  &p->xauicfg[0]);
		if (ret)
			return ret;
		ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
				  &p->xauicfg[1]);
		if (ret)
			return ret;
	}

	ret = hex2bin(p->eth_base, vpd.na_data, 6);
	if (ret < 0)
		return -EINVAL;
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
	FW_MIN_SIZE = 8            /* at least version and csum */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
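
/*
 * Example (sketch): flash_wait_op() below chains these two primitives to
 * poll the flash status register -- a 1-byte SF_RD_STATUS command with
 * @cont set, followed by a 1-byte read that ends the transaction:
 *
 *	ret = sf1_write(adapter, 1, 1, SF_RD_STATUS);
 *	if (!ret)
 *		ret = sf1_read(adapter, 1, 0, &status);
 */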

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
static int t3_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}

/**
 *	t3_get_tp_version - read the tp sram version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol sram version from sram.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 *	t3_check_tpsram_version - read the tp sram version
 *	@adapter: the adapter
 *
 *	Reads the protocol sram version from flash.
 */
int t3_check_tpsram_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	CH_ERR(adapter, "found wrong TP version (%u.%u), "
	       "driver compiled for version %d.%d\n", major, minor,
	       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	return -EINVAL;
}

/**
 *	t3_check_tpsram - check if provided protocol SRAM
 *			  is compatible with this driver
 *	@adapter: the adapter
 *	@tp_sram: the protocol SRAM image to check
 *	@size: image size
 *
 *	Checks if an adapter's tp sram is compatible with the driver.
 *	Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR) {
		CH_WARN(adapter, "found old FW minor version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	} else {
		CH_WARN(adapter, "found newer FW version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 *	t3_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
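
/*
 * For reference, the image layout t3_load_fw() expects (restating the code
 * above, not new policy): @size - 8 bytes of code/data, a 4-byte FW
 * version, then a 4-byte one's-complement checksum chosen so the 32-bit
 * big-endian words of the whole image sum to 0xffffffff.  The code/data
 * section is flashed at FW_FLASH_BOOT_ADDR and the version word at
 * FW_VERS_ADDR; the trailing checksum is only verified, never stored.
 */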

#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}

static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}

/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (!lc->link_ok && link_ok) {
		u32 rx_cfg, rx_hash_high, rx_hash_low;
		u32 status;

		t3_xgm_intr_enable(adapter, port_id);
		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		if (status & F_LINKFAULTCHANGE) {
			mac->stats.link_faults++;
			pi->link_fault = 1;
		}
		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
	}

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;                            /* nothing changed */

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
			   speed, duplex, fc);
}

void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports a link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}

/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 *	t3_set_vlan_accel - control HW VLAN extraction
 *	@adapter: the adapter
 *	@ports: bitmap of adapter ports to operate on
 *	@on: enable (1) or disable (0) HW VLAN extraction
 *
 *	Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};

/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurrences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
			status &= ~acts->mask;
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
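
/*
 * Example (sketch): the per-module handlers below all follow the same
 * pattern -- a sentinel-terminated table fed to t3_handle_intr_status():
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{F_SOMEERR, "some fatal condition", -1, 1},
 *		{F_SOMEEVENT, "counted, non-fatal", STAT_SOME_IDX, 0},
 *		{0}
 *	};
 *
 * F_SOMEERR, F_SOMEEVENT and STAT_SOME_IDX are placeholders; see
 * pci_intr_handler() below for a real table.
 */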

#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
		       F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)

/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static const struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
1764		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1765			 mc7->name, addr);
1766	}
1767
1768	if (cause & MC7_INTR_FATAL)
1769		t3_fatal_err(adapter);
1770
1771	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1772}
1773
1774#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1775			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1776/*
1777 * XGMAC interrupt handler.
1778 */
1779static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1780{
1781	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1782	/*
1783	 * We mask out interrupt causes for which we're not taking interrupts.
1784	 * This allows us to use polling logic to monitor some of the other
1785	 * conditions when taking interrupts would impose too much load on the
1786	 * system.
1787	 */
1788	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1789		    ~F_RXFIFO_OVERFLOW;
1790
1791	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1792		mac->stats.tx_fifo_parity_err++;
1793		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1794	}
1795	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1796		mac->stats.rx_fifo_parity_err++;
1797		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1798	}
1799	if (cause & F_TXFIFO_UNDERRUN)
1800		mac->stats.tx_fifo_urun++;
1801	if (cause & F_RXFIFO_OVERFLOW)
1802		mac->stats.rx_fifo_ovfl++;
1803	if (cause & V_SERDES_LOS(M_SERDES_LOS))
1804		mac->stats.serdes_signal_loss++;
1805	if (cause & F_XAUIPCSCTCERR)
1806		mac->stats.xaui_pcs_ctc_err++;
1807	if (cause & F_XAUIPCSALIGNCHANGE)
1808		mac->stats.xaui_pcs_align_change++;
1809	if (cause & F_XGM_INT) {
1810		t3_set_reg_field(adap,
1811				 A_XGM_INT_ENABLE + mac->offset,
1812				 F_XGM_INT, 0);
1813		mac->stats.link_faults++;
1814
1815		t3_os_link_fault_handler(adap, idx);
1816	}
1817
1818	if (cause & XGM_INTR_FATAL)
1819		t3_fatal_err(adap);
1820
1821	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1822	return cause != 0;
1823}
1824
1825/*
1826 * Interrupt handler for PHY events.
1827 */
1828int t3_phy_intr_handler(struct adapter *adapter)
1829{
1830	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1831
1832	for_each_port(adapter, i) {
1833		struct port_info *p = adap2pinfo(adapter, i);
1834
1835		if (!(p->phy.caps & SUPPORTED_IRQ))
1836			continue;
1837
1838		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1839			int phy_cause = p->phy.ops->intr_handler(&p->phy);
1840
1841			if (phy_cause & cphy_cause_link_change)
1842				t3_link_changed(adapter, i);
1843			if (phy_cause & cphy_cause_fifo_error)
1844				p->phy.fifo_errors++;
1845			if (phy_cause & cphy_cause_module_change)
1846				t3_os_phymod_changed(adapter, i);
1847		}
1848	}
1849
1850	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1851	return 0;
1852}
1853
1854/*
1855 * T3 slow path (non-data) interrupt handler.
1856 */
1857int t3_slow_intr_handler(struct adapter *adapter)
1858{
1859	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1860
1861	cause &= adapter->slow_intr_mask;
1862	if (!cause)
1863		return 0;
1864	if (cause & F_PCIM0) {
1865		if (is_pcie(adapter))
1866			pcie_intr_handler(adapter);
1867		else
1868			pci_intr_handler(adapter);
1869	}
1870	if (cause & F_SGE3)
1871		t3_sge_err_intr_handler(adapter);
1872	if (cause & F_MC7_PMRX)
1873		mc7_intr_handler(&adapter->pmrx);
1874	if (cause & F_MC7_PMTX)
1875		mc7_intr_handler(&adapter->pmtx);
1876	if (cause & F_MC7_CM)
1877		mc7_intr_handler(&adapter->cm);
1878	if (cause & F_CIM)
1879		cim_intr_handler(adapter);
1880	if (cause & F_TP1)
1881		tp_intr_handler(adapter);
1882	if (cause & F_ULP2_RX)
1883		ulprx_intr_handler(adapter);
1884	if (cause & F_ULP2_TX)
1885		ulptx_intr_handler(adapter);
1886	if (cause & F_PM1_RX)
1887		pmrx_intr_handler(adapter);
1888	if (cause & F_PM1_TX)
1889		pmtx_intr_handler(adapter);
1890	if (cause & F_CPL_SWITCH)
1891		cplsw_intr_handler(adapter);
1892	if (cause & F_MPS0)
1893		mps_intr_handler(adapter);
1894	if (cause & F_MC5A)
1895		t3_mc5_intr_handler(&adapter->mc5);
1896	if (cause & F_XGMAC0_0)
1897		mac_intr_handler(adapter, 0);
1898	if (cause & F_XGMAC0_1)
1899		mac_intr_handler(adapter, 1);
1900	if (cause & F_T3DBG)
1901		t3_os_ext_intr_handler(adapter);
1902
1903	/* Clear the interrupts just processed. */
1904	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1905	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
1906	return 1;
1907}
1908
1909static unsigned int calc_gpio_intr(struct adapter *adap)
1910{
1911	unsigned int i, gpi_intr = 0;
1912
1913	for_each_port(adap, i)
1914		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1915		    adapter_info(adap)->gpio_intr[i])
1916			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1917	return gpi_intr;
1918}
1919
1920/**
1921 *	t3_intr_enable - enable interrupts
1922 *	@adapter: the adapter whose interrupts should be enabled
1923 *
1924 *	Enable interrupts by setting the interrupt enable registers of the
1925 *	various HW modules and then enabling the top-level interrupt
1926 *	concentrator.
1927 */
1928void t3_intr_enable(struct adapter *adapter)
1929{
1930	static const struct addr_val_pair intr_en_avp[] = {
1931		{A_SG_INT_ENABLE, SGE_INTR_MASK},
1932		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
1933		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1934		 MC7_INTR_MASK},
1935		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1936		 MC7_INTR_MASK},
1937		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1938		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1939		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1940		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1941		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1942		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
1943	};
1944
1945	adapter->slow_intr_mask = PL_INTR_MASK;
1946
1947	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1948	t3_write_reg(adapter, A_TP_INT_ENABLE,
1949		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1950
1951	if (adapter->params.rev > 0) {
1952		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1953			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1954		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1955			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1956			     F_PBL_BOUND_ERR_CH1);
1957	} else {
1958		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1959		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1960	}
1961
1962	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1963
1964	if (is_pcie(adapter))
1965		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1966	else
1967		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1968	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1969	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
1970}
1971
1972/**
1973 *	t3_intr_disable - disable a card's interrupts
1974 *	@adapter: the adapter whose interrupts should be disabled
1975 *
1976 *	Disable interrupts.  We only disable the top-level interrupt
1977 *	concentrator and the SGE data interrupts.
1978 */
1979void t3_intr_disable(struct adapter *adapter)
1980{
1981	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1982	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
1983	adapter->slow_intr_mask = 0;
1984}
1985
1986/**
1987 *	t3_intr_clear - clear all interrupts
1988 *	@adapter: the adapter whose interrupts should be cleared
1989 *
1990 *	Clears all interrupts.
1991 */
1992void t3_intr_clear(struct adapter *adapter)
1993{
1994	static const unsigned int cause_reg_addr[] = {
1995		A_SG_INT_CAUSE,
1996		A_SG_RSPQ_FL_STATUS,
1997		A_PCIX_INT_CAUSE,
1998		A_MC7_INT_CAUSE,
1999		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2000		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2001		A_CIM_HOST_INT_CAUSE,
2002		A_TP_INT_CAUSE,
2003		A_MC5_DB_INT_CAUSE,
2004		A_ULPRX_INT_CAUSE,
2005		A_ULPTX_INT_CAUSE,
2006		A_CPL_INTR_CAUSE,
2007		A_PM1_TX_INT_CAUSE,
2008		A_PM1_RX_INT_CAUSE,
2009		A_MPS_INT_CAUSE,
2010		A_T3DBG_INT_CAUSE,
2011	};
2012	unsigned int i;
2013
2014	/* Clear PHY and MAC interrupts for each port. */
2015	for_each_port(adapter, i)
2016		t3_port_intr_clear(adapter, i);
2017
2018	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2019		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2020
2021	if (is_pcie(adapter))
2022		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2023	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2024	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
2025}
2026
2027void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2028{
2029	struct port_info *pi = adap2pinfo(adapter, idx);
2030
2031	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2032		     XGM_EXTRA_INTR_MASK);
2033}
2034
2035void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2036{
2037	struct port_info *pi = adap2pinfo(adapter, idx);
2038
2039	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2040		     0x7ff);
2041}
2042
2043/**
2044 *	t3_port_intr_enable - enable port-specific interrupts
2045 *	@adapter: associated adapter
2046 *	@idx: index of port whose interrupts should be enabled
2047 *
2048 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2049 *	adapter port.
2050 */
2051void t3_port_intr_enable(struct adapter *adapter, int idx)
2052{
2053	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2054
2055	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2056	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2057	phy->ops->intr_enable(phy);
2058}
2059
2060/**
2061 *	t3_port_intr_disable - disable port-specific interrupts
2062 *	@adapter: associated adapter
2063 *	@idx: index of port whose interrupts should be disabled
2064 *
2065 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2066 *	adapter port.
2067 */
2068void t3_port_intr_disable(struct adapter *adapter, int idx)
2069{
2070	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2071
2072	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2073	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2074	phy->ops->intr_disable(phy);
2075}
2076
2077/**
2078 *	t3_port_intr_clear - clear port-specific interrupts
2079 *	@adapter: associated adapter
2080 *	@idx: index of port whose interrupts to clear
2081 *
2082 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2083 *	adapter port.
2084 */
2085static void t3_port_intr_clear(struct adapter *adapter, int idx)
2086{
2087	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2088
2089	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2090	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2091	phy->ops->intr_clear(phy);
2092}
2093
2094#define SG_CONTEXT_CMD_ATTEMPTS 100
2095
2096/**
2097 * 	t3_sge_write_context - write an SGE context
2098 * 	@adapter: the adapter
2099 * 	@id: the context id
2100 * 	@type: the context type
2101 *
2102 * 	Program an SGE context with the values already loaded in the
2103 * 	SG_CONTEXT_DATA0..3 registers.
2104 */
2105static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2106				unsigned int type)
2107{
2108	if (type == F_RESPONSEQ) {
2109		/*
2110		 * Can't write the Response Queue Context bits for
2111		 * Interrupt Armed or the Reserve bits after the chip
2112		 * has been initialized out of reset.  Writing to these
2113		 * bits can confuse the hardware.
2114		 */
2115		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2116		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2117		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2118		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2119	} else {
2120		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2121		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2122		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2123		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2124	}
2125	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2126		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2127	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2128			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2129}
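
/*
 * Note on the masks above: the SG_CONTEXT_MASK0..3 registers evidently
 * act as per-bit write enables for the corresponding SG_CONTEXT_DATA0..3
 * words, so a cleared mask bit preserves the current context bit.  For
 * response queue contexts, MASK2 = 0x17ffffff keeps the interrupt-armed
 * and reserved bits (bits 27 and 29..31) from ever being rewritten.
 */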
2130
2131/**
2132 *	clear_sge_ctxt - completely clear an SGE context
2133 *	@adap: the adapter
2134 *	@id: the context id
2135 *	@type: the context type
2136 *
2137 *	Completely clear an SGE context.  Used predominantly at post-reset
2138 *	initialization.  Note in particular that we don't skip writing to any
2139 *	"sensitive bits" in the contexts the way that t3_sge_write_context()
2140 *	does.
2141 */
2142static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2143			  unsigned int type)
2144{
2145	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2146	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2147	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2148	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2149	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2150	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2151	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2152	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2153	t3_write_reg(adap, A_SG_CONTEXT_CMD,
2154		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2155	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2156			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2157}
2158
2159/**
2160 *	t3_sge_init_ecntxt - initialize an SGE egress context
2161 *	@adapter: the adapter to configure
2162 *	@id: the context id
2163 *	@gts_enable: whether to enable GTS for the context
2164 *	@type: the egress context type
2165 *	@respq: associated response queue
2166 *	@base_addr: base address of queue
2167 *	@size: number of queue entries
2168 *	@token: uP token
2169 *	@gen: initial generation value for the context
2170 *	@cidx: consumer pointer
2171 *
2172 *	Initialize an SGE egress context and make it ready for use.  If the
2173 *	platform allows concurrent context operations, the caller is
2174 *	responsible for appropriate locking.
2175 */
2176int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2177		       enum sge_context_type type, int respq, u64 base_addr,
2178		       unsigned int size, unsigned int token, int gen,
2179		       unsigned int cidx)
2180{
2181	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2182
2183	if (base_addr & 0xfff)	/* must be 4K aligned */
2184		return -EINVAL;
2185	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2186		return -EBUSY;
2187
2188	base_addr >>= 12;
2189	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2190		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2191	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2192		     V_EC_BASE_LO(base_addr & 0xffff));
2193	base_addr >>= 16;
2194	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2195	base_addr >>= 32;
2196	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2197		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2198		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2199		     F_EC_VALID);
2200	return t3_sge_write_context(adapter, id, F_EGRESS);
2201}
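
/*
 * Layout note (illustrative): the 4K-aligned 64-bit @base_addr is spread
 * across the context as base_addr >> 12, with bits 12..27 in DATA1
 * (EC_BASE_LO), bits 28..59 in DATA2 and bits 60..63 in DATA3
 * (EC_BASE_HI), for 16 + 32 + 4 = 52 address bits in all.
 */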
2202
2203/**
2204 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2205 *	@adapter: the adapter to configure
2206 *	@id: the context id
2207 *	@gts_enable: whether to enable GTS for the context
2208 *	@base_addr: base address of queue
2209 *	@size: number of queue entries
2210 *	@bsize: size of each buffer for this queue
2211 *	@cong_thres: threshold to signal congestion to upstream producers
2212 *	@gen: initial generation value for the context
2213 *	@cidx: consumer pointer
2214 *
2215 *	Initialize an SGE free list context and make it ready for use.  The
2216 *	caller is responsible for ensuring only one context operation occurs
2217 *	at a time.
2218 */
2219int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2220			int gts_enable, u64 base_addr, unsigned int size,
2221			unsigned int bsize, unsigned int cong_thres, int gen,
2222			unsigned int cidx)
2223{
2224	if (base_addr & 0xfff)	/* must be 4K aligned */
2225		return -EINVAL;
2226	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2227		return -EBUSY;
2228
2229	base_addr >>= 12;
2230	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2231	base_addr >>= 32;
2232	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2233		     V_FL_BASE_HI((u32) base_addr) |
2234		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2235	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2236		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2237		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2238	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2239		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2240		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2241	return t3_sge_write_context(adapter, id, F_FREELIST);
2242}
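
/*
 * Note: fields that straddle a context word boundary here (the buffer
 * size and the consumer index) are split into _LO/_HI halves across
 * adjacent DATA words, e.g. V_FL_INDEX_LO()/V_FL_INDEX_HI() carry the
 * low 12 bits and the remaining bits of @cidx respectively.
 */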
2243
2244/**
2245 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2246 *	@adapter: the adapter to configure
2247 *	@id: the context id
2248 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2249 *	@base_addr: base address of queue
2250 *	@size: number of queue entries
2251 *	@fl_thres: threshold for selecting the normal or jumbo free list
2252 *	@gen: initial generation value for the context
2253 *	@cidx: consumer pointer
2254 *
2255 *	Initialize an SGE response queue context and make it ready for use.
2256 *	The caller is responsible for ensuring only one context operation
2257 *	occurs at a time.
2258 */
2259int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2260			 int irq_vec_idx, u64 base_addr, unsigned int size,
2261			 unsigned int fl_thres, int gen, unsigned int cidx)
2262{
2263	unsigned int intr = 0;
2264
2265	if (base_addr & 0xfff)	/* must be 4K aligned */
2266		return -EINVAL;
2267	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2268		return -EBUSY;
2269
2270	base_addr >>= 12;
2271	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2272		     V_CQ_INDEX(cidx));
2273	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2274	base_addr >>= 32;
2275	if (irq_vec_idx >= 0)
2276		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2277	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2278		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2279	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2280	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2281}
2282
2283/**
2284 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2285 *	@adapter: the adapter to configure
2286 *	@id: the context id
2287 *	@base_addr: base address of queue
2288 *	@size: number of queue entries
2289 *	@rspq: response queue for async notifications
2290 *	@ovfl_mode: CQ overflow mode
2291 *	@credits: completion queue credits
2292 *	@credit_thres: the credit threshold
2293 *
2294 *	Initialize an SGE completion queue context and make it ready for use.
2295 *	The caller is responsible for ensuring only one context operation
2296 *	occurs at a time.
2297 */
2298int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2299			unsigned int size, int rspq, int ovfl_mode,
2300			unsigned int credits, unsigned int credit_thres)
2301{
2302	if (base_addr & 0xfff)	/* must be 4K aligned */
2303		return -EINVAL;
2304	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2305		return -EBUSY;
2306
2307	base_addr >>= 12;
2308	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2309	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2310	base_addr >>= 32;
2311	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2312		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2313		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2314		     V_CQ_ERR(ovfl_mode));
2315	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2316		     V_CQ_CREDIT_THRES(credit_thres));
2317	return t3_sge_write_context(adapter, id, F_CQ);
2318}
2319
2320/**
2321 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2322 *	@adapter: the adapter
2323 *	@id: the egress context id
2324 *	@enable: enable (1) or disable (0) the context
2325 *
2326 *	Enable or disable an SGE egress context.  The caller is responsible for
2327 *	ensuring only one context operation occurs at a time.
2328 */
2329int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2330{
2331	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2332		return -EBUSY;
2333
2334	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2335	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2336	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2337	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2338	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2339	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2340		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2341	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2342			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2343}
2344
2345/**
2346 *	t3_sge_disable_fl - disable an SGE free-buffer list
2347 *	@adapter: the adapter
2348 *	@id: the free list context id
2349 *
2350 *	Disable an SGE free-buffer list.  The caller is responsible for
2351 *	ensuring only one context operation occurs at a time.
2352 */
2353int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2354{
2355	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2356		return -EBUSY;
2357
2358	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2359	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2360	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2361	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2362	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2363	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2364		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2365	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2366			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2367}
2368
2369/**
2370 *	t3_sge_disable_rspcntxt - disable an SGE response queue
2371 *	@adapter: the adapter
2372 *	@id: the response queue context id
2373 *
2374 *	Disable an SGE response queue.  The caller is responsible for
2375 *	ensuring only one context operation occurs at a time.
2376 */
2377int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2378{
2379	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2380		return -EBUSY;
2381
2382	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2383	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2384	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2385	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2386	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2387	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2388		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2389	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2390			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2391}
2392
2393/**
2394 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2395 *	@adapter: the adapter
2396 *	@id: the completion queue context id
2397 *
2398 *	Disable an SGE completion queue.  The caller is responsible for
2399 *	ensuring only one context operation occurs at a time.
2400 */
2401int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2402{
2403	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2404		return -EBUSY;
2405
2406	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2407	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2408	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2409	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2410	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2411	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2412		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2413	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2414			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2415}
2416
2417/**
2418 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2419 *	@adapter: the adapter
2420 *	@id: the context id
2421 *	@op: the operation to perform
2422 *	@credits: credit value to write
2423 *
2424 *	Perform the selected operation on an SGE completion queue context.
2425 *	The caller is responsible for ensuring only one context operation
2426 *	occurs at a time.
2427 */
2428int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2429		      unsigned int credits)
2430{
2431	u32 val;
2432
2433	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2434		return -EBUSY;
2435
2436	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2437	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2438		     V_CONTEXT(id) | F_CQ);
2439	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2440				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2441		return -EIO;
2442
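	/*
	 * For ops 2..6 the result is the current CQ index.  On rev 0
	 * parts the command completion doesn't return it, so the
	 * context is read back (opcode 0) and the index extracted from
	 * SG_CONTEXT_DATA0 instead.
	 */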
2443	if (op >= 2 && op < 7) {
2444		if (adapter->params.rev > 0)
2445			return G_CQ_INDEX(val);
2446
2447		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2448			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2449		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2450				    F_CONTEXT_CMD_BUSY, 0,
2451				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2452			return -EIO;
2453		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2454	}
2455	return 0;
2456}
2457
2458/**
2459 *	t3_config_rss - configure Rx packet steering
2460 *	@adapter: the adapter
2461 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2462 *	@cpus: values for the CPU lookup table (0xff terminated)
2463 *	@rspq: values for the response queue lookup table (0xffff terminated)
2464 *
2465 *	Programs the receive packet steering logic.  @cpus and @rspq provide
2466 *	the values for the CPU and response queue lookup tables.  If they
2467 *	provide fewer values than the size of the tables the supplied values
2468 *	are used repeatedly until the tables are fully populated.
2469 */
2470void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2471		   const u8 *cpus, const u16 *rspq)
2472{
2473	int i, j, cpu_idx = 0, q_idx = 0;
2474
2475	if (cpus)
2476		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2477			u32 val = i << 16;
2478
2479			for (j = 0; j < 2; ++j) {
2480				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2481				if (cpus[cpu_idx] == 0xff)
2482					cpu_idx = 0;
2483			}
2484			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2485		}
2486
2487	if (rspq)
2488		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2489			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2490				     (i << 16) | rspq[q_idx++]);
2491			if (rspq[q_idx] == 0xffff)
2492				q_idx = 0;
2493		}
2494
2495	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2496}
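
/*
 * Example (illustrative): cpus = { 0, 1, 2, 3, 0xff } cycles the four
 * CPU values round-robin until all RSS_TABLE_SIZE entries are written;
 * the 0xff/0xffff terminators only mark where the supplied values wrap
 * and are never themselves written to the tables.
 */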
2497
2498/**
2499 *	t3_tp_set_offload_mode - put TP in NIC/offload mode
2500 *	@adap: the adapter
2501 *	@enable: 1 to select offload mode, 0 for regular NIC
2502 *
2503 *	Switches TP to NIC/offload mode.
2504 */
2505void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2506{
2507	if (is_offload(adap) || !enable)
2508		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2509				 V_NICMODE(!enable));
2510}
2511
2512/**
2513 *	pm_num_pages - calculate the number of pages of the payload memory
2514 *	@mem_size: the size of the payload memory
2515 *	@pg_size: the size of each payload memory page
2516 *
2517 *	Calculate the number of pages, each of the given size, that fit in a
2518 *	memory of the specified size, respecting the HW requirement that the
2519 *	number of pages must be a multiple of 24.
2520 */
2521static inline unsigned int pm_num_pages(unsigned int mem_size,
2522					unsigned int pg_size)
2523{
2524	unsigned int n = mem_size / pg_size;
2525
2526	return n - n % 24;
2527}
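
/*
 * Example: a 64 MB payload memory with 4 KB pages yields n = 16384,
 * so pm_num_pages() returns 16384 - 16384 % 24 = 16368 usable pages.
 */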
2528
2529#define mem_region(adap, start, size, reg) \
2530	t3_write_reg((adap), A_ ## reg, (start)); \
2531	start += size
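
/*
 * Note: mem_region() expands to two statements and modifies its @start
 * argument, so it must only be used as a full statement, as below;
 * hanging it off an unbraced if/else would be a bug.
 */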
2532
2533/**
2534 *	partition_mem - partition memory and configure TP memory settings
2535 *	@adap: the adapter
2536 *	@p: the TP parameters
2537 *
2538 *	Partitions context and payload memory and configures TP's memory
2539 *	registers.
2540 */
2541static void partition_mem(struct adapter *adap, const struct tp_params *p)
2542{
2543	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2544	unsigned int timers = 0, timers_shift = 22;
2545
2546	if (adap->params.rev > 0) {
2547		if (tids <= 16 * 1024) {
2548			timers = 1;
2549			timers_shift = 16;
2550		} else if (tids <= 64 * 1024) {
2551			timers = 2;
2552			timers_shift = 18;
2553		} else if (tids <= 256 * 1024) {
2554			timers = 3;
2555			timers_shift = 20;
2556		}
2557	}
2558
2559	t3_write_reg(adap, A_TP_PMM_SIZE,
2560		     p->chan_rx_size | (p->chan_tx_size >> 16));
2561
2562	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2563	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2564	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2565	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2566			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2567
2568	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2569	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2570	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2571
2572	pstructs = p->rx_num_pgs + p->tx_num_pgs;
2573	/* Add a bit of headroom and make multiple of 24 */
2574	pstructs += 48;
2575	pstructs -= pstructs % 24;
2576	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2577
2578	m = tids * TCB_SIZE;
2579	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2580	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2581	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2582	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2583	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2584	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2585	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2586	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2587
2588	m = (m + 4095) & ~0xfff;
2589	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2590	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2591
2592	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2593	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2594	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2595	if (tids < m)
2596		adap->params.mc5.nservers += m - tids;
2597}
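
/*
 * Resulting CM layout (illustrative): TCBs at 0, then 4 MB of egress
 * contexts and 4 MB of CQ contexts, the timer queues, the pstruct pool
 * and its free list, the Rx and Tx page free lists, and finally the
 * CIM SDRAM region in the 4K-aligned remainder.
 */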
2598
2599static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2600				  u32 val)
2601{
2602	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2603	t3_write_reg(adap, A_TP_PIO_DATA, val);
2604}
2605
2606static void tp_config(struct adapter *adap, const struct tp_params *p)
2607{
2608	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2609		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2610		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2611	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2612		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2613		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2614	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2615		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2616		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2617		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2618	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2619			 F_IPV6ENABLE | F_NICMODE);
2620	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2621	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2622	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2623			 adap->params.rev > 0 ? F_ENABLEESND :
2624			 F_T3A_ENABLEESND);
2625
2626	t3_set_reg_field(adap, A_TP_PC_CONFIG,
2627			 F_ENABLEEPCMDAFULL,
2628			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2629			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2630	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2631			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2632			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2633	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2634	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2635
2636	if (adap->params.rev > 0) {
2637		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2638		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2639				 F_TXPACEAUTO);
2640		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2641		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2642	} else
2643		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2644
2645	if (adap->params.rev == T3_REV_C)
2646		t3_set_reg_field(adap, A_TP_PC_CONFIG,
2647				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2648				 V_TABLELATENCYDELTA(4));
2649
2650	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2651	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2652	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2653	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2654}
2655
2656/* Desired TP timer resolution in usec */
2657#define TP_TMR_RES 50
2658
2659/* TCP timer values in ms */
2660#define TP_DACK_TIMER 50
2661#define TP_RTO_MIN    250
2662
2663/**
2664 *	tp_set_timers - set TP timing parameters
2665 *	@adap: the adapter to set
2666 *	@core_clk: the core clock frequency in Hz
2667 *
2668 *	Set TP's timing parameters, such as the various timer resolutions and
2669 *	the TCP timer values.
2670 */
2671static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2672{
2673	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2674	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
2675	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
2676	unsigned int tps = core_clk >> tre;
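	/*
	 * Example (illustrative, assuming a 200 MHz core clock):
	 * tre = fls(200000000 / 20000) - 1 = 13, so tps = 200 MHz >> 13,
	 * about 24414 ticks/sec, i.e. ~41 us per tick, within the 50 us
	 * TP_TMR_RES target.
	 */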
2677
2678	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2679		     V_DELAYEDACKRESOLUTION(dack_re) |
2680		     V_TIMESTAMPRESOLUTION(tstamp_re));
2681	t3_write_reg(adap, A_TP_DACK_TIMER,
2682		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2683	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2684	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2685	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2686	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2687	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2688		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2689		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2690		     V_KEEPALIVEMAX(9));
2691
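/*
 * The SECONDS macro below makes "n SECONDS" expand to "n * tps",
 * i.e. a count of ticks at the timer resolution programmed above.
 */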
2692#define SECONDS * tps
2693
2694	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2695	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2696	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2697	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2698	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2699	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2700	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2701	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2702	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2703
2704#undef SECONDS
2705}
2706
2707/**
2708 *	t3_tp_set_coalescing_size - set receive coalescing size
2709 *	@adap: the adapter
2710 *	@size: the receive coalescing size
2711 *	@psh: whether a set PSH bit should deliver coalesced data
2712 *
2713 *	Set the receive coalescing size and PSH bit handling.
2714 */
2715static int t3_tp_set_coalescing_size(struct adapter *adap,
2716				     unsigned int size, int psh)
2717{
2718	u32 val;
2719
2720	if (size > MAX_RX_COALESCING_LEN)
2721		return -EINVAL;
2722
2723	val = t3_read_reg(adap, A_TP_PARA_REG3);
2724	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2725
2726	if (size) {
2727		val |= F_RXCOALESCEENABLE;
2728		if (psh)
2729			val |= F_RXCOALESCEPSHEN;
2730		size = min(MAX_RX_COALESCING_LEN, size);
2731		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2732			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2733	}
2734	t3_write_reg(adap, A_TP_PARA_REG3, val);
2735	return 0;
2736}
2737
2738/**
2739 *	t3_tp_set_max_rxsize - set the max receive size
2740 *	@adap: the adapter
2741 *	@size: the max receive size
2742 *
2743 *	Set TP's max receive size.  This is the limit that applies when
2744 *	receive coalescing is disabled.
2745 */
2746static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2747{
2748	t3_write_reg(adap, A_TP_PARA_REG7,
2749		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2750}
2751
2752static void init_mtus(unsigned short mtus[])
2753{
2754	/*
2755	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
2756	 * it can accommodate max size TCP/IP headers when SACK and timestamps
2757	 * are enabled and still have at least 8 bytes of payload.
2758	 */
2759	mtus[0] = 88;
2760	mtus[1] = 88;
2761	mtus[2] = 256;
2762	mtus[3] = 512;
2763	mtus[4] = 576;
2764	mtus[5] = 1024;
2765	mtus[6] = 1280;
2766	mtus[7] = 1492;
2767	mtus[8] = 1500;
2768	mtus[9] = 2002;
2769	mtus[10] = 2048;
2770	mtus[11] = 4096;
2771	mtus[12] = 4352;
2772	mtus[13] = 8192;
2773	mtus[14] = 9000;
2774	mtus[15] = 9600;
2775}
2776
2777/*
2778 * Initial congestion control parameters.
2779 */
2780static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2781{
2782	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2783	a[9] = 2;
2784	a[10] = 3;
2785	a[11] = 4;
2786	a[12] = 5;
2787	a[13] = 6;
2788	a[14] = 7;
2789	a[15] = 8;
2790	a[16] = 9;
2791	a[17] = 10;
2792	a[18] = 14;
2793	a[19] = 17;
2794	a[20] = 21;
2795	a[21] = 25;
2796	a[22] = 30;
2797	a[23] = 35;
2798	a[24] = 45;
2799	a[25] = 60;
2800	a[26] = 80;
2801	a[27] = 100;
2802	a[28] = 200;
2803	a[29] = 300;
2804	a[30] = 400;
2805	a[31] = 500;
2806
2807	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2808	b[9] = b[10] = 1;
2809	b[11] = b[12] = 2;
2810	b[13] = b[14] = b[15] = b[16] = 3;
2811	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2812	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2813	b[28] = b[29] = 6;
2814	b[30] = b[31] = 7;
2815}
2816
2817/* The minimum additive increment value for the congestion control table */
2818#define CC_MIN_INCR 2U
2819
2820/**
2821 *	t3_load_mtus - write the MTU and congestion control HW tables
2822 *	@adap: the adapter
2823 *	@mtus: the unrestricted values for the MTU table
2824 *	@alpha: the values for the congestion control alpha parameter
2825 *	@beta: the values for the congestion control beta parameter
2826 *	@mtu_cap: the maximum permitted effective MTU
2827 *
2828 *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2829 *	Update the high-speed congestion control table with the supplied alpha,
2830 * 	beta, and MTUs.
2831 */
2832void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2833		  unsigned short alpha[NCCTRL_WIN],
2834		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2835{
2836	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2837		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2838		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2839		28672, 40960, 57344, 81920, 114688, 163840, 229376
2840	};
2841
2842	unsigned int i, w;
2843
2844	for (i = 0; i < NMTUS; ++i) {
2845		unsigned int mtu = min(mtus[i], mtu_cap);
2846		unsigned int log2 = fls(mtu);
2847
2848		if (!(mtu & ((1 << log2) >> 2)))	/* round */
2849			log2--;
2850		t3_write_reg(adap, A_TP_MTU_TABLE,
2851			     (i << 24) | (log2 << 16) | mtu);
2852
2853		for (w = 0; w < NCCTRL_WIN; ++w) {
2854			unsigned int inc;
2855
2856			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2857				  CC_MIN_INCR);
2858
2859			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2860				     (w << 16) | (beta[w] << 13) | inc);
2861		}
2862	}
2863}
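
/*
 * Example (illustrative): for mtu = 1500 in a window with alpha = 2
 * and avg_pkts = 80, inc = max((1500 - 40) * 2 / 80, CC_MIN_INCR) = 36.
 */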
2864
2865/**
2866 *	t3_tp_get_mib_stats - read TP's MIB counters
2867 *	@adap: the adapter
2868 *	@tps: holds the returned counter values
2869 *
2870 *	Returns the values of TP's MIB counters.
2871 */
2872void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2873{
2874	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2875			 sizeof(*tps) / sizeof(u32), 0);
2876}
2877
2878#define ulp_region(adap, name, start, len) \
2879	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2880	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2881		     (start) + (len) - 1); \
2882	start += len
2883
2884#define ulptx_region(adap, name, start, len) \
2885	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2886	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2887		     (start) + (len) - 1)
2888
2889static void ulp_config(struct adapter *adap, const struct tp_params *p)
2890{
2891	unsigned int m = p->chan_rx_size;
2892
2893	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2894	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2895	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2896	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2897	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2898	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2899	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2900	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2901}
2902
2903/**
2904 *	t3_set_proto_sram - set the contents of the protocol sram
2905 *	@adap: the adapter
2906 *	@data: the protocol image
2907 *
2908 *	Write the contents of the protocol SRAM.
2909 */
2910int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2911{
2912	int i;
2913	const __be32 *buf = (const __be32 *)data;
2914
2915	for (i = 0; i < PROTO_SRAM_LINES; i++) {
2916		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2917		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2918		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2919		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2920		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2921
2922		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2923		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2924			return -EIO;
2925	}
2926	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2927
2928	return 0;
2929}
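
/*
 * Each protocol SRAM line is five 32-bit words: they are staged in
 * FIELD5..FIELD1 and committed by writing FIELD0 with the line number
 * in bits 1+ and bit 31 set as the (apparent) write strobe;
 * t3_wait_op_done() then polls bit 0 until it reads back as set,
 * signalling completion.
 */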
2930
2931void t3_config_trace_filter(struct adapter *adapter,
2932			    const struct trace_params *tp, int filter_index,
2933			    int invert, int enable)
2934{
2935	u32 addr, key[4], mask[4];
2936
2937	key[0] = tp->sport | (tp->sip << 16);
2938	key[1] = (tp->sip >> 16) | (tp->dport << 16);
2939	key[2] = tp->dip;
2940	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2941
2942	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2943	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2944	mask[2] = tp->dip_mask;
2945	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2946
2947	if (invert)
2948		key[3] |= (1 << 29);
2949	if (enable)
2950		key[3] |= (1 << 28);
2951
2952	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2953	tp_wr_indirect(adapter, addr++, key[0]);
2954	tp_wr_indirect(adapter, addr++, mask[0]);
2955	tp_wr_indirect(adapter, addr++, key[1]);
2956	tp_wr_indirect(adapter, addr++, mask[1]);
2957	tp_wr_indirect(adapter, addr++, key[2]);
2958	tp_wr_indirect(adapter, addr++, mask[2]);
2959	tp_wr_indirect(adapter, addr++, key[3]);
2960	tp_wr_indirect(adapter, addr, mask[3]);
2961	t3_read_reg(adapter, A_TP_PIO_DATA);
2962}
2963
2964/**
2965 *	t3_config_sched - configure a HW traffic scheduler
2966 *	@adap: the adapter
2967 *	@kbps: target rate in Kbps
2968 *	@sched: the scheduler index
2969 *
2970 *	Configure a HW scheduler for the target rate
2971 */
2972int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2973{
2974	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2975	unsigned int clk = adap->params.vpd.cclk * 1000;
2976	unsigned int selected_cpt = 0, selected_bpt = 0;
2977
2978	if (kbps > 0) {
2979		kbps *= 125;	/* -> bytes */
2980		for (cpt = 1; cpt <= 255; cpt++) {
2981			tps = clk / cpt;
2982			bpt = (kbps + tps / 2) / tps;
2983			if (bpt > 0 && bpt <= 255) {
2984				v = bpt * tps;
2985				delta = v >= kbps ? v - kbps : kbps - v;
2986				if (delta <= mindelta) {
2987					mindelta = delta;
2988					selected_cpt = cpt;
2989					selected_bpt = bpt;
2990				}
2991			} else if (selected_cpt)
2992				break;
2993		}
2994		if (!selected_cpt)
2995			return -EINVAL;
2996	}
2997	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2998		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2999	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3000	if (sched & 1)
3001		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3002	else
3003		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3004	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3005	return 0;
3006}
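
/*
 * Illustrative numbers, assuming a 200 MHz core clock: a 1 Gb/s target
 * is 125000000 bytes/sec, and cpt = 8, bpt = 5 hits it exactly
 * (200 MHz / 8 = 25000000 ticks/sec * 5 bytes).  Because the delta test
 * uses <=, the search keeps the largest cpt that achieves the minimum
 * error.
 */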
3007
3008static int tp_init(struct adapter *adap, const struct tp_params *p)
3009{
3010	int busy = 0;
3011
3012	tp_config(adap, p);
3013	t3_set_vlan_accel(adap, 3, 0);
3014
3015	if (is_offload(adap)) {
3016		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3017		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3018		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3019				       0, 1000, 5);
3020		if (busy)
3021			CH_ERR(adap, "TP initialization timed out\n");
3022	}
3023
3024	if (!busy)
3025		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3026	return busy;
3027}
3028
3029/*
3030 * Perform the bits of HW initialization that are dependent on the Tx
3031 * channels being used.
3032 */
3033static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3034{
3035	int i;
3036
3037	if (chan_map != 3) {                                 /* one channel */
3038		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3039		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3040		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3041			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3042					      F_TPTXPORT1EN | F_PORT1ACTIVE));
3043		t3_write_reg(adap, A_PM1_TX_CFG,
3044			     chan_map == 1 ? 0xffffffff : 0);
3045	} else {                                             /* two channels */
3046		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3047		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3048		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3049			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3050		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3051			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3052			     F_ENFORCEPKT);
3053		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3054		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3055		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3056			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3057		for (i = 0; i < 16; i++)
3058			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3059				     (i << 16) | 0x1010);
3060	}
3061}
3062
3063static int calibrate_xgm(struct adapter *adapter)
3064{
3065	if (uses_xaui(adapter)) {
3066		unsigned int v, i;
3067
3068		for (i = 0; i < 5; ++i) {
3069			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3070			t3_read_reg(adapter, A_XGM_XAUI_IMP);
3071			msleep(1);
3072			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3073			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3074				t3_write_reg(adapter, A_XGM_XAUI_IMP,
3075					     V_XAUIIMP(G_CALIMP(v) >> 2));
3076				return 0;
3077			}
3078		}
3079		CH_ERR(adapter, "MAC calibration failed\n");
3080		return -1;
3081	} else {
3082		t3_write_reg(adapter, A_XGM_RGMII_IMP,
3083			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3084		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3085				 F_XGM_IMPSETUPDATE);
3086	}
3087	return 0;
3088}
3089
3090static void calibrate_xgm_t3b(struct adapter *adapter)
3091{
3092	if (!uses_xaui(adapter)) {
3093		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3094			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3095		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3096		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3097				 F_XGM_IMPSETUPDATE);
3098		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3099				 0);
3100		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3101		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3102	}
3103}
3104
3105struct mc7_timing_params {
3106	unsigned char ActToPreDly;
3107	unsigned char ActToRdWrDly;
3108	unsigned char PreCyc;
3109	unsigned char RefCyc[5];
3110	unsigned char BkCyc;
3111	unsigned char WrToRdDly;
3112	unsigned char RdToWrDly;
3113};
3114
3115/*
3116 * Write a value to a register and check that the write completed.  These
3117 * writes normally complete in a cycle or two, so one read should suffice.
3118 * The very first read exists to flush the posted write to the device.
3119 */
3120static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3121{
3122	t3_write_reg(adapter, addr, val);
3123	t3_read_reg(adapter, addr);	/* flush */
3124	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3125		return 0;
3126	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3127	return -EIO;
3128}
3129
3130static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3131{
3132	static const unsigned int mc7_mode[] = {
3133		0x632, 0x642, 0x652, 0x432, 0x442
3134	};
3135	static const struct mc7_timing_params mc7_timings[] = {
3136		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3137		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3138		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3139		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3140		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3141	};
3142
3143	u32 val;
3144	unsigned int width, density, slow, attempts;
3145	struct adapter *adapter = mc7->adapter;
3146	const struct mc7_timing_params *p = &mc7_timings[mem_type];
3147
3148	if (!mc7->size)
3149		return 0;
3150
3151	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3152	slow = val & F_SLOW;
3153	width = G_WIDTH(val);
3154	density = G_DEN(val);
3155
3156	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3157	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
3158	msleep(1);
3159
3160	if (!slow) {
3161		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3162		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3163		msleep(1);
3164		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3165		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3166			CH_ERR(adapter, "%s MC7 calibration timed out\n",
3167			       mc7->name);
3168			goto out_fail;
3169		}
3170	}
3171
3172	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3173		     V_ACTTOPREDLY(p->ActToPreDly) |
3174		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3175		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3176		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3177
3178	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3179		     val | F_CLKEN | F_TERM150);
3180	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
3181
3182	if (!slow)
3183		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3184				 F_DLLENB);
3185	udelay(1);
3186
3187	val = slow ? 3 : 6;
3188	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3189	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3190	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3191	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3192		goto out_fail;
3193
3194	if (!slow) {
3195		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3196		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3197		udelay(5);
3198	}
3199
3200	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3201	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3202	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3203	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3204		       mc7_mode[mem_type]) ||
3205	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3206	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3207		goto out_fail;
3208
3209	/* clock value is in KHz */
3210	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
3211	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */
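	/*
	 * Net effect: mc7_clock now holds the number of memory-clock
	 * cycles per 7812.5 ns, the conventional average DRAM refresh
	 * interval (64 ms / 8192 rows); this count is programmed into
	 * V_PREREFDIV below.
	 */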
3212
3213	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3214		     F_PERREFEN | V_PREREFDIV(mc7_clock));
3215	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */
3216
3217	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3218	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3219	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3220	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3221		     (mc7->size << width) - 1);
3222	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3223	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */
3224
3225	attempts = 50;
3226	do {
3227		msleep(250);
3228		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3229	} while ((val & F_BUSY) && --attempts);
3230	if (val & F_BUSY) {
3231		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3232		goto out_fail;
3233	}
3234
3235	/* Enable normal memory accesses. */
3236	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3237	return 0;
3238
3239out_fail:
3240	return -1;
3241}
3242
3243static void config_pcie(struct adapter *adap)
3244{
3245	static const u16 ack_lat[4][6] = {
3246		{237, 416, 559, 1071, 2095, 4143},
3247		{128, 217, 289, 545, 1057, 2081},
3248		{73, 118, 154, 282, 538, 1050},
3249		{67, 107, 86, 150, 278, 534}
3250	};
3251	static const u16 rpl_tmr[4][6] = {
3252		{711, 1248, 1677, 3213, 6285, 12429},
3253		{384, 651, 867, 1635, 3171, 6243},
3254		{219, 354, 462, 846, 1614, 3150},
3255		{201, 321, 258, 450, 834, 1602}
3256	};
3257
3258	u16 val, devid;
3259	unsigned int log2_width, pldsize;
3260	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3261
3262	pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
3263	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3264
3265	pci_read_config_word(adap->pdev, 0x2, &devid);	/* PCI device ID */
3266	if (devid == 0x37) {
3267		pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
3268					   val & ~PCI_EXP_DEVCTL_READRQ &
3269					   ~PCI_EXP_DEVCTL_PAYLOAD);
3270		pldsize = 0;
3271	}
3272
3273	pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);
3274
3275	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3276	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3277	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3278	log2_width = fls(adap->params.pci.width) - 1;
3279	acklat = ack_lat[log2_width][pldsize];
3280	if (val & PCI_EXP_LNKCTL_ASPM_L0S)	/* check L0s enable */
3281		acklat += fst_trn_tx * 4;
3282	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3283
3284	if (adap->params.rev == 0)
3285		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3286				 V_T3A_ACKLAT(M_T3A_ACKLAT),
3287				 V_T3A_ACKLAT(acklat));
3288	else
3289		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3290				 V_ACKLAT(acklat));
3291
3292	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3293			 V_REPLAYLMT(rpllmt));
3294
3295	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3296	t3_set_reg_field(adap, A_PCIE_CFG, 0,
3297			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3298			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3299}
3300
3301/*
3302 * Initialize and configure T3 HW modules.  This performs the
3303 * initialization steps that need to be done once after a card is reset.
3304 * MAC and PHY initialization is handled separately whenever a port is enabled.
3305 *
3306 * fw_params are passed to FW and their value is platform dependent.  Only the
3307 * top 8 bits are available for use, the rest must be 0.
3308 */
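/*
 * For example (illustrative value only): fw_params = 0xab000000 honors the
 * contract above, since only the top 8 bits are set, whereas 0x000000ab
 * would not.
 */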
3309int t3_init_hw(struct adapter *adapter, u32 fw_params)
3310{
3311	int err = -EIO, attempts, i;
3312	const struct vpd_params *vpd = &adapter->params.vpd;
3313
3314	if (adapter->params.rev > 0)
3315		calibrate_xgm_t3b(adapter);
3316	else if (calibrate_xgm(adapter))
3317		goto out_err;
3318
3319	if (vpd->mclk) {
3320		partition_mem(adapter, &adapter->params.tp);
3321
3322		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3323		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3324		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3325		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3326				adapter->params.mc5.nfilters,
3327				adapter->params.mc5.nroutes))
3328			goto out_err;
3329
3330		for (i = 0; i < 32; i++)
3331			if (clear_sge_ctxt(adapter, i, F_CQ))
3332				goto out_err;
3333	}
3334
3335	if (tp_init(adapter, &adapter->params.tp))
3336		goto out_err;
3337
3338	t3_tp_set_coalescing_size(adapter,
3339				  min(adapter->params.sge.max_pkt_size,
3340				      MAX_RX_COALESCING_LEN), 1);
3341	t3_tp_set_max_rxsize(adapter,
3342			     min(adapter->params.sge.max_pkt_size, 16384U));
3343	ulp_config(adapter, &adapter->params.tp);
3344
3345	if (is_pcie(adapter))
3346		config_pcie(adapter);
3347	else
3348		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3349				 F_DMASTOPEN | F_CLIDECEN);
3350
3351	if (adapter->params.rev == T3_REV_C)
3352		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3353				 F_CFG_CQE_SOP_MASK);
3354
3355	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3356	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3357	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3358	chan_init_hw(adapter, adapter->params.chan_map);
3359	t3_sge_init(adapter, &adapter->params.sge);
3360	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
3361
3362	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3363
3364	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3365	t3_write_reg(adapter, A_CIM_BOOT_CFG,
3366		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3367	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */
3368
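	/*
	 * A_CIM_HOST_ACC_DATA was loaded with the boot parameters above;
	 * the uP presumably clears it once the parameters are consumed, so
	 * a nonzero readback in the poll below means firmware
	 * initialization is still in progress.
	 */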
3369	attempts = 100;
3370	do {			/* wait for uP to initialize */
3371		msleep(20);
3372	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3373	if (!attempts) {
3374		CH_ERR(adapter, "uP initialization timed out\n");
3375		goto out_err;
3376	}
3377
3378	err = 0;
3379out_err:
3380	return err;
3381}
3382
3383/**
3384 *	get_pci_mode - determine a card's PCI mode
3385 *	@adapter: the adapter
3386 *	@p: where to store the PCI settings
3387 *
3388 *	Determines a card's PCI mode and associated parameters, such as speed
3389 *	and width.
3390 */
3391static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3392{
3393	static unsigned short speed_map[] = { 33, 66, 100, 133 };
3394	u32 pci_mode;
3395
3396	if (pci_is_pcie(adapter->pdev)) {
3397		u16 val;
3398
3399		p->variant = PCI_VARIANT_PCIE;
3400		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
3401		p->width = (val >> 4) & 0x3f;	/* PCI_EXP_LNKSTA_NLW field */
3402		return;
3403	}
3404
3405	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3406	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3407	p->width = (pci_mode & F_64BIT) ? 64 : 32;
3408	pci_mode = G_PCIXINITPAT(pci_mode);
3409	if (pci_mode == 0)
3410		p->variant = PCI_VARIANT_PCI;
3411	else if (pci_mode < 4)
3412		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3413	else if (pci_mode < 8)
3414		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3415	else
3416		p->variant = PCI_VARIANT_PCIX_266_MODE2;
3417}
3418
3419/**
3420 *	init_link_config - initialize a link's SW state
3421 *	@lc: structure holding the link state
3422 *	@caps: information about the current card
3423 *
3424 *	Initializes the SW state maintained for each link, including the link's
3425 *	capabilities and default speed/duplex/flow-control/autonegotiation
3426 *	settings.
3427 */
3428static void init_link_config(struct link_config *lc, unsigned int caps)
3429{
3430	lc->supported = caps;
3431	lc->requested_speed = lc->speed = SPEED_INVALID;
3432	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3433	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3434	if (lc->supported & SUPPORTED_Autoneg) {
3435		lc->advertising = lc->supported;
3436		lc->autoneg = AUTONEG_ENABLE;
3437		lc->requested_fc |= PAUSE_AUTONEG;
3438	} else {
3439		lc->advertising = 0;
3440		lc->autoneg = AUTONEG_DISABLE;
3441	}
3442}
3443
3444/**
3445 *	mc7_calc_size - calculate MC7 memory size
3446 *	@cfg: the MC7 configuration
3447 *
3448 *	Calculates the size of an MC7 memory in bytes from the value of its
3449 *	configuration register.
3450 */
3451static unsigned int mc7_calc_size(u32 cfg)
3452{
3453	unsigned int width = G_WIDTH(cfg);
3454	unsigned int banks = !!(cfg & F_BKS) + 1;
3455	unsigned int org = !!(cfg & F_ORG) + 1;
3456	unsigned int density = G_DEN(cfg);
3457	unsigned int MBs = ((256 << density) * banks) / (org << width);
3458
3459	return MBs << 20;
3460}
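/*
 * Worked example (hypothetical cfg encoding): G_DEN(cfg) = 2, F_BKS set
 * (banks = 2), F_ORG clear (org = 1) and G_WIDTH(cfg) = 1 give
 * MBs = ((256 << 2) * 2) / (1 << 1) = 1024, i.e. a 1 GB part after the
 * shift by 20.
 */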
3461
3462static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3463		     unsigned int base_addr, const char *name)
3464{
3465	u32 cfg;
3466
3467	mc7->adapter = adapter;
3468	mc7->name = name;
3469	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3470	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3471	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3472	mc7->width = G_WIDTH(cfg);
3473}
3474
3475static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3476{
3477	u16 devid;
3478
3479	mac->adapter = adapter;
3480	pci_read_config_word(adapter->pdev, 0x2, &devid);	/* PCI device ID */
3481
3482	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
3483		index = 0;
3484	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3485	mac->nucast = 1;
3486
3487	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3488		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3489			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3490		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3491				 F_ENRGMII, 0);
3492	}
3493}
3494
3495static void early_hw_init(struct adapter *adapter,
3496			  const struct adapter_info *ai)
3497{
3498	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);	/* 3 = 10G, 2 = 1G */
3499
3500	mi1_init(adapter, ai);
3501	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
3502		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3503	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3504		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3505	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3506	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3507
3508	if (adapter->params.rev == 0 || !uses_xaui(adapter))
3509		val |= F_ENRGMII;
3510
3511	/* Enable MAC clocks so we can access the registers */
3512	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3513	t3_read_reg(adapter, A_XGM_PORT_CFG);
3514
3515	val |= F_CLKDIVRESET_;
3516	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3517	t3_read_reg(adapter, A_XGM_PORT_CFG);
3518	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3519	t3_read_reg(adapter, A_XGM_PORT_CFG);
3520}
3521
3522/*
3523 * Reset the adapter.
3524 * Older PCIe cards lose their config space during reset, PCI-X
3525 * ones don't.
3526 */
3527int t3_reset_adapter(struct adapter *adapter)
3528{
3529	int i, save_and_restore_pcie =
3530	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3531	uint16_t devid = 0;
3532
3533	if (save_and_restore_pcie)
3534		pci_save_state(adapter->pdev);
3535	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3536
3537	/*
3538	 * Delay: give the device some time to come fully out of reset.
3539	 * XXX The delay time should be tuned.
3540	 */
3541	for (i = 0; i < 10; i++) {
3542		msleep(50);
3543		pci_read_config_word(adapter->pdev, 0x00, &devid);
3544		if (devid == 0x1425)	/* Chelsio's PCI vendor ID */
3545			break;
3546	}
3547
3548	if (devid != 0x1425)
3549		return -1;
3550
3551	if (save_and_restore_pcie)
3552		pci_restore_state(adapter->pdev);
3553	return 0;
3554}
3555
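/*
 * Prime the parity-protected memories: write known data to the SGE egress
 * and response-queue contexts and to every CIM IBQ location, presumably so
 * that later reads of otherwise-uninitialized entries do not trigger
 * parity errors.
 */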
3556static int init_parity(struct adapter *adap)
3557{
3558	int i, err, addr;
3559
3560	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3561		return -EBUSY;
3562
3563	for (err = i = 0; !err && i < 16; i++)
3564		err = clear_sge_ctxt(adap, i, F_EGRESS);
3565	for (i = 0xfff0; !err && i <= 0xffff; i++)
3566		err = clear_sge_ctxt(adap, i, F_EGRESS);
3567	for (i = 0; !err && i < SGE_QSETS; i++)
3568		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3569	if (err)
3570		return err;
3571
3572	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3573	for (i = 0; i < 4; i++)
3574		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3575			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3576				     F_IBQDBGWR | V_IBQDBGQID(i) |
3577				     V_IBQDBGADDR(addr));
3578			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3579					      F_IBQDBGBUSY, 0, 2, 1);
3580			if (err)
3581				return err;
3582		}
3583	return 0;
3584}
3585
3586/*
3587 * Initialize adapter SW state for the various HW modules, set initial values
3588 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3589 * interface.
3590 */
3591int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3592		    int reset)
3593{
3594	int ret;
3595	unsigned int i, j = -1;
3596
3597	get_pci_mode(adapter, &adapter->params.pci);
3598
3599	adapter->params.info = ai;
3600	adapter->params.nports = ai->nports0 + ai->nports1;
3601	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
3602	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3603	/*
3604	 * We used to only run the "adapter check task" once a second if
3605	 * we had PHYs which didn't support interrupts (we would check
3606	 * their link status once a second).  Now we check other conditions
3607	 * in that routine which could potentially impose a very high
3608	 * interrupt load on the system.  As such, we now always scan the
3609	 * adapter state once a second ...
3610	 */
3611	adapter->params.linkpoll_period = 10;	/* tenths of a second */
3612	adapter->params.stats_update_period = is_10G(adapter) ?
3613	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3614	adapter->params.pci.vpd_cap_addr =
3615	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3616	if (!adapter->params.pci.vpd_cap_addr)
3617		return -ENODEV;
3618	ret = get_vpd_params(adapter, &adapter->params.vpd);
3619	if (ret < 0)
3620		return ret;
3621
3622	if (reset && t3_reset_adapter(adapter))
3623		return -1;
3624
3625	t3_sge_prep(adapter, &adapter->params.sge);
3626
3627	if (adapter->params.vpd.mclk) {
3628		struct tp_params *p = &adapter->params.tp;
3629
3630		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3631		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3632		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3633
3634		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3635		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3636		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3637		p->cm_size = t3_mc7_size(&adapter->cm);
3638		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
3639		p->chan_tx_size = p->pmtx_size / p->nchan;
3640		p->rx_pg_size = 64 * 1024;
3641		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3642		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3643		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3644		p->ntimer_qs = p->cm_size >= (128 << 20) ||
3645		    adapter->params.rev > 0 ? 12 : 6;
3646	}
3647
3648	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3649				  t3_mc7_size(&adapter->pmtx) &&
3650				  t3_mc7_size(&adapter->cm);
3651
3652	if (is_offload(adapter)) {
3653		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3654		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3655		    DEFAULT_NFILTERS : 0;
3656		adapter->params.mc5.nroutes = 0;
3657		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3658
3659		init_mtus(adapter->params.mtus);
3660		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3661	}
3662
3663	early_hw_init(adapter, ai);
3664	ret = init_parity(adapter);
3665	if (ret)
3666		return ret;
3667
3668	for_each_port(adapter, i) {
3669		u8 hw_addr[6];
3670		const struct port_type_info *pti;
3671		struct port_info *p = adap2pinfo(adapter, i);
3672
3673		while (!adapter->params.vpd.port_type[++j])
3674			;
3675
3676		pti = &port_types[adapter->params.vpd.port_type[j]];
3677		if (!pti->phy_prep) {
3678			CH_ALERT(adapter, "Invalid port type index %d\n",
3679				 adapter->params.vpd.port_type[j]);
3680			return -EINVAL;
3681		}
3682
3683		p->phy.mdio.dev = adapter->port[i];
3684		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3685				    ai->mdio_ops);
3686		if (ret)
3687			return ret;
3688		mac_prep(&p->mac, adapter, j);
3689
3690		/*
3691		 * The VPD EEPROM stores the base Ethernet address for the
3692		 * card.  A port's address is derived from the base by adding
3693		 * the port's index to the base's low octet.
3694		 */
3695		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3696		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
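		/*
		 * E.g. with a (hypothetical) VPD base of 00:07:43:ab:cd:10,
		 * port 0 gets 00:07:43:ab:cd:10 and port 1 gets
		 * 00:07:43:ab:cd:11.  Only the low octet is adjusted, so
		 * the addition wraps within that octet.
		 */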
3697
3698		eth_hw_addr_set(adapter->port[i], hw_addr);
3699		init_link_config(&p->link_config, p->phy.caps);
3700		p->phy.ops->power_down(&p->phy, 1);
3701
3702		/*
3703		 * If the PHY doesn't support interrupts for link status
3704		 * changes, schedule a scan of the adapter links at least
3705		 * once a second.
3706		 */
3707		if (!(p->phy.caps & SUPPORTED_IRQ) &&
3708		    adapter->params.linkpoll_period > 10)
3709			adapter->params.linkpoll_period = 10;
3710	}
3711
3712	return 0;
3713}
3714
3715void t3_led_ready(struct adapter *adapter)
3716{
3717	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3718			 F_GPIO0_OUT_VAL);
3719}
3720
3721int t3_replay_prep_adapter(struct adapter *adapter)
3722{
3723	const struct adapter_info *ai = adapter->params.info;
3724	unsigned int i, j = -1;
3725	int ret;
3726
3727	early_hw_init(adapter, ai);
3728	ret = init_parity(adapter);
3729	if (ret)
3730		return ret;
3731
3732	for_each_port(adapter, i) {
3733		const struct port_type_info *pti;
3734		struct port_info *p = adap2pinfo(adapter, i);
3735
3736		while (!adapter->params.vpd.port_type[++j])
3737			;
3738
3739		pti = &port_types[adapter->params.vpd.port_type[j]];
3740		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3741		if (ret)
3742			return ret;
3743		p->phy.ops->power_down(&p->phy, 1);
3744	}
3745
3746	return 0;
3747}
3748
v3.15
   1/*
   2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
 
  32#include "common.h"
  33#include "regs.h"
  34#include "sge_defs.h"
  35#include "firmware_exports.h"
  36
  37static void t3_port_intr_clear(struct adapter *adapter, int idx);
  38
  39/**
  40 *	t3_wait_op_done_val - wait until an operation is completed
  41 *	@adapter: the adapter performing the operation
  42 *	@reg: the register to check for completion
  43 *	@mask: a single-bit field within @reg that indicates completion
  44 *	@polarity: the value of the field when the operation is completed
  45 *	@attempts: number of check iterations
  46 *	@delay: delay in usecs between iterations
  47 *	@valp: where to store the value of the register at completion time
  48 *
  49 *	Wait until an operation is completed by checking a bit in a register
  50 *	up to @attempts times.  If @valp is not NULL the value of the register
  51 *	at the time it indicated completion is stored there.  Returns 0 if the
  52 *	operation completes and -EAGAIN otherwise.
  53 */
  54
  55int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
  56			int polarity, int attempts, int delay, u32 *valp)
  57{
  58	while (1) {
  59		u32 val = t3_read_reg(adapter, reg);
  60
  61		if (!!(val & mask) == polarity) {
  62			if (valp)
  63				*valp = val;
  64			return 0;
  65		}
  66		if (--attempts == 0)
  67			return -EAGAIN;
  68		if (delay)
  69			udelay(delay);
  70	}
  71}
  72
  73/**
  74 *	t3_write_regs - write a bunch of registers
  75 *	@adapter: the adapter to program
  76 *	@p: an array of register address/register value pairs
  77 *	@n: the number of address/value pairs
  78 *	@offset: register address offset
  79 *
  80 *	Takes an array of register address/register value pairs and writes each
  81 *	value to the corresponding register.  Register addresses are adjusted
  82 *	by the supplied offset.
  83 */
  84void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
  85		   int n, unsigned int offset)
  86{
  87	while (n--) {
  88		t3_write_reg(adapter, p->reg_addr + offset, p->val);
  89		p++;
  90	}
  91}
  92
  93/**
  94 *	t3_set_reg_field - set a register field to a value
  95 *	@adapter: the adapter to program
  96 *	@addr: the register address
  97 *	@mask: specifies the portion of the register to modify
  98 *	@val: the new value for the register field
  99 *
 100 *	Sets a register field specified by the supplied mask to the
 101 *	given value.
 102 */
 103void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
 104		      u32 val)
 105{
 106	u32 v = t3_read_reg(adapter, addr) & ~mask;
 107
 108	t3_write_reg(adapter, addr, v | val);
 109	t3_read_reg(adapter, addr);	/* flush */
 110}
 111
 112/**
 113 *	t3_read_indirect - read indirectly addressed registers
 114 *	@adap: the adapter
 115 *	@addr_reg: register holding the indirect address
 116 *	@data_reg: register holding the value of the indirect register
 117 *	@vals: where the read register values are stored
 118 *	@start_idx: index of first indirect register to read
 119 *	@nregs: how many indirect registers to read
 120 *
 121 *	Reads registers that are accessed indirectly through an address/data
 122 *	register pair.
 123 */
 124static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
 125			     unsigned int data_reg, u32 *vals,
 126			     unsigned int nregs, unsigned int start_idx)
 127{
 128	while (nregs--) {
 129		t3_write_reg(adap, addr_reg, start_idx);
 130		*vals++ = t3_read_reg(adap, data_reg);
 131		start_idx++;
 132	}
 133}
 134
 135/**
 136 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 137 *	@mc7: identifies MC7 to read from
 138 *	@start: index of first 64-bit word to read
 139 *	@n: number of 64-bit words to read
 140 *	@buf: where to store the read result
 141 *
 142 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 143 *	accesses.
 144 */
 145int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
 146		   u64 *buf)
 147{
 148	static const int shift[] = { 0, 0, 16, 24 };
 149	static const int step[] = { 0, 32, 16, 8 };
 150
 151	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
 152	struct adapter *adap = mc7->adapter;
 153
 154	if (start >= size64 || start + n > size64)
 155		return -EINVAL;
 156
 157	start *= (8 << mc7->width);
 158	while (n--) {
 159		int i;
 160		u64 val64 = 0;
 161
 162		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
 163			int attempts = 10;
 164			u32 val;
 165
 166			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
 167			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
 168			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
 169			while ((val & F_BUSY) && attempts--)
 170				val = t3_read_reg(adap,
 171						  mc7->offset + A_MC7_BD_OP);
 172			if (val & F_BUSY)
 173				return -EIO;
 174
 175			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
 176			if (mc7->width == 0) {
 177				val64 = t3_read_reg(adap,
 178						    mc7->offset +
 179						    A_MC7_BD_DATA0);
 180				val64 |= (u64) val << 32;
 181			} else {
 182				if (mc7->width > 1)
 183					val >>= shift[mc7->width];
 184				val64 |= (u64) val << (step[mc7->width] * i);
 185			}
 186			start += 8;
 187		}
 188		*buf++ = val64;
 189	}
 190	return 0;
 191}
 192
 193/*
 194 * Initialize MI1.
 195 */
 196static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
 197{
 198	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
 199	u32 val = F_PREEN | V_CLKDIV(clkdiv);
 200
 201	t3_write_reg(adap, A_MI1_CFG, val);
 202}
 203
 204#define MDIO_ATTEMPTS 20
 205
 206/*
 207 * MI1 read/write operations for clause 22 PHYs.
 208 */
 209static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
 210		       u16 reg_addr)
 211{
 212	struct port_info *pi = netdev_priv(dev);
 213	struct adapter *adapter = pi->adapter;
 214	int ret;
 215	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
 216
 217	mutex_lock(&adapter->mdio_lock);
 218	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
 219	t3_write_reg(adapter, A_MI1_ADDR, addr);
 220	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
 221	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
 222	if (!ret)
 223		ret = t3_read_reg(adapter, A_MI1_DATA);
 224	mutex_unlock(&adapter->mdio_lock);
 225	return ret;
 226}
 227
 228static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
 229			u16 reg_addr, u16 val)
 230{
 231	struct port_info *pi = netdev_priv(dev);
 232	struct adapter *adapter = pi->adapter;
 233	int ret;
 234	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
 235
 236	mutex_lock(&adapter->mdio_lock);
 237	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
 238	t3_write_reg(adapter, A_MI1_ADDR, addr);
 239	t3_write_reg(adapter, A_MI1_DATA, val);
 240	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
 241	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
 242	mutex_unlock(&adapter->mdio_lock);
 243	return ret;
 244}
 245
 246static const struct mdio_ops mi1_mdio_ops = {
 247	.read = t3_mi1_read,
 248	.write = t3_mi1_write,
 249	.mode_support = MDIO_SUPPORTS_C22
 250};
 251
 252/*
 253 * Performs the address cycle for clause 45 PHYs.
 254 * Must be called with the MDIO_LOCK held.
 255 */
 256static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
 257		       int reg_addr)
 258{
 259	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
 260
 261	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
 262	t3_write_reg(adapter, A_MI1_ADDR, addr);
 263	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
 264	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
 265	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
 266			       MDIO_ATTEMPTS, 10);
 267}
 268
 269/*
 270 * MI1 read/write operations for indirect-addressed PHYs.
 271 */
 272static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
 273			u16 reg_addr)
 274{
 275	struct port_info *pi = netdev_priv(dev);
 276	struct adapter *adapter = pi->adapter;
 277	int ret;
 278
 279	mutex_lock(&adapter->mdio_lock);
 280	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
 281	if (!ret) {
 282		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
 283		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
 284				      MDIO_ATTEMPTS, 10);
 285		if (!ret)
 286			ret = t3_read_reg(adapter, A_MI1_DATA);
 287	}
 288	mutex_unlock(&adapter->mdio_lock);
 289	return ret;
 290}
 291
 292static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
 293			 u16 reg_addr, u16 val)
 294{
 295	struct port_info *pi = netdev_priv(dev);
 296	struct adapter *adapter = pi->adapter;
 297	int ret;
 298
 299	mutex_lock(&adapter->mdio_lock);
 300	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
 301	if (!ret) {
 302		t3_write_reg(adapter, A_MI1_DATA, val);
 303		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
 304		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
 305				      MDIO_ATTEMPTS, 10);
 306	}
 307	mutex_unlock(&adapter->mdio_lock);
 308	return ret;
 309}
 310
 311static const struct mdio_ops mi1_mdio_ext_ops = {
 312	.read = mi1_ext_read,
 313	.write = mi1_ext_write,
 314	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
 315};
 316
 317/**
 318 *	t3_mdio_change_bits - modify the value of a PHY register
 319 *	@phy: the PHY to operate on
 320 *	@mmd: the device address
 321 *	@reg: the register address
 322 *	@clear: what part of the register value to mask off
 323 *	@set: what part of the register value to set
 324 *
 325 *	Changes the value of a PHY register by applying a mask to its current
 326 *	value and ORing the result with a new value.
 327 */
 328int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
 329			unsigned int set)
 330{
 331	int ret;
 332	unsigned int val;
 333
 334	ret = t3_mdio_read(phy, mmd, reg, &val);
 335	if (!ret) {
 336		val &= ~clear;
 337		ret = t3_mdio_write(phy, mmd, reg, val | set);
 338	}
 339	return ret;
 340}
 341
 342/**
 343 *	t3_phy_reset - reset a PHY block
 344 *	@phy: the PHY to operate on
 345 *	@mmd: the device address of the PHY block to reset
 346 *	@wait: how long to wait for the reset to complete in 1ms increments
 347 *
 348 *	Resets a PHY block and optionally waits for the reset to complete.
 349 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 350 *	for 10G PHYs.
 351 */
 352int t3_phy_reset(struct cphy *phy, int mmd, int wait)
 353{
 354	int err;
 355	unsigned int ctl;
 356
 357	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
 358				  MDIO_CTRL1_RESET);
 359	if (err || !wait)
 360		return err;
 361
 362	do {
 363		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
 364		if (err)
 365			return err;
 366		ctl &= MDIO_CTRL1_RESET;
 367		if (ctl)
 368			msleep(1);
 369	} while (ctl && --wait);
 370
 371	return ctl ? -1 : 0;
 372}
 373
 374/**
 375 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 376 *	@phy: the PHY to operate on
 377 *	@advert: bitmap of capabilities the PHY should advertise
 378 *
 379 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 380 *	requested capabilities.
 381 */
 382int t3_phy_advertise(struct cphy *phy, unsigned int advert)
 383{
 384	int err;
 385	unsigned int val = 0;
 386
 387	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
 388	if (err)
 389		return err;
 390
 391	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
 392	if (advert & ADVERTISED_1000baseT_Half)
 393		val |= ADVERTISE_1000HALF;
 394	if (advert & ADVERTISED_1000baseT_Full)
 395		val |= ADVERTISE_1000FULL;
 396
 397	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
 398	if (err)
 399		return err;
 400
 401	val = 1;
 402	if (advert & ADVERTISED_10baseT_Half)
 403		val |= ADVERTISE_10HALF;
 404	if (advert & ADVERTISED_10baseT_Full)
 405		val |= ADVERTISE_10FULL;
 406	if (advert & ADVERTISED_100baseT_Half)
 407		val |= ADVERTISE_100HALF;
 408	if (advert & ADVERTISED_100baseT_Full)
 409		val |= ADVERTISE_100FULL;
 410	if (advert & ADVERTISED_Pause)
 411		val |= ADVERTISE_PAUSE_CAP;
 412	if (advert & ADVERTISED_Asym_Pause)
 413		val |= ADVERTISE_PAUSE_ASYM;
 414	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
 415}
 416
 417/**
 418 *	t3_phy_advertise_fiber - set fiber PHY advertisement register
 419 *	@phy: the PHY to operate on
 420 *	@advert: bitmap of capabilities the PHY should advertise
 421 *
 422 *	Sets a fiber PHY's advertisement register to advertise the
 423 *	requested capabilities.
 424 */
 425int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
 426{
 427	unsigned int val = 0;
 428
 429	if (advert & ADVERTISED_1000baseT_Half)
 430		val |= ADVERTISE_1000XHALF;
 431	if (advert & ADVERTISED_1000baseT_Full)
 432		val |= ADVERTISE_1000XFULL;
 433	if (advert & ADVERTISED_Pause)
 434		val |= ADVERTISE_1000XPAUSE;
 435	if (advert & ADVERTISED_Asym_Pause)
 436		val |= ADVERTISE_1000XPSE_ASYM;
 437	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
 438}
 439
 440/**
 441 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 442 *	@phy: the PHY to operate on
 443 *	@speed: requested PHY speed
 444 *	@duplex: requested PHY duplex
 445 *
 446 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 447 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 448 */
 449int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
 450{
 451	int err;
 452	unsigned int ctl;
 453
 454	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
 455	if (err)
 456		return err;
 457
 458	if (speed >= 0) {
 459		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
 460		if (speed == SPEED_100)
 461			ctl |= BMCR_SPEED100;
 462		else if (speed == SPEED_1000)
 463			ctl |= BMCR_SPEED1000;
 464	}
 465	if (duplex >= 0) {
 466		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
 467		if (duplex == DUPLEX_FULL)
 468			ctl |= BMCR_FULLDPLX;
 469	}
 470	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
 471		ctl |= BMCR_ANENABLE;
 472	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
 473}
 474
 475int t3_phy_lasi_intr_enable(struct cphy *phy)
 476{
 477	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
 478			     MDIO_PMA_LASI_LSALARM);
 479}
 480
 481int t3_phy_lasi_intr_disable(struct cphy *phy)
 482{
 483	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
 484}
 485
 486int t3_phy_lasi_intr_clear(struct cphy *phy)
 487{
 488	u32 val;
 489
 490	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
 491}
 492
 493int t3_phy_lasi_intr_handler(struct cphy *phy)
 494{
 495	unsigned int status;
 496	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
 497			       &status);
 498
 499	if (err)
 500		return err;
 501	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
 502}
 503
 504static const struct adapter_info t3_adap_info[] = {
 505	{1, 1, 0,
 506	 F_GPIO2_OEN | F_GPIO4_OEN |
 507	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
 508	 &mi1_mdio_ops, "Chelsio PE9000"},
 509	{1, 1, 0,
 510	 F_GPIO2_OEN | F_GPIO4_OEN |
 511	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
 512	 &mi1_mdio_ops, "Chelsio T302"},
 513	{1, 0, 0,
 514	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
 515	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
 516	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
 517	 &mi1_mdio_ext_ops, "Chelsio T310"},
 518	{1, 1, 0,
 519	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
 520	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
 521	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
 522	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
 523	 &mi1_mdio_ext_ops, "Chelsio T320"},
 524	{},
 525	{},
 526	{1, 0, 0,
 527	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
 528	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
 529	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
 530	 &mi1_mdio_ext_ops, "Chelsio T310" },
 531	{1, 0, 0,
 532	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
 533	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
 534	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
 535	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
 536};
 537
 538/*
 539 * Return the adapter_info structure with a given index.  Out-of-range indices
 540 * return NULL.
 541 */
 542const struct adapter_info *t3_get_adapter_info(unsigned int id)
 543{
 544	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
 545}
 546
 547struct port_type_info {
 548	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
 549			int phy_addr, const struct mdio_ops *ops);
 550};
 551
 552static const struct port_type_info port_types[] = {
 553	{ NULL },
 554	{ t3_ael1002_phy_prep },
 555	{ t3_vsc8211_phy_prep },
 556	{ NULL},
 557	{ t3_xaui_direct_phy_prep },
 558	{ t3_ael2005_phy_prep },
 559	{ t3_qt2045_phy_prep },
 560	{ t3_ael1006_phy_prep },
 561	{ NULL },
 562	{ t3_aq100x_phy_prep },
 563	{ t3_ael2020_phy_prep },
 564};
 565
 566#define VPD_ENTRY(name, len) \
 567	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
 568
 569/*
 570 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 571 * VPD-R sections.
 572 */
 573struct t3_vpd {
 574	u8 id_tag;
 575	u8 id_len[2];
 576	u8 id_data[16];
 577	u8 vpdr_tag;
 578	u8 vpdr_len[2];
 579	VPD_ENTRY(pn, 16);	/* part number */
 580	VPD_ENTRY(ec, 16);	/* EC level */
 581	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
 582	VPD_ENTRY(na, 12);	/* MAC address base */
 583	VPD_ENTRY(cclk, 6);	/* core clock */
 584	VPD_ENTRY(mclk, 6);	/* mem clock */
 585	VPD_ENTRY(uclk, 6);	/* uP clk */
 586	VPD_ENTRY(mdc, 6);	/* MDIO clk */
 587	VPD_ENTRY(mt, 2);	/* mem timing */
 588	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
 589	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
 590	VPD_ENTRY(port0, 2);	/* PHY0 complex */
 591	VPD_ENTRY(port1, 2);	/* PHY1 complex */
 592	VPD_ENTRY(port2, 2);	/* PHY2 complex */
 593	VPD_ENTRY(port3, 2);	/* PHY3 complex */
 594	VPD_ENTRY(rv, 1);	/* csum */
 595	u32 pad;		/* for multiple-of-4 sizing and alignment */
 596};
 597
 598#define EEPROM_MAX_POLL   40
 599#define EEPROM_STAT_ADDR  0x4000
 600#define VPD_BASE          0xc00
 601
 602/**
 603 *	t3_seeprom_read - read a VPD EEPROM location
 604 *	@adapter: adapter to read
 605 *	@addr: EEPROM address
 606 *	@data: where to store the read data
 607 *
 608 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 609 *	VPD ROM capability.  A zero is written to the flag bit when the
 610 *	address is written to the control register.  The hardware device will
 611 *	set the flag to 1 when 4 bytes have been read into the data register.
 612 */
 613int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
 614{
 615	u16 val;
 616	int attempts = EEPROM_MAX_POLL;
 617	u32 v;
 618	unsigned int base = adapter->params.pci.vpd_cap_addr;
 619
 620	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
 621		return -EINVAL;
 622
 623	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
 624	do {
 625		udelay(10);
 626		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
 627	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
 628
 629	if (!(val & PCI_VPD_ADDR_F)) {
 630		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
 631		return -EIO;
 632	}
 633	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
 634	*data = cpu_to_le32(v);
 635	return 0;
 636}
 637
 638/**
 639 *	t3_seeprom_write - write a VPD EEPROM location
 640 *	@adapter: adapter to write
 641 *	@addr: EEPROM address
 642 *	@data: value to write
 643 *
 644 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 645 *	VPD ROM capability.
 646 */
 647int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
 648{
 649	u16 val;
 650	int attempts = EEPROM_MAX_POLL;
 651	unsigned int base = adapter->params.pci.vpd_cap_addr;
 652
 653	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
 654		return -EINVAL;
 655
 656	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
 657			       le32_to_cpu(data));
 658	pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
 659			      addr | PCI_VPD_ADDR_F);
 660	do {
 661		msleep(1);
 662		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
 663	} while ((val & PCI_VPD_ADDR_F) && --attempts);
 664
 665	if (val & PCI_VPD_ADDR_F) {
 666		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
 667		return -EIO;
 668	}
 669	return 0;
 670}
 671
 672/**
 673 *	t3_seeprom_wp - enable/disable EEPROM write protection
 674 *	@adapter: the adapter
 675 *	@enable: 1 to enable write protection, 0 to disable it
 676 *
 677 *	Enables or disables write protection on the serial EEPROM.
 678 */
 679int t3_seeprom_wp(struct adapter *adapter, int enable)
 680{
 681	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
 
 
 
 
 682}
 683
 684/**
 685 *	get_vpd_params - read VPD parameters from VPD EEPROM
 686 *	@adapter: adapter to read
 687 *	@p: where to store the parameters
 688 *
 689 *	Reads card parameters stored in VPD EEPROM.
 690 */
 691static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
 692{
 693	int i, addr, ret;
 694	struct t3_vpd vpd;
 
 
 695
 696	/*
 697	 * Card information is normally at VPD_BASE but some early cards had
 698	 * it at 0.
 699	 */
 700	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
 701	if (ret)
 702		return ret;
 703	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
 704
 705	for (i = 0; i < sizeof(vpd); i += 4) {
 706		ret = t3_seeprom_read(adapter, addr + i,
 707				      (__le32 *)((u8 *)&vpd + i));
 708		if (ret)
 709			return ret;
 710	}
 711
 712	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
 713	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
 714	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
 715	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
 716	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
 
 
 
 
 
 
 
 
 
 
 717	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
 718
 719	/* Old eeproms didn't have port information */
 720	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
 721		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
 722		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
 723	} else {
 724		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
 725		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
 726		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
 727		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
 
 
 
 
 
 
 728	}
 729
 730	for (i = 0; i < 6; i++)
 731		p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 +
 732				 hex_to_bin(vpd.na_data[2 * i + 1]);
 733	return 0;
 734}
 735
 736/* serial flash and firmware constants */
 737enum {
 738	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
 739	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
 740	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */
 741
 742	/* flash command opcodes */
 743	SF_PROG_PAGE = 2,	/* program page */
 744	SF_WR_DISABLE = 4,	/* disable writes */
 745	SF_RD_STATUS = 5,	/* read status register */
 746	SF_WR_ENABLE = 6,	/* enable writes */
 747	SF_RD_DATA_FAST = 0xb,	/* read flash */
 748	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
 749
 750	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
 751	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
 752	FW_MIN_SIZE = 8            /* at least version and csum */
 753};
 754
 755/**
 756 *	sf1_read - read data from the serial flash
 757 *	@adapter: the adapter
 758 *	@byte_cnt: number of bytes to read
 759 *	@cont: whether another operation will be chained
 760 *	@valp: where to store the read data
 761 *
 762 *	Reads up to 4 bytes of data from the serial flash.  The location of
 763 *	the read needs to be specified prior to calling this by issuing the
 764 *	appropriate commands to the serial flash.
 765 */
 766static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
 767		    u32 *valp)
 768{
 769	int ret;
 770
 771	if (!byte_cnt || byte_cnt > 4)
 772		return -EINVAL;
 773	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
 774		return -EBUSY;
 775	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
 776	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
 777	if (!ret)
 778		*valp = t3_read_reg(adapter, A_SF_DATA);
 779	return ret;
 780}
 781
 782/**
 783 *	sf1_write - write data to the serial flash
 784 *	@adapter: the adapter
 785 *	@byte_cnt: number of bytes to write
 786 *	@cont: whether another operation will be chained
 787 *	@val: value to write
 788 *
 789 *	Writes up to 4 bytes of data to the serial flash.  The location of
 790 *	the write needs to be specified prior to calling this by issuing the
 791 *	appropriate commands to the serial flash.
 792 */
 793static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
 794		     u32 val)
 795{
 796	if (!byte_cnt || byte_cnt > 4)
 797		return -EINVAL;
 798	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
 799		return -EBUSY;
 800	t3_write_reg(adapter, A_SF_DATA, val);
 801	t3_write_reg(adapter, A_SF_OP,
 802		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
 803	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
 804}
 805
 806/**
 807 *	flash_wait_op - wait for a flash operation to complete
 808 *	@adapter: the adapter
 809 *	@attempts: max number of polls of the status register
 810 *	@delay: delay between polls in ms
 811 *
 812 *	Wait for a flash operation to complete by polling the status register.
 813 */
 814static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
 815{
 816	int ret;
 817	u32 status;
 818
 819	while (1) {
 820		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
 821		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
 822			return ret;
 823		if (!(status & 1))
 824			return 0;
 825		if (--attempts == 0)
 826			return -EAGAIN;
 827		if (delay)
 828			msleep(delay);
 829	}
 830}
 831
 832/**
 833 *	t3_read_flash - read words from serial flash
 834 *	@adapter: the adapter
 835 *	@addr: the start address for the read
 836 *	@nwords: how many 32-bit words to read
 837 *	@data: where to store the read data
 838 *	@byte_oriented: whether to store data as bytes or as words
 839 *
 840 *	Read the specified number of 32-bit words from the serial flash.
 841 *	If @byte_oriented is set the read data is stored as a byte array
 842 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 843 *	natural endianess.
 844 */
 845static int t3_read_flash(struct adapter *adapter, unsigned int addr,
 846			 unsigned int nwords, u32 *data, int byte_oriented)
 847{
 848	int ret;
 849
 850	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
 851		return -EINVAL;
 852
 853	addr = swab32(addr) | SF_RD_DATA_FAST;
 854
 855	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
 856	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
 857		return ret;
 858
 859	for (; nwords; nwords--, data++) {
 860		ret = sf1_read(adapter, 4, nwords > 1, data);
 861		if (ret)
 862			return ret;
 863		if (byte_oriented)
 864			*data = htonl(*data);
 865	}
 866	return 0;
 867}
 868
 869/**
 870 *	t3_write_flash - write up to a page of data to the serial flash
 871 *	@adapter: the adapter
 872 *	@addr: the start address to write
 873 *	@n: length of data to write
 874 *	@data: the data to write
 875 *
 876 *	Writes up to a page of data (256 bytes) to the serial flash starting
 877 *	at the given address.
 878 */
 879static int t3_write_flash(struct adapter *adapter, unsigned int addr,
 880			  unsigned int n, const u8 *data)
 881{
 882	int ret;
 883	u32 buf[64];
 884	unsigned int i, c, left, val, offset = addr & 0xff;
 885
 886	if (addr + n > SF_SIZE || offset + n > 256)
 887		return -EINVAL;
 888
 889	val = swab32(addr) | SF_PROG_PAGE;
 890
 891	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
 892	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
 893		return ret;
 894
 895	for (left = n; left; left -= c) {
 896		c = min(left, 4U);
 897		for (val = 0, i = 0; i < c; ++i)
 898			val = (val << 8) + *data++;
 899
 900		ret = sf1_write(adapter, c, c != left, val);
 901		if (ret)
 902			return ret;
 903	}
 904	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
 905		return ret;
 906
 907	/* Read the page to verify the write succeeded */
 908	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
 909	if (ret)
 910		return ret;
 911
 912	if (memcmp(data - n, (u8 *) buf + offset, n))
 913		return -EIO;
 914	return 0;
 915}
 916
 917/**
 918 *	t3_get_tp_version - read the tp sram version
 919 *	@adapter: the adapter
 920 *	@vers: where to place the version
 921 *
 922 *	Reads the protocol sram version from sram.
 923 */
 924int t3_get_tp_version(struct adapter *adapter, u32 *vers)
 925{
 926	int ret;
 927
 928	/* Get version loaded in SRAM */
 929	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
 930	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
 931			      1, 1, 5, 1);
 932	if (ret)
 933		return ret;
 934
 935	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
 936
 937	return 0;
 938}
 939
 940/**
 941 *	t3_check_tpsram_version - read the tp sram version
 942 *	@adapter: the adapter
 943 *
 944 *	Reads the protocol sram version from flash.
 945 */
 946int t3_check_tpsram_version(struct adapter *adapter)
 947{
 948	int ret;
 949	u32 vers;
 950	unsigned int major, minor;
 951
 952	if (adapter->params.rev == T3_REV_A)
 953		return 0;
 954
 955
 956	ret = t3_get_tp_version(adapter, &vers);
 957	if (ret)
 958		return ret;
 959
 960	major = G_TP_VERSION_MAJOR(vers);
 961	minor = G_TP_VERSION_MINOR(vers);
 962
 963	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
 964		return 0;
 965	else {
 966		CH_ERR(adapter, "found wrong TP version (%u.%u), "
 967		       "driver compiled for version %d.%d\n", major, minor,
 968		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
 969	}
 970	return -EINVAL;
 971}
 972
 973/**
 974 *	t3_check_tpsram - check if provided protocol SRAM
 975 *			  is compatible with this driver
 976 *	@adapter: the adapter
 977 *	@tp_sram: the firmware image to write
 978 *	@size: image size
 979 *
 980 *	Checks if an adapter's tp sram is compatible with the driver.
 981 *	Returns 0 if the versions are compatible, a negative error otherwise.
 982 */
 983int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
 984		    unsigned int size)
 985{
 986	u32 csum;
 987	unsigned int i;
 988	const __be32 *p = (const __be32 *)tp_sram;
 989
 990	/* Verify checksum */
 991	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
 992		csum += ntohl(p[i]);
 993	if (csum != 0xffffffff) {
 994		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
 995		       csum);
 996		return -EINVAL;
 997	}
 998
 999	return 0;
1000}
1001
1002enum fw_version_type {
1003	FW_VERSION_N3,
1004	FW_VERSION_T3
1005};
1006
1007/**
1008 *	t3_get_fw_version - read the firmware version
1009 *	@adapter: the adapter
1010 *	@vers: where to place the version
1011 *
1012 *	Reads the FW version from flash.
1013 */
1014int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1015{
1016	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1017}
1018
1019/**
1020 *	t3_check_fw_version - check if the FW is compatible with this driver
1021 *	@adapter: the adapter
1022 *
1023 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
1024 *	if the versions are compatible, a negative error otherwise.
1025 */
1026int t3_check_fw_version(struct adapter *adapter)
1027{
1028	int ret;
1029	u32 vers;
1030	unsigned int type, major, minor;
1031
1032	ret = t3_get_fw_version(adapter, &vers);
1033	if (ret)
1034		return ret;
1035
1036	type = G_FW_VERSION_TYPE(vers);
1037	major = G_FW_VERSION_MAJOR(vers);
1038	minor = G_FW_VERSION_MINOR(vers);
1039
1040	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1041	    minor == FW_VERSION_MINOR)
1042		return 0;
1043	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1044		CH_WARN(adapter, "found old FW minor version(%u.%u), "
1045		        "driver compiled for version %u.%u\n", major, minor,
1046			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1047	else {
1048		CH_WARN(adapter, "found newer FW version(%u.%u), "
1049		        "driver compiled for version %u.%u\n", major, minor,
1050			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1051			return 0;
1052	}
1053	return -EINVAL;
1054}
1055
1056/**
1057 *	t3_flash_erase_sectors - erase a range of flash sectors
1058 *	@adapter: the adapter
1059 *	@start: the first sector to erase
1060 *	@end: the last sector to erase
1061 *
1062 *	Erases the sectors in the given range.
1063 */
1064static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1065{
1066	while (start <= end) {
1067		int ret;
1068
1069		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1070		    (ret = sf1_write(adapter, 4, 0,
1071				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1072		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
1073			return ret;
1074		start++;
1075	}
1076	return 0;
1077}
1078
1079/**
1080 *	t3_load_fw - download firmware
1081 *	@adapter: the adapter
1082 *	@fw_data: the firmware image to write
1083 *	@size: image size
1084 *
1085 *	Write the supplied firmware image to the card's serial flash.
1086 *	The FW image has the following sections: @size - 8 bytes of code and
1087 *	data, followed by 4 bytes of FW version, followed by the 32-bit
1088 *	1's complement checksum of the whole image.
1089 */
1090int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1091{
1092	u32 csum;
1093	unsigned int i;
1094	const __be32 *p = (const __be32 *)fw_data;
1095	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1096
1097	if ((size & 3) || size < FW_MIN_SIZE)
1098		return -EINVAL;
1099	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1100		return -EFBIG;
1101
1102	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1103		csum += ntohl(p[i]);
1104	if (csum != 0xffffffff) {
1105		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1106		       csum);
1107		return -EINVAL;
1108	}
1109
1110	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1111	if (ret)
1112		goto out;
1113
1114	size -= 8;		/* trim off version and checksum */
1115	for (addr = FW_FLASH_BOOT_ADDR; size;) {
1116		unsigned int chunk_size = min(size, 256U);
1117
1118		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1119		if (ret)
1120			goto out;
1121
1122		addr += chunk_size;
1123		fw_data += chunk_size;
1124		size -= chunk_size;
1125	}
1126
1127	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1128out:
1129	if (ret)
1130		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1131	return ret;
1132}
1133
1134#define CIM_CTL_BASE 0x2000
1135
1136/**
1137 *      t3_cim_ctl_blk_read - read a block from CIM control region
1138 *
1139 *      @adap: the adapter
1140 *      @addr: the start address within the CIM control region
1141 *      @n: number of words to read
1142 *      @valp: where to store the result
1143 *
1144 *      Reads a block of 4-byte words from the CIM control region.
1145 */
1146int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1147			unsigned int n, unsigned int *valp)
1148{
1149	int ret = 0;
1150
1151	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1152		return -EBUSY;
1153
1154	for ( ; !ret && n--; addr += 4) {
1155		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1156		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1157				      0, 5, 2);
1158		if (!ret)
1159			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1160	}
1161	return ret;
1162}
1163
1164static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1165			       u32 *rx_hash_high, u32 *rx_hash_low)
1166{
1167	/* stop Rx unicast traffic */
1168	t3_mac_disable_exact_filters(mac);
1169
1170	/* stop broadcast, multicast, promiscuous mode traffic */
1171	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
1172	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1173			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1174			 F_DISBCAST);
1175
1176	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
1177	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
1178
1179	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
1180	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
1181
1182	/* Leave time to drain max RX fifo */
1183	msleep(1);
1184}
1185
1186static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1187			       u32 rx_hash_high, u32 rx_hash_low)
1188{
1189	t3_mac_enable_exact_filters(mac);
1190	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1191			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1192			 rx_cfg);
1193	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
1194	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
1195}
1196
1197/**
1198 *	t3_link_changed - handle interface link changes
1199 *	@adapter: the adapter
1200 *	@port_id: the port index that changed link state
1201 *
1202 *	Called when a port's link settings change to propagate the new values
1203 *	to the associated PHY and MAC.  After performing the common tasks it
1204 *	invokes an OS-specific handler.
1205 */
1206void t3_link_changed(struct adapter *adapter, int port_id)
1207{
1208	int link_ok, speed, duplex, fc;
1209	struct port_info *pi = adap2pinfo(adapter, port_id);
1210	struct cphy *phy = &pi->phy;
1211	struct cmac *mac = &pi->mac;
1212	struct link_config *lc = &pi->link_config;
1213
1214	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1215
1216	if (!lc->link_ok && link_ok) {
1217		u32 rx_cfg, rx_hash_high, rx_hash_low;
1218		u32 status;
1219
1220		t3_xgm_intr_enable(adapter, port_id);
1221		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1222		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1223		t3_mac_enable(mac, MAC_DIRECTION_RX);
1224
1225		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1226		if (status & F_LINKFAULTCHANGE) {
1227			mac->stats.link_faults++;
1228			pi->link_fault = 1;
1229		}
1230		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1231	}
1232
1233	if (lc->requested_fc & PAUSE_AUTONEG)
1234		fc &= lc->requested_fc;
1235	else
1236		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1237
1238	if (link_ok == lc->link_ok && speed == lc->speed &&
1239	    duplex == lc->duplex && fc == lc->fc)
1240		return;                            /* nothing changed */
1241
1242	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1243	    uses_xaui(adapter)) {
1244		if (link_ok)
1245			t3b_pcs_reset(mac);
1246		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1247			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
1248	}
1249	lc->link_ok = link_ok;
1250	lc->speed = speed < 0 ? SPEED_INVALID : speed;
1251	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1252
1253	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1254		/* Set MAC speed, duplex, and flow control to match PHY. */
1255		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1256		lc->fc = fc;
1257	}
1258
1259	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
1260			   speed, duplex, fc);
1261}
1262
1263void t3_link_fault(struct adapter *adapter, int port_id)
1264{
1265	struct port_info *pi = adap2pinfo(adapter, port_id);
1266	struct cmac *mac = &pi->mac;
1267	struct cphy *phy = &pi->phy;
1268	struct link_config *lc = &pi->link_config;
1269	int link_ok, speed, duplex, fc, link_fault;
1270	u32 rx_cfg, rx_hash_high, rx_hash_low;
1271
1272	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1273
1274	if (adapter->params.rev > 0 && uses_xaui(adapter))
1275		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1276
1277	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1278	t3_mac_enable(mac, MAC_DIRECTION_RX);
1279
1280	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1281
1282	link_fault = t3_read_reg(adapter,
1283				 A_XGM_INT_STATUS + mac->offset);
1284	link_fault &= F_LINKFAULTCHANGE;
1285
1286	link_ok = lc->link_ok;
1287	speed = lc->speed;
1288	duplex = lc->duplex;
1289	fc = lc->fc;
1290
1291	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1292
1293	if (link_fault) {
1294		lc->link_ok = 0;
1295		lc->speed = SPEED_INVALID;
1296		lc->duplex = DUPLEX_INVALID;
1297
1298		t3_os_link_fault(adapter, port_id, 0);
1299
1300		/* Count link faults only when the PHY reports link up */
1301		if (link_ok)
1302			mac->stats.link_faults++;
1303	} else {
1304		if (link_ok)
1305			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1306				     F_TXACTENABLE | F_RXEN);
1307
1308		pi->link_fault = 0;
1309		lc->link_ok = (unsigned char)link_ok;
1310		lc->speed = speed < 0 ? SPEED_INVALID : speed;
1311		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1312		t3_os_link_fault(adapter, port_id, link_ok);
1313	}
1314}
1315
1316/**
1317 *	t3_link_start - apply link configuration to MAC/PHY
1318 *	@phy: the PHY to setup
1319 *	@mac: the MAC to setup
1320 *	@lc: the requested link configuration
1321 *
1322 *	Set up a port's MAC and PHY according to a desired link configuration.
1323 *	- If the PHY can auto-negotiate, first decide what to advertise, then
1324 *	  enable/disable auto-negotiation as desired, and reset.
1325 *	- If the PHY does not auto-negotiate, just reset it.
1326 *	- If auto-negotiation is off, set the MAC to the proper speed/duplex/FC;
1327 *	  otherwise do it later based on the outcome of auto-negotiation.
1328 */
1329int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1330{
1331	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1332
1333	lc->link_ok = 0;
1334	if (lc->supported & SUPPORTED_Autoneg) {
1335		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1336		if (fc) {
1337			lc->advertising |= ADVERTISED_Asym_Pause;
1338			if (fc & PAUSE_RX)
1339				lc->advertising |= ADVERTISED_Pause;
1340		}
1341		phy->ops->advertise(phy, lc->advertising);
1342
1343		if (lc->autoneg == AUTONEG_DISABLE) {
1344			lc->speed = lc->requested_speed;
1345			lc->duplex = lc->requested_duplex;
1346			lc->fc = (unsigned char)fc;
1347			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1348						   fc);
1349			/* Also disables autoneg */
1350			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1351		} else
1352			phy->ops->autoneg_enable(phy);
1353	} else {
1354		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1355		lc->fc = (unsigned char)fc;
1356		phy->ops->reset(phy, 0);
1357	}
1358	return 0;
1359}
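/*
 * Editorial note on the advertising logic above: a PAUSE_TX-only request
 * advertises only Asym_Pause, any request that includes PAUSE_RX advertises
 * Pause as well, and a request for no pause advertises neither bit.
 */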
1360
1361/**
1362 *	t3_set_vlan_accel - control HW VLAN extraction
1363 *	@adapter: the adapter
1364 *	@ports: bitmap of adapter ports to operate on
1365 *	@on: enable (1) or disable (0) HW VLAN extraction
1366 *
1367 *	Enables or disables HW extraction of VLAN tags for the given ports.
1368 */
1369void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1370{
1371	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1372			 ports << S_VLANEXTRACTIONENABLE,
1373			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1374}
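/*
 * Usage sketch (illustrative, not taken from an actual caller): @ports is a
 * bitmap, so several ports can be configured in one call.  The port numbers
 * below are hypothetical.
 */
static inline void example_enable_vlan_accel(struct adapter *adapter)
{
	/* enable HW VLAN tag extraction on ports 0 and 1 */
	t3_set_vlan_accel(adapter, (1 << 0) | (1 << 1), 1);
}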
1375
1376struct intr_info {
1377	unsigned int mask;	/* bits to check in interrupt status */
1378	const char *msg;	/* message to print or NULL */
1379	short stat_idx;		/* stat counter to increment or -1 */
1380	unsigned short fatal;	/* whether the condition reported is fatal */
1381};
1382
1383/**
1384 *	t3_handle_intr_status - table-driven interrupt handler
1385 *	@adapter: the adapter that generated the interrupt
1386 *	@reg: the interrupt status register to process
1387 *	@mask: a mask to apply to the interrupt status
1388 *	@acts: table of interrupt actions
1389 *	@stats: statistics counters tracking interrupt occurrences
1390 *
1391 *	A table-driven interrupt handler that applies a set of masks to an
1392 *	interrupt status word and performs the corresponding actions if the
1393 *	interrupts described by the mask have occurred.  The actions include
1394 *	optionally printing a warning or alert message, and optionally
1395 *	incrementing a stat counter.  The table is terminated by an entry
1396 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
1397 */
1398static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1399				 unsigned int mask,
1400				 const struct intr_info *acts,
1401				 unsigned long *stats)
1402{
1403	int fatal = 0;
1404	unsigned int status = t3_read_reg(adapter, reg) & mask;
1405
1406	for (; acts->mask; ++acts) {
1407		if (!(status & acts->mask))
1408			continue;
1409		if (acts->fatal) {
1410			fatal++;
1411			CH_ALERT(adapter, "%s (0x%x)\n",
1412				 acts->msg, status & acts->mask);
1413			status &= ~acts->mask;
1414		} else if (acts->msg)
1415			CH_WARN(adapter, "%s (0x%x)\n",
1416				acts->msg, status & acts->mask);
1417		if (acts->stat_idx >= 0)
1418			stats[acts->stat_idx]++;
1419	}
1420	if (status)		/* clear processed interrupts */
1421		t3_write_reg(adapter, reg, status);
1422	return fatal;
1423}
1424
1425#define SGE_INTR_MASK (F_RSPQDISABLED | \
1426		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1427		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1428		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1429		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1430		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1431		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
1432		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
1433		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
1434		       F_LOPIODRBDROPERR)
1435#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1436		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1437		       F_NFASRCHFAIL)
1438#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1439#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1440		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1441		       F_TXFIFO_UNDERRUN)
1442#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1443			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1444			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1445			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1446			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1447			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1448#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1449			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1450			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1451			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1452			F_TXPARERR | V_BISTERR(M_BISTERR))
1453#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1454			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1455			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1456#define ULPTX_INTR_MASK 0xfc
1457#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1458			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1459			 F_ZERO_SWITCH_ERROR)
1460#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1461		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1462		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1463		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1464		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1465		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1466		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1467		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1468#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1469			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1470			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1471#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1472			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1473			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1474#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1475		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1476		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1477		       V_MCAPARERRENB(M_MCAPARERRENB))
1478#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1479#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1480		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1481		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1482		      F_MPS0 | F_CPL_SWITCH)
1483/*
1484 * Interrupt handler for the PCIX1 module.
1485 */
1486static void pci_intr_handler(struct adapter *adapter)
1487{
1488	static const struct intr_info pcix1_intr_info[] = {
1489		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1490		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
1491		{F_RCVTARABT, "PCI received target abort", -1, 1},
1492		{F_RCVMSTABT, "PCI received master abort", -1, 1},
1493		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
1494		{F_DETPARERR, "PCI detected parity error", -1, 1},
1495		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1496		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1497		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
1498		 1},
1499		{F_DETCORECCERR, "PCI correctable ECC error",
1500		 STAT_PCI_CORR_ECC, 0},
1501		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1502		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1503		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1504		 1},
1505		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1506		 1},
1507		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1508		 1},
1509		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1510		 "error", -1, 1},
1511		{0}
1512	};
1513
1514	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1515				  pcix1_intr_info, adapter->irq_stats))
1516		t3_fatal_err(adapter);
1517}
1518
1519/*
1520 * Interrupt handler for the PCIE module.
1521 */
1522static void pcie_intr_handler(struct adapter *adapter)
1523{
1524	static const struct intr_info pcie_intr_info[] = {
1525		{F_PEXERR, "PCI PEX error", -1, 1},
1526		{F_UNXSPLCPLERRR,
1527		 "PCI unexpected split completion DMA read error", -1, 1},
1528		{F_UNXSPLCPLERRC,
1529		 "PCI unexpected split completion DMA command error", -1, 1},
1530		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1531		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1532		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1533		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1534		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1535		 "PCI MSI-X table/PBA parity error", -1, 1},
1536		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1537		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1538		{F_RXPARERR, "PCI Rx parity error", -1, 1},
1539		{F_TXPARERR, "PCI Tx parity error", -1, 1},
1540		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1541		{0}
1542	};
1543
1544	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1545		CH_ALERT(adapter, "PEX error code 0x%x\n",
1546			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1547
1548	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1549				  pcie_intr_info, adapter->irq_stats))
1550		t3_fatal_err(adapter);
1551}
1552
1553/*
1554 * TP interrupt handler.
1555 */
1556static void tp_intr_handler(struct adapter *adapter)
1557{
1558	static const struct intr_info tp_intr_info[] = {
1559		{0xffffff, "TP parity error", -1, 1},
1560		{0x1000000, "TP out of Rx pages", -1, 1},
1561		{0x2000000, "TP out of Tx pages", -1, 1},
1562		{0}
1563	};
1564
1565	static const struct intr_info tp_intr_info_t3c[] = {
1566		{0x1fffffff, "TP parity error", -1, 1},
1567		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1568		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1569		{0}
1570	};
1571
1572	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1573				  adapter->params.rev < T3_REV_C ?
1574				  tp_intr_info : tp_intr_info_t3c, NULL))
1575		t3_fatal_err(adapter);
1576}
1577
1578/*
1579 * CIM interrupt handler.
1580 */
1581static void cim_intr_handler(struct adapter *adapter)
1582{
1583	static const struct intr_info cim_intr_info[] = {
1584		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1585		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1586		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1587		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1588		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1589		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1590		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1591		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1592		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1593		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1594		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1595		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1596		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1597		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1598		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1599		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1600		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1601		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1602		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1603		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1604		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1605		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1606		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
1607		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
1608		{0}
1609	};
1610
1611	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1612				  cim_intr_info, NULL))
1613		t3_fatal_err(adapter);
1614}
1615
1616/*
1617 * ULP RX interrupt handler.
1618 */
1619static void ulprx_intr_handler(struct adapter *adapter)
1620{
1621	static const struct intr_info ulprx_intr_info[] = {
1622		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
1623		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1624		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1625		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1626		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1627		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1628		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1629		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1630		{0}
1631	};
1632
1633	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1634				  ulprx_intr_info, NULL))
1635		t3_fatal_err(adapter);
1636}
1637
1638/*
1639 * ULP TX interrupt handler.
1640 */
1641static void ulptx_intr_handler(struct adapter *adapter)
1642{
1643	static const struct intr_info ulptx_intr_info[] = {
1644		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1645		 STAT_ULP_CH0_PBL_OOB, 0},
1646		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1647		 STAT_ULP_CH1_PBL_OOB, 0},
1648		{0xfc, "ULP TX parity error", -1, 1},
1649		{0}
1650	};
1651
1652	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1653				  ulptx_intr_info, adapter->irq_stats))
1654		t3_fatal_err(adapter);
1655}
1656
1657#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1658	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1659	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1660	F_ICSPI1_TX_FRAMING_ERROR)
1661#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1662	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1663	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1664	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1665
1666/*
1667 * PM TX interrupt handler.
1668 */
1669static void pmtx_intr_handler(struct adapter *adapter)
1670{
1671	static const struct intr_info pmtx_intr_info[] = {
1672		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1673		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1674		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1675		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1676		 "PMTX ispi parity error", -1, 1},
1677		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1678		 "PMTX ospi parity error", -1, 1},
1679		{0}
1680	};
1681
1682	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1683				  pmtx_intr_info, NULL))
1684		t3_fatal_err(adapter);
1685}
1686
1687#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1688	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1689	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1690	F_IESPI1_TX_FRAMING_ERROR)
1691#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1692	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1693	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1694	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1695
1696/*
1697 * PM RX interrupt handler.
1698 */
1699static void pmrx_intr_handler(struct adapter *adapter)
1700{
1701	static const struct intr_info pmrx_intr_info[] = {
1702		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1703		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1704		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1705		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1706		 "PMRX ispi parity error", -1, 1},
1707		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1708		 "PMRX ospi parity error", -1, 1},
1709		{0}
1710	};
1711
1712	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1713				  pmrx_intr_info, NULL))
1714		t3_fatal_err(adapter);
1715}
1716
1717/*
1718 * CPL switch interrupt handler.
1719 */
1720static void cplsw_intr_handler(struct adapter *adapter)
1721{
1722	static const struct intr_info cplsw_intr_info[] = {
1723		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1724		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1725		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1726		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1727		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1728		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1729		{0}
1730	};
1731
1732	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1733				  cplsw_intr_info, NULL))
1734		t3_fatal_err(adapter);
1735}
1736
1737/*
1738 * MPS interrupt handler.
1739 */
1740static void mps_intr_handler(struct adapter *adapter)
1741{
1742	static const struct intr_info mps_intr_info[] = {
1743		{0x1ff, "MPS parity error", -1, 1},
1744		{0}
1745	};
1746
1747	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1748				  mps_intr_info, NULL))
1749		t3_fatal_err(adapter);
1750}
1751
1752#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1753
1754/*
1755 * MC7 interrupt handler.
1756 */
1757static void mc7_intr_handler(struct mc7 *mc7)
1758{
1759	struct adapter *adapter = mc7->adapter;
1760	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1761
1762	if (cause & F_CE) {
1763		mc7->stats.corr_err++;
1764		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1765			"data 0x%x 0x%x 0x%x\n", mc7->name,
1766			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1767			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1768			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1769			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1770	}
1771
1772	if (cause & F_UE) {
1773		mc7->stats.uncorr_err++;
1774		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1775			 "data 0x%x 0x%x 0x%x\n", mc7->name,
1776			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1777			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1778			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1779			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1780	}
1781
1782	if (G_PE(cause)) {
1783		mc7->stats.parity_err++;
1784		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1785			 mc7->name, G_PE(cause));
1786	}
1787
1788	if (cause & F_AE) {
1789		u32 addr = 0;
1790
1791		if (adapter->params.rev > 0)
1792			addr = t3_read_reg(adapter,
1793					   mc7->offset + A_MC7_ERR_ADDR);
1794		mc7->stats.addr_err++;
1795		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1796			 mc7->name, addr);
1797	}
1798
1799	if (cause & MC7_INTR_FATAL)
1800		t3_fatal_err(adapter);
1801
1802	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1803}
1804
1805#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1806			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1807/*
1808 * XGMAC interrupt handler.
1809 */
1810static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1811{
1812	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1813	/*
1814	 * We mask out interrupt causes for which we're not taking interrupts.
1815	 * This allows us to use polling logic to monitor some of the other
1816	 * conditions when taking interrupts would impose too much load on the
1817	 * system.
1818	 */
1819	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1820		    ~F_RXFIFO_OVERFLOW;
1821
1822	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1823		mac->stats.tx_fifo_parity_err++;
1824		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1825	}
1826	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1827		mac->stats.rx_fifo_parity_err++;
1828		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1829	}
1830	if (cause & F_TXFIFO_UNDERRUN)
1831		mac->stats.tx_fifo_urun++;
1832	if (cause & F_RXFIFO_OVERFLOW)
1833		mac->stats.rx_fifo_ovfl++;
1834	if (cause & V_SERDES_LOS(M_SERDES_LOS))
1835		mac->stats.serdes_signal_loss++;
1836	if (cause & F_XAUIPCSCTCERR)
1837		mac->stats.xaui_pcs_ctc_err++;
1838	if (cause & F_XAUIPCSALIGNCHANGE)
1839		mac->stats.xaui_pcs_align_change++;
1840	if (cause & F_XGM_INT) {
1841		t3_set_reg_field(adap,
1842				 A_XGM_INT_ENABLE + mac->offset,
1843				 F_XGM_INT, 0);
1844		mac->stats.link_faults++;
1845
1846		t3_os_link_fault_handler(adap, idx);
1847	}
1848
1849	if (cause & XGM_INTR_FATAL)
1850		t3_fatal_err(adap);
1851
1852	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1853	return cause != 0;
1854}
1855
1856/*
1857 * Interrupt handler for PHY events.
1858 */
1859int t3_phy_intr_handler(struct adapter *adapter)
1860{
1861	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1862
1863	for_each_port(adapter, i) {
1864		struct port_info *p = adap2pinfo(adapter, i);
1865
1866		if (!(p->phy.caps & SUPPORTED_IRQ))
1867			continue;
1868
1869		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1870			int phy_cause = p->phy.ops->intr_handler(&p->phy);
1871
1872			if (phy_cause & cphy_cause_link_change)
1873				t3_link_changed(adapter, i);
1874			if (phy_cause & cphy_cause_fifo_error)
1875				p->phy.fifo_errors++;
1876			if (phy_cause & cphy_cause_module_change)
1877				t3_os_phymod_changed(adapter, i);
1878		}
1879	}
1880
1881	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1882	return 0;
1883}
1884
1885/*
1886 * T3 slow path (non-data) interrupt handler.
1887 */
1888int t3_slow_intr_handler(struct adapter *adapter)
1889{
1890	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1891
1892	cause &= adapter->slow_intr_mask;
1893	if (!cause)
1894		return 0;
1895	if (cause & F_PCIM0) {
1896		if (is_pcie(adapter))
1897			pcie_intr_handler(adapter);
1898		else
1899			pci_intr_handler(adapter);
1900	}
1901	if (cause & F_SGE3)
1902		t3_sge_err_intr_handler(adapter);
1903	if (cause & F_MC7_PMRX)
1904		mc7_intr_handler(&adapter->pmrx);
1905	if (cause & F_MC7_PMTX)
1906		mc7_intr_handler(&adapter->pmtx);
1907	if (cause & F_MC7_CM)
1908		mc7_intr_handler(&adapter->cm);
1909	if (cause & F_CIM)
1910		cim_intr_handler(adapter);
1911	if (cause & F_TP1)
1912		tp_intr_handler(adapter);
1913	if (cause & F_ULP2_RX)
1914		ulprx_intr_handler(adapter);
1915	if (cause & F_ULP2_TX)
1916		ulptx_intr_handler(adapter);
1917	if (cause & F_PM1_RX)
1918		pmrx_intr_handler(adapter);
1919	if (cause & F_PM1_TX)
1920		pmtx_intr_handler(adapter);
1921	if (cause & F_CPL_SWITCH)
1922		cplsw_intr_handler(adapter);
1923	if (cause & F_MPS0)
1924		mps_intr_handler(adapter);
1925	if (cause & F_MC5A)
1926		t3_mc5_intr_handler(&adapter->mc5);
1927	if (cause & F_XGMAC0_0)
1928		mac_intr_handler(adapter, 0);
1929	if (cause & F_XGMAC0_1)
1930		mac_intr_handler(adapter, 1);
1931	if (cause & F_T3DBG)
1932		t3_os_ext_intr_handler(adapter);
1933
1934	/* Clear the interrupts just processed. */
1935	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1936	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
1937	return 1;
1938}
1939
1940static unsigned int calc_gpio_intr(struct adapter *adap)
1941{
1942	unsigned int i, gpi_intr = 0;
1943
1944	for_each_port(adap, i)
1945		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1946		    adapter_info(adap)->gpio_intr[i])
1947			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1948	return gpi_intr;
1949}
1950
1951/**
1952 *	t3_intr_enable - enable interrupts
1953 *	@adapter: the adapter whose interrupts should be enabled
1954 *
1955 *	Enable interrupts by setting the interrupt enable registers of the
1956 *	various HW modules and then enabling the top-level interrupt
1957 *	concentrator.
1958 */
1959void t3_intr_enable(struct adapter *adapter)
1960{
1961	static const struct addr_val_pair intr_en_avp[] = {
1962		{A_SG_INT_ENABLE, SGE_INTR_MASK},
1963		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
1964		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1965		 MC7_INTR_MASK},
1966		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1967		 MC7_INTR_MASK},
1968		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1969		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1970		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1971		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1972		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1973		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
1974	};
1975
1976	adapter->slow_intr_mask = PL_INTR_MASK;
1977
1978	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1979	t3_write_reg(adapter, A_TP_INT_ENABLE,
1980		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1981
1982	if (adapter->params.rev > 0) {
1983		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1984			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1985		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1986			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1987			     F_PBL_BOUND_ERR_CH1);
1988	} else {
1989		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1990		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1991	}
1992
1993	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1994
1995	if (is_pcie(adapter))
1996		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1997	else
1998		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1999	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2000	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
2001}
2002
2003/**
2004 *	t3_intr_disable - disable a card's interrupts
2005 *	@adapter: the adapter whose interrupts should be disabled
2006 *
2007 *	Disable interrupts.  We only disable the top-level interrupt
2008 *	concentrator and the SGE data interrupts.
2009 */
2010void t3_intr_disable(struct adapter *adapter)
2011{
2012	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2013	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
2014	adapter->slow_intr_mask = 0;
2015}
2016
2017/**
2018 *	t3_intr_clear - clear all interrupts
2019 *	@adapter: the adapter whose interrupts should be cleared
2020 *
2021 *	Clears all interrupts.
2022 */
2023void t3_intr_clear(struct adapter *adapter)
2024{
2025	static const unsigned int cause_reg_addr[] = {
2026		A_SG_INT_CAUSE,
2027		A_SG_RSPQ_FL_STATUS,
2028		A_PCIX_INT_CAUSE,
2029		A_MC7_INT_CAUSE,
2030		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2031		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2032		A_CIM_HOST_INT_CAUSE,
2033		A_TP_INT_CAUSE,
2034		A_MC5_DB_INT_CAUSE,
2035		A_ULPRX_INT_CAUSE,
2036		A_ULPTX_INT_CAUSE,
2037		A_CPL_INTR_CAUSE,
2038		A_PM1_TX_INT_CAUSE,
2039		A_PM1_RX_INT_CAUSE,
2040		A_MPS_INT_CAUSE,
2041		A_T3DBG_INT_CAUSE,
2042	};
2043	unsigned int i;
2044
2045	/* Clear PHY and MAC interrupts for each port. */
2046	for_each_port(adapter, i)
2047	    t3_port_intr_clear(adapter, i);
2048
2049	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2050		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2051
2052	if (is_pcie(adapter))
2053		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2054	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2055	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
2056}
2057
2058void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2059{
2060	struct port_info *pi = adap2pinfo(adapter, idx);
2061
2062	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2063		     XGM_EXTRA_INTR_MASK);
2064}
2065
2066void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2067{
2068	struct port_info *pi = adap2pinfo(adapter, idx);
2069
2070	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2071		     0x7ff);
2072}
2073
2074/**
2075 *	t3_port_intr_enable - enable port-specific interrupts
2076 *	@adapter: associated adapter
2077 *	@idx: index of port whose interrupts should be enabled
2078 *
2079 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2080 *	adapter port.
2081 */
2082void t3_port_intr_enable(struct adapter *adapter, int idx)
2083{
2084	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2085
2086	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2087	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2088	phy->ops->intr_enable(phy);
2089}
2090
2091/**
2092 *	t3_port_intr_disable - disable port-specific interrupts
2093 *	@adapter: associated adapter
2094 *	@idx: index of port whose interrupts should be disabled
2095 *
2096 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2097 *	adapter port.
2098 */
2099void t3_port_intr_disable(struct adapter *adapter, int idx)
2100{
2101	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2102
2103	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2104	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2105	phy->ops->intr_disable(phy);
2106}
2107
2108/**
2109 *	t3_port_intr_clear - clear port-specific interrupts
2110 *	@adapter: associated adapter
2111 *	@idx: index of port whose interrupts to clear
2112 *
2113 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2114 *	adapter port.
2115 */
2116static void t3_port_intr_clear(struct adapter *adapter, int idx)
2117{
2118	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2119
2120	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2121	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2122	phy->ops->intr_clear(phy);
2123}
2124
2125#define SG_CONTEXT_CMD_ATTEMPTS 100
2126
2127/**
2128 * 	t3_sge_write_context - write an SGE context
2129 * 	@adapter: the adapter
2130 * 	@id: the context id
2131 * 	@type: the context type
2132 *
2133 * 	Program an SGE context with the values already loaded in the
2134 * 	CONTEXT_DATA0..3 registers.
2135 */
2136static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2137				unsigned int type)
2138{
2139	if (type == F_RESPONSEQ) {
2140		/*
2141		 * We can't write the Response Queue context's Interrupt
2142		 * Armed bit or its Reserved bits after the chip has been
2143		 * initialized out of reset.  Writing to these bits can
2144		 * confuse the hardware.
2145		 */
2146		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2147		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2148		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2149		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2150	} else {
2151		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2152		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2153		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2154		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2155	}
2156	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2157		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2158	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2159			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2160}
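/*
 * Editorial note: the SG_CONTEXT_MASK0..3 registers select which bits of the
 * corresponding CONTEXT_DATA0..3 words are applied to the context.  An
 * all-ones mask writes the whole word; a narrower mask (the 0x17ffffff used
 * for response queues above, or the lone F_EC_VALID used by
 * t3_sge_enable_ecntxt() below) updates only the selected bits.
 */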
2161
2162/**
2163 *	clear_sge_ctxt - completely clear an SGE context
2164 *	@adapter: the adapter
2165 *	@id: the context id
2166 *	@type: the context type
2167 *
2168 *	Completely clear an SGE context.  Used predominantly at post-reset
2169 *	initialization.  Note in particular that we don't skip writing to any
2170 *	"sensitive bits" in the contexts the way that t3_sge_write_context()
2171 *	does ...
2172 */
2173static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2174			  unsigned int type)
2175{
2176	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2177	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2178	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2179	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2180	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2181	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2182	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2183	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2184	t3_write_reg(adap, A_SG_CONTEXT_CMD,
2185		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2186	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2187			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2188}
2189
2190/**
2191 *	t3_sge_init_ecntxt - initialize an SGE egress context
2192 *	@adapter: the adapter to configure
2193 *	@id: the context id
2194 *	@gts_enable: whether to enable GTS for the context
2195 *	@type: the egress context type
2196 *	@respq: associated response queue
2197 *	@base_addr: base address of queue
2198 *	@size: number of queue entries
2199 *	@token: uP token
2200 *	@gen: initial generation value for the context
2201 *	@cidx: consumer pointer
2202 *
2203 *	Initialize an SGE egress context and make it ready for use.  If the
2204 *	platform allows concurrent context operations, the caller is
2205 *	responsible for appropriate locking.
2206 */
2207int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2208		       enum sge_context_type type, int respq, u64 base_addr,
2209		       unsigned int size, unsigned int token, int gen,
2210		       unsigned int cidx)
2211{
2212	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2213
2214	if (base_addr & 0xfff)	/* must be 4K aligned */
2215		return -EINVAL;
2216	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2217		return -EBUSY;
2218
2219	base_addr >>= 12;
2220	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2221		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2222	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2223		     V_EC_BASE_LO(base_addr & 0xffff));
2224	base_addr >>= 16;
2225	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2226	base_addr >>= 32;
2227	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2228		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2229		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2230		     F_EC_VALID);
2231	return t3_sge_write_context(adapter, id, F_EGRESS);
2232}
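/*
 * Usage sketch (illustrative only): initializing an Ethernet egress context
 * for a 1024-entry ring at a 4K-aligned bus address.  The context id,
 * response queue, token, and dma_addr values are hypothetical; SGE_CNTXT_ETH
 * is the NIC egress type from common.h.
 */
static inline int example_init_eth_ecntxt(struct adapter *adapter, u64 dma_addr)
{
	return t3_sge_init_ecntxt(adapter, 0 /* id */, 1 /* GTS on */,
				  SGE_CNTXT_ETH, 0 /* respq */, dma_addr,
				  1024 /* entries */, 0 /* uP token */,
				  1 /* gen */, 0 /* cidx */);
}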
2233
2234/**
2235 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2236 *	@adapter: the adapter to configure
2237 *	@id: the context id
2238 *	@gts_enable: whether to enable GTS for the context
2239 *	@base_addr: base address of queue
2240 *	@size: number of queue entries
2241 *	@bsize: size of each buffer for this queue
2242 *	@cong_thres: threshold to signal congestion to upstream producers
2243 *	@gen: initial generation value for the context
2244 *	@cidx: consumer pointer
2245 *
2246 *	Initialize an SGE free list context and make it ready for use.  The
2247 *	caller is responsible for ensuring only one context operation occurs
2248 *	at a time.
2249 */
2250int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2251			int gts_enable, u64 base_addr, unsigned int size,
2252			unsigned int bsize, unsigned int cong_thres, int gen,
2253			unsigned int cidx)
2254{
2255	if (base_addr & 0xfff)	/* must be 4K aligned */
2256		return -EINVAL;
2257	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2258		return -EBUSY;
2259
2260	base_addr >>= 12;
2261	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2262	base_addr >>= 32;
2263	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2264		     V_FL_BASE_HI((u32) base_addr) |
2265		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2266	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2267		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2268		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2269	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2270		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2271		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2272	return t3_sge_write_context(adapter, id, F_FREELIST);
2273}
2274
2275/**
2276 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2277 *	@adapter: the adapter to configure
2278 *	@id: the context id
2279 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2280 *	@base_addr: base address of queue
2281 *	@size: number of queue entries
2282 *	@fl_thres: threshold for selecting the normal or jumbo free list
2283 *	@gen: initial generation value for the context
2284 *	@cidx: consumer pointer
2285 *
2286 *	Initialize an SGE response queue context and make it ready for use.
2287 *	The caller is responsible for ensuring only one context operation
2288 *	occurs at a time.
2289 */
2290int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2291			 int irq_vec_idx, u64 base_addr, unsigned int size,
2292			 unsigned int fl_thres, int gen, unsigned int cidx)
2293{
2294	unsigned int intr = 0;
2295
2296	if (base_addr & 0xfff)	/* must be 4K aligned */
2297		return -EINVAL;
2298	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2299		return -EBUSY;
2300
2301	base_addr >>= 12;
2302	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2303		     V_CQ_INDEX(cidx));
2304	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2305	base_addr >>= 32;
2306	if (irq_vec_idx >= 0)
2307		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2308	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2309		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2310	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2311	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2312}
2313
2314/**
2315 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2316 *	@adapter: the adapter to configure
2317 *	@id: the context id
2318 *	@base_addr: base address of queue
2319 *	@size: number of queue entries
2320 *	@rspq: response queue for async notifications
2321 *	@ovfl_mode: CQ overflow mode
2322 *	@credits: completion queue credits
2323 *	@credit_thres: the credit threshold
2324 *
2325 *	Initialize an SGE completion queue context and make it ready for use.
2326 *	The caller is responsible for ensuring only one context operation
2327 *	occurs at a time.
2328 */
2329int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2330			unsigned int size, int rspq, int ovfl_mode,
2331			unsigned int credits, unsigned int credit_thres)
2332{
2333	if (base_addr & 0xfff)	/* must be 4K aligned */
2334		return -EINVAL;
2335	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2336		return -EBUSY;
2337
2338	base_addr >>= 12;
2339	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2340	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2341	base_addr >>= 32;
2342	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2343		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2344		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2345		     V_CQ_ERR(ovfl_mode));
2346	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2347		     V_CQ_CREDIT_THRES(credit_thres));
2348	return t3_sge_write_context(adapter, id, F_CQ);
2349}
2350
2351/**
2352 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2353 *	@adapter: the adapter
2354 *	@id: the egress context id
2355 *	@enable: enable (1) or disable (0) the context
2356 *
2357 *	Enable or disable an SGE egress context.  The caller is responsible for
2358 *	ensuring only one context operation occurs at a time.
2359 */
2360int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2361{
2362	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2363		return -EBUSY;
2364
2365	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2366	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2367	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2368	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2369	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2370	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2371		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2372	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2373			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2374}
2375
2376/**
2377 *	t3_sge_disable_fl - disable an SGE free-buffer list
2378 *	@adapter: the adapter
2379 *	@id: the free list context id
2380 *
2381 *	Disable an SGE free-buffer list.  The caller is responsible for
2382 *	ensuring only one context operation occurs at a time.
2383 */
2384int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2385{
2386	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2387		return -EBUSY;
2388
2389	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2390	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2391	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2392	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2393	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2394	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2395		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2396	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2397			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2398}
2399
2400/**
2401 *	t3_sge_disable_rspcntxt - disable an SGE response queue
2402 *	@adapter: the adapter
2403 *	@id: the response queue context id
2404 *
2405 *	Disable an SGE response queue.  The caller is responsible for
2406 *	ensuring only one context operation occurs at a time.
2407 */
2408int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2409{
2410	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2411		return -EBUSY;
2412
2413	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2414	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2415	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2416	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2417	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2418	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2419		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2420	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2421			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2422}
2423
2424/**
2425 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2426 *	@adapter: the adapter
2427 *	@id: the completion queue context id
2428 *
2429 *	Disable an SGE completion queue.  The caller is responsible for
2430 *	ensuring only one context operation occurs at a time.
2431 */
2432int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2433{
2434	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2435		return -EBUSY;
2436
2437	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2438	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2439	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2440	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2441	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2442	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2443		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2444	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2445			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2446}
2447
2448/**
2449 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2450 *	@adapter: the adapter
2451 *	@id: the context id
2452 *	@op: the operation to perform
 *	@credits: the number of CQ credits to supply with the operation
2453 *
2454 *	Perform the selected operation on an SGE completion queue context.
2455 *	The caller is responsible for ensuring only one context operation
2456 *	occurs at a time.
2457 */
2458int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2459		      unsigned int credits)
2460{
2461	u32 val;
2462
2463	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2464		return -EBUSY;
2465
2466	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2467	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2468		     V_CONTEXT(id) | F_CQ);
2469	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2470				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2471		return -EIO;
2472
2473	if (op >= 2 && op < 7) {
2474		if (adapter->params.rev > 0)
2475			return G_CQ_INDEX(val);
2476
2477		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2478			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2479		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2480				    F_CONTEXT_CMD_BUSY, 0,
2481				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2482			return -EIO;
2483		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2484	}
2485	return 0;
2486}
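/*
 * Editorial note: opcodes 2-6 return the current CQ index.  On revision 0
 * parts the index is not reflected in the command register at completion,
 * so the code above issues a follow-up read (opcode 0) and extracts the
 * index from CONTEXT_DATA0 instead.
 */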
2487
2488/**
2489 *	t3_config_rss - configure Rx packet steering
2490 *	@adapter: the adapter
2491 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2492 *	@cpus: values for the CPU lookup table (0xff terminated)
2493 *	@rspq: values for the response queue lookup table (0xffff terminated)
2494 *
2495 *	Programs the receive packet steering logic.  @cpus and @rspq provide
2496 *	the values for the CPU and response queue lookup tables.  If they
2497 *	provide fewer values than the size of the tables, the supplied values
2498 *	are used repeatedly until the tables are fully populated.
2499 */
2500void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2501		   const u8 * cpus, const u16 *rspq)
2502{
2503	int i, j, cpu_idx = 0, q_idx = 0;
2504
2505	if (cpus)
2506		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2507			u32 val = i << 16;
2508
2509			for (j = 0; j < 2; ++j) {
2510				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2511				if (cpus[cpu_idx] == 0xff)
2512					cpu_idx = 0;
2513			}
2514			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2515		}
2516
2517	if (rspq)
2518		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2519			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2520				     (i << 16) | rspq[q_idx++]);
2521			if (rspq[q_idx] == 0xffff)
2522				q_idx = 0;
2523		}
2524
2525	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2526}
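/*
 * Usage sketch (illustrative): the 0xff/0xffff terminators below make the
 * two supplied values repeat across the full lookup tables.  The rss_flags
 * value is left to the caller; the individual config bits are not shown
 * here.
 */
static inline void example_config_rss(struct adapter *adapter,
				      unsigned int rss_flags)
{
	static const u8 cpus[] = { 0, 1, 0xff };	/* 0xff terminated */
	static const u16 rspq[] = { 0, 1, 0xffff };	/* 0xffff terminated */

	t3_config_rss(adapter, rss_flags, cpus, rspq);
}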
2527
2528/**
2529 *	t3_tp_set_offload_mode - put TP in NIC/offload mode
2530 *	@adap: the adapter
2531 *	@enable: 1 to select offload mode, 0 for regular NIC
2532 *
2533 *	Switches TP to NIC/offload mode.
2534 */
2535void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2536{
2537	if (is_offload(adap) || !enable)
2538		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2539				 V_NICMODE(!enable));
2540}
2541
2542/**
2543 *	pm_num_pages - calculate the number of pages of the payload memory
2544 *	@mem_size: the size of the payload memory
2545 *	@pg_size: the size of each payload memory page
2546 *
2547 *	Calculate the number of pages, each of the given size, that fit in a
2548 *	memory of the specified size, respecting the HW requirement that the
2549 *	number of pages must be a multiple of 24.
2550 */
2551static inline unsigned int pm_num_pages(unsigned int mem_size,
2552					unsigned int pg_size)
2553{
2554	unsigned int n = mem_size / pg_size;
2555
2556	return n - n % 24;
2557}
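/*
 * Worked example (illustrative): with a hypothetical 64 MB payload memory
 * and 64 KB pages, n = 1024; since 1024 % 24 == 16, pm_num_pages() returns
 * 1008, the largest multiple of 24 that fits.
 */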
2558
2559#define mem_region(adap, start, size, reg) do { \
2560	t3_write_reg((adap), A_ ## reg, (start)); \
2561	start += size; \
} while (0)
2562
2563/**
2564 *	partition_mem - partition memory and configure TP memory settings
2565 *	@adap: the adapter
2566 *	@p: the TP parameters
2567 *
2568 *	Partitions context and payload memory and configures TP's memory
2569 *	registers.
2570 */
2571static void partition_mem(struct adapter *adap, const struct tp_params *p)
2572{
2573	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2574	unsigned int timers = 0, timers_shift = 22;
2575
2576	if (adap->params.rev > 0) {
2577		if (tids <= 16 * 1024) {
2578			timers = 1;
2579			timers_shift = 16;
2580		} else if (tids <= 64 * 1024) {
2581			timers = 2;
2582			timers_shift = 18;
2583		} else if (tids <= 256 * 1024) {
2584			timers = 3;
2585			timers_shift = 20;
2586		}
2587	}
2588
2589	t3_write_reg(adap, A_TP_PMM_SIZE,
2590		     p->chan_rx_size | (p->chan_tx_size >> 16));
2591
2592	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2593	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2594	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2595	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2596			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2597
2598	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2599	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2600	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2601
2602	pstructs = p->rx_num_pgs + p->tx_num_pgs;
2603	/* Add a bit of headroom and make it a multiple of 24 */
2604	pstructs += 48;
2605	pstructs -= pstructs % 24;
2606	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2607
2608	m = tids * TCB_SIZE;
2609	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2610	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2611	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2612	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2613	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2614	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2615	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2616	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2617
2618	m = (m + 4095) & ~0xfff;
2619	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2620	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2621
2622	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2623	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2624	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2625	if (tids < m)
2626		adap->params.mc5.nservers += m - tids;
2627}
2628
2629static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2630				  u32 val)
2631{
2632	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2633	t3_write_reg(adap, A_TP_PIO_DATA, val);
2634}
2635
2636static void tp_config(struct adapter *adap, const struct tp_params *p)
2637{
2638	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2639		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2640		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2641	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2642		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2643		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2644	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2645		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2646		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2647		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2648	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2649			 F_IPV6ENABLE | F_NICMODE);
2650	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2651	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2652	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2653			 adap->params.rev > 0 ? F_ENABLEESND :
2654			 F_T3A_ENABLEESND);
2655
2656	t3_set_reg_field(adap, A_TP_PC_CONFIG,
2657			 F_ENABLEEPCMDAFULL,
2658			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2659			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2660	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2661			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2662			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2663	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2664	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2665
2666	if (adap->params.rev > 0) {
2667		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2668		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2669				 F_TXPACEAUTO);
2670		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2671		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2672	} else
2673		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2674
2675	if (adap->params.rev == T3_REV_C)
2676		t3_set_reg_field(adap, A_TP_PC_CONFIG,
2677				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2678				 V_TABLELATENCYDELTA(4));
2679
2680	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2681	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2682	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2683	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2684}
2685
2686/* Desired TP timer resolution in usec */
2687#define TP_TMR_RES 50
2688
2689/* TCP timer values in ms */
2690#define TP_DACK_TIMER 50
2691#define TP_RTO_MIN    250
2692
2693/**
2694 *	tp_set_timers - set TP timing parameters
2695 *	@adap: the adapter to set
2696 *	@core_clk: the core clock frequency in Hz
2697 *
2698 *	Set TP's timing parameters, such as the various timer resolutions and
2699 *	the TCP timer values.
2700 */
2701static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2702{
2703	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2704	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
2705	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
2706	unsigned int tps = core_clk >> tre;
2707
2708	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2709		     V_DELAYEDACKRESOLUTION(dack_re) |
2710		     V_TIMESTAMPRESOLUTION(tstamp_re));
2711	t3_write_reg(adap, A_TP_DACK_TIMER,
2712		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2713	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2714	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2715	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2716	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2717	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2718		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2719		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2720		     V_KEEPALIVEMAX(9));
2721
2722#define SECONDS * tps
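/* lets "n SECONDS" below read as "n * tps", i.e. seconds -> timer ticks */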
2723
2724	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2725	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2726	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2727	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2728	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2729	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2730	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2731	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2732	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2733
2734#undef SECONDS
2735}
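/*
 * Worked example (illustrative): for a hypothetical 200 MHz core clock,
 * tre = fls(200000000 / (1000000 / 50)) - 1 = fls(10000) - 1 = 13, i.e. a
 * 2^13 / 200 MHz = 40.96 us tick, the largest power-of-2 tick that does not
 * exceed the desired 50 us resolution, and tps = 200000000 >> 13 = 24414
 * ticks per second.  Likewise dack_re = fls(40000) - 1 = 15, a 163.84 us
 * delayed-ACK tick against the 200 us target.
 */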
2736
2737/**
2738 *	t3_tp_set_coalescing_size - set receive coalescing size
2739 *	@adap: the adapter
2740 *	@size: the receive coalescing size
2741 *	@psh: whether a set PSH bit should deliver coalesced data
2742 *
2743 *	Set the receive coalescing size and PSH bit handling.
2744 */
2745static int t3_tp_set_coalescing_size(struct adapter *adap,
2746				     unsigned int size, int psh)
2747{
2748	u32 val;
2749
2750	if (size > MAX_RX_COALESCING_LEN)
2751		return -EINVAL;
2752
2753	val = t3_read_reg(adap, A_TP_PARA_REG3);
2754	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2755
2756	if (size) {
2757		val |= F_RXCOALESCEENABLE;
2758		if (psh)
2759			val |= F_RXCOALESCEPSHEN;
2760		size = min(MAX_RX_COALESCING_LEN, size);
2761		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2762			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2763	}
2764	t3_write_reg(adap, A_TP_PARA_REG3, val);
2765	return 0;
2766}
2767
2768/**
2769 *	t3_tp_set_max_rxsize - set the max receive size
2770 *	@adap: the adapter
2771 *	@size: the max receive size
2772 *
2773 *	Set TP's max receive size.  This is the limit that applies when
2774 *	receive coalescing is disabled.
2775 */
2776static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2777{
2778	t3_write_reg(adap, A_TP_PARA_REG7,
2779		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2780}
2781
2782static void init_mtus(unsigned short mtus[])
2783{
2784	/*
2785	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
2786	 * it can accommodate max size TCP/IP headers when SACK and timestamps
2787	 * are enabled and still have at least 8 bytes of payload.
2788	 */
2789	mtus[0] = 88;
2790	mtus[1] = 88;
2791	mtus[2] = 256;
2792	mtus[3] = 512;
2793	mtus[4] = 576;
2794	mtus[5] = 1024;
2795	mtus[6] = 1280;
2796	mtus[7] = 1492;
2797	mtus[8] = 1500;
2798	mtus[9] = 2002;
2799	mtus[10] = 2048;
2800	mtus[11] = 4096;
2801	mtus[12] = 4352;
2802	mtus[13] = 8192;
2803	mtus[14] = 9000;
2804	mtus[15] = 9600;
2805}
2806
2807/*
2808 * Initial congestion control parameters.
2809 */
2810static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2811{
2812	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2813	a[9] = 2;
2814	a[10] = 3;
2815	a[11] = 4;
2816	a[12] = 5;
2817	a[13] = 6;
2818	a[14] = 7;
2819	a[15] = 8;
2820	a[16] = 9;
2821	a[17] = 10;
2822	a[18] = 14;
2823	a[19] = 17;
2824	a[20] = 21;
2825	a[21] = 25;
2826	a[22] = 30;
2827	a[23] = 35;
2828	a[24] = 45;
2829	a[25] = 60;
2830	a[26] = 80;
2831	a[27] = 100;
2832	a[28] = 200;
2833	a[29] = 300;
2834	a[30] = 400;
2835	a[31] = 500;
2836
2837	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2838	b[9] = b[10] = 1;
2839	b[11] = b[12] = 2;
2840	b[13] = b[14] = b[15] = b[16] = 3;
2841	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2842	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2843	b[28] = b[29] = 6;
2844	b[30] = b[31] = 7;
2845}
2846
2847/* The minimum additive increment value for the congestion control table */
2848#define CC_MIN_INCR 2U
2849
2850/**
2851 *	t3_load_mtus - write the MTU and congestion control HW tables
2852 *	@adap: the adapter
2853 *	@mtus: the unrestricted values for the MTU table
2854 *	@alpha: the values for the congestion control alpha parameter
2855 *	@beta: the values for the congestion control beta parameter
2856 *	@mtu_cap: the maximum permitted effective MTU
2857 *
2858 *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2859 *	Update the high-speed congestion control table with the supplied alpha,
2860 *	beta, and MTUs.
2861 */
2862void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2863		  unsigned short alpha[NCCTRL_WIN],
2864		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2865{
2866	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2867		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2868		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2869		28672, 40960, 57344, 81920, 114688, 163840, 229376
2870	};
2871
2872	unsigned int i, w;
2873
2874	for (i = 0; i < NMTUS; ++i) {
2875		unsigned int mtu = min(mtus[i], mtu_cap);
2876		unsigned int log2 = fls(mtu);
2877
2878		if (!(mtu & ((1 << log2) >> 2)))	/* round to nearest power of 2 */
2879			log2--;
2880		t3_write_reg(adap, A_TP_MTU_TABLE,
2881			     (i << 24) | (log2 << 16) | mtu);
2882
2883		for (w = 0; w < NCCTRL_WIN; ++w) {
2884			unsigned int inc;
2885
2886			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2887				  CC_MIN_INCR);
2888
2889			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2890				     (w << 16) | (beta[w] << 13) | inc);
2891		}
2892	}
2893}
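
/*
 * Example (hypothetical values): for mtu = 1500 and alpha[w] = 2 in a
 * window with avg_pkts[w] = 20, the additive increment is
 * max((1500 - 40) * 2 / 20, CC_MIN_INCR) = max(146, 2) = 146, i.e. the
 * increment scales with the payload size (mtu less 40 bytes of TCP/IP
 * headers) and shrinks as the expected packets per window grow.
 */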
2894
2895/**
2896 *	t3_tp_get_mib_stats - read TP's MIB counters
2897 *	@adap: the adapter
2898 *	@tps: holds the returned counter values
2899 *
2900 *	Returns the values of TP's MIB counters.
2901 */
2902void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2903{
2904	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2905			 sizeof(*tps) / sizeof(u32), 0);
2906}
2907
2908#define ulp_region(adap, name, start, len) \
2909	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2910	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2911		     (start) + (len) - 1); \
2912	start += len
2913
2914#define ulptx_region(adap, name, start, len) \
2915	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2916	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2917		     (start) + (len) - 1)
2918
2919static void ulp_config(struct adapter *adap, const struct tp_params *p)
2920{
2921	unsigned int m = p->chan_rx_size;
2922
2923	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2924	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2925	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2926	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2927	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2928	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2929	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2930	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2931}
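
/*
 * Note that ulp_region() advances its start argument past the region it
 * programs while ulptx_region() does not, so the calls above lay the ISCSI,
 * TDDP, STAG, RQ and PBL RX regions out back to back starting at
 * chan_rx_size, with the ULP TX TPT and PBL windows given the same limits
 * as the RX STAG and PBL regions respectively.
 */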
2932
2933/**
2934 *	t3_set_proto_sram - set the contents of the protocol sram
2935 *	@adap: the adapter
2936 *	@data: the protocol image
2937 *
2938 *	Write the contents of the protocol SRAM.  Returns 0 on success or -EIO if a write times out.
2939 */
2940int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2941{
2942	int i;
2943	const __be32 *buf = (const __be32 *)data;
2944
2945	for (i = 0; i < PROTO_SRAM_LINES; i++) {
2946		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2947		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2948		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2949		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2950		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2951
2952		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2953		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2954			return -EIO;
2955	}
2956	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2957
2958	return 0;
2959}
2960
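/**
 *	t3_config_trace_filter - configure one of the tracing filters
 *	@adapter: the adapter
 *	@tp: the desired trace filter parameters
 *	@filter_index: which filter to configure (0 selects Tx, non-zero Rx)
 *	@invert: if set, invert the sense of the match
 *	@enable: whether to enable the filter
 *
 *	Packs the source/destination IP addresses and ports, protocol, VLAN,
 *	and interface fields of @tp, along with their masks, into the key and
 *	mask registers of the selected trace filter.
 */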
2961void t3_config_trace_filter(struct adapter *adapter,
2962			    const struct trace_params *tp, int filter_index,
2963			    int invert, int enable)
2964{
2965	u32 addr, key[4], mask[4];
2966
2967	key[0] = tp->sport | (tp->sip << 16);
2968	key[1] = (tp->sip >> 16) | (tp->dport << 16);
2969	key[2] = tp->dip;
2970	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2971
2972	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2973	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2974	mask[2] = tp->dip_mask;
2975	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2976
2977	if (invert)
2978		key[3] |= (1 << 29);
2979	if (enable)
2980		key[3] |= (1 << 28);
2981
2982	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2983	tp_wr_indirect(adapter, addr++, key[0]);
2984	tp_wr_indirect(adapter, addr++, mask[0]);
2985	tp_wr_indirect(adapter, addr++, key[1]);
2986	tp_wr_indirect(adapter, addr++, mask[1]);
2987	tp_wr_indirect(adapter, addr++, key[2]);
2988	tp_wr_indirect(adapter, addr++, mask[2]);
2989	tp_wr_indirect(adapter, addr++, key[3]);
2990	tp_wr_indirect(adapter, addr, mask[3]);
2991	t3_read_reg(adapter, A_TP_PIO_DATA);
2992}
2993
2994/**
2995 *	t3_config_sched - configure a HW traffic scheduler
2996 *	@adap: the adapter
2997 *	@kbps: target rate in Kbps
2998 *	@sched: the scheduler index
2999 *
3000 *	Configure a HW scheduler for the target rate.  Returns 0 on success or -EINVAL if no suitable (cpt, bpt) pair can be found.
3001 */
3002int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
3003{
3004	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3005	unsigned int clk = adap->params.vpd.cclk * 1000;
3006	unsigned int selected_cpt = 0, selected_bpt = 0;
3007
3008	if (kbps > 0) {
3009		kbps *= 125;	/* -> bytes */
3010		for (cpt = 1; cpt <= 255; cpt++) {
3011			tps = clk / cpt;
3012			bpt = (kbps + tps / 2) / tps;
3013			if (bpt > 0 && bpt <= 255) {
3014				v = bpt * tps;
3015				delta = v >= kbps ? v - kbps : kbps - v;
3016				if (delta <= mindelta) {
3017					mindelta = delta;
3018					selected_cpt = cpt;
3019					selected_bpt = bpt;
3020				}
3021			} else if (selected_cpt)
3022				break;
3023		}
3024		if (!selected_cpt)
3025			return -EINVAL;
3026	}
3027	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3028		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3029	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3030	if (sched & 1)
3031		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3032	else
3033		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3034	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3035	return 0;
3036}
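
/*
 * Example (hypothetical 200 MHz core clock, kbps = 10000): the target rate
 * is 10000 * 125 = 1250000 bytes/sec.  With cpt = 160 the scheduler ticks
 * 200000000 / 160 = 1250000 times/sec, so bpt = 1 byte per tick matches the
 * target exactly (delta = 0) and (cpt, bpt) = (160, 1) is selected.
 */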
3037
3038static int tp_init(struct adapter *adap, const struct tp_params *p)
3039{
3040	int busy = 0;
3041
3042	tp_config(adap, p);
3043	t3_set_vlan_accel(adap, 3, 0);
3044
3045	if (is_offload(adap)) {
3046		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3047		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3048		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3049				       0, 1000, 5);
3050		if (busy)
3051			CH_ERR(adap, "TP initialization timed out\n");
3052	}
3053
3054	if (!busy)
3055		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3056	return busy;
3057}
3058
3059/*
3060 * Perform the bits of HW initialization that are dependent on the Tx
3061 * channels being used.
3062 */
3063static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3064{
3065	int i;
3066
3067	if (chan_map != 3) {                                 /* one channel */
3068		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3069		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3070		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3071			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3072					      F_TPTXPORT1EN | F_PORT1ACTIVE));
3073		t3_write_reg(adap, A_PM1_TX_CFG,
3074			     chan_map == 1 ? 0xffffffff : 0);
3075	} else {                                             /* two channels */
3076		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3077		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3078		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3079			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3080		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3081			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3082			     F_ENFORCEPKT);
3083		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3084		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3085		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3086			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3087		for (i = 0; i < 16; i++)
3088			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3089				     (i << 16) | 0x1010);
3090	}
3091}
3092
3093static int calibrate_xgm(struct adapter *adapter)
3094{
3095	if (uses_xaui(adapter)) {
3096		unsigned int v, i;
3097
3098		for (i = 0; i < 5; ++i) {
3099			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3100			t3_read_reg(adapter, A_XGM_XAUI_IMP);
3101			msleep(1);
3102			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3103			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3104				t3_write_reg(adapter, A_XGM_XAUI_IMP,
3105					     V_XAUIIMP(G_CALIMP(v) >> 2));
3106				return 0;
3107			}
3108		}
3109		CH_ERR(adapter, "MAC calibration failed\n");
3110		return -1;
3111	} else {
3112		t3_write_reg(adapter, A_XGM_RGMII_IMP,
3113			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3114		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3115				 F_XGM_IMPSETUPDATE);
3116	}
3117	return 0;
3118}
3119
3120static void calibrate_xgm_t3b(struct adapter *adapter)
3121{
3122	if (!uses_xaui(adapter)) {
3123		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3124			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3125		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3126		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3127				 F_XGM_IMPSETUPDATE);
3128		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3129				 0);
3130		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3131		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3132	}
3133}
3134
3135struct mc7_timing_params {
3136	unsigned char ActToPreDly;
3137	unsigned char ActToRdWrDly;
3138	unsigned char PreCyc;
3139	unsigned char RefCyc[5];
3140	unsigned char BkCyc;
3141	unsigned char WrToRdDly;
3142	unsigned char RdToWrDly;
3143};
3144
3145/*
3146 * Write a value to a register and check that the write completed.  These
3147 * writes normally complete in a cycle or two, so one read should suffice.
3148 * The very first read exists to flush the posted write to the device.
3149 */
3150static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3151{
3152	t3_write_reg(adapter, addr, val);
3153	t3_read_reg(adapter, addr);	/* flush */
3154	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3155		return 0;
3156	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3157	return -EIO;
3158}
3159
3160static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3161{
3162	static const unsigned int mc7_mode[] = {
3163		0x632, 0x642, 0x652, 0x432, 0x442
3164	};
3165	static const struct mc7_timing_params mc7_timings[] = {
3166		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3167		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3168		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3169		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3170		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3171	};
3172
3173	u32 val;
3174	unsigned int width, density, slow, attempts;
3175	struct adapter *adapter = mc7->adapter;
3176	const struct mc7_timing_params *p = &mc7_timings[mem_type];
3177
3178	if (!mc7->size)
3179		return 0;
3180
3181	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3182	slow = val & F_SLOW;
3183	width = G_WIDTH(val);
3184	density = G_DEN(val);
3185
3186	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3187	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
3188	msleep(1);
3189
3190	if (!slow) {
3191		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3192		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3193		msleep(1);
3194		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3195		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3196			CH_ERR(adapter, "%s MC7 calibration timed out\n",
3197			       mc7->name);
3198			goto out_fail;
3199		}
3200	}
3201
3202	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3203		     V_ACTTOPREDLY(p->ActToPreDly) |
3204		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3205		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3206		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3207
3208	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3209		     val | F_CLKEN | F_TERM150);
3210	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
3211
3212	if (!slow)
3213		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3214				 F_DLLENB);
3215	udelay(1);
3216
3217	val = slow ? 3 : 6;
3218	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3219	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3220	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3221	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3222		goto out_fail;
3223
3224	if (!slow) {
3225		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3226		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3227		udelay(5);
3228	}
3229
3230	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3231	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3232	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3233	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3234		       mc7_mode[mem_type]) ||
3235	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3236	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3237		goto out_fail;
3238
3239	/* clock value is in KHz */
3240	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
3241	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */
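
	/*
	 * The conversion above turns the clock (in KHz) into the number of
	 * clock cycles per 7812.5 ns, the standard DDR refresh interval
	 * (64 ms / 8192 rows).  E.g. a hypothetical 200000 KHz clock gives
	 * (200000 * 7812 + 100000) / 1000000 = 1562 cycles per refresh.
	 */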
3242
3243	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3244		     F_PERREFEN | V_PREREFDIV(mc7_clock));
3245	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */
3246
3247	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3248	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3249	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3250	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3251		     (mc7->size << width) - 1);
3252	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3253	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */
3254
3255	attempts = 50;
3256	do {
3257		msleep(250);
3258		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3259	} while ((val & F_BUSY) && --attempts);
3260	if (val & F_BUSY) {
3261		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3262		goto out_fail;
3263	}
3264
3265	/* Enable normal memory accesses. */
3266	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3267	return 0;
3268
3269out_fail:
3270	return -1;
3271}
3272
3273static void config_pcie(struct adapter *adap)
3274{
3275	static const u16 ack_lat[4][6] = {
3276		{237, 416, 559, 1071, 2095, 4143},
3277		{128, 217, 289, 545, 1057, 2081},
3278		{73, 118, 154, 282, 538, 1050},
3279		{67, 107, 86, 150, 278, 534}
3280	};
3281	static const u16 rpl_tmr[4][6] = {
3282		{711, 1248, 1677, 3213, 6285, 12429},
3283		{384, 651, 867, 1635, 3171, 6243},
3284		{219, 354, 462, 846, 1614, 3150},
3285		{201, 321, 258, 450, 834, 1602}
3286	};
3287
3288	u16 val, devid;
3289	unsigned int log2_width, pldsize;
3290	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3291
3292	pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
3293	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3294
3295	pci_read_config_word(adap->pdev, 0x2, &devid);
3296	if (devid == 0x37) {
3297		pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
3298					   val & ~PCI_EXP_DEVCTL_READRQ &
3299					   ~PCI_EXP_DEVCTL_PAYLOAD);
3300		pldsize = 0;
3301	}
3302
3303	pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);
3304
3305	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3306	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3307	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3308	log2_width = fls(adap->params.pci.width) - 1;
3309	acklat = ack_lat[log2_width][pldsize];
3310	if (val & PCI_EXP_LNKCTL_ASPM_L0S)	/* check L0sEnable */
3311		acklat += fst_trn_tx * 4;
3312	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3313
3314	if (adap->params.rev == 0)
3315		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3316				 V_T3A_ACKLAT(M_T3A_ACKLAT),
3317				 V_T3A_ACKLAT(acklat));
3318	else
3319		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3320				 V_ACKLAT(acklat));
3321
3322	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3323			 V_REPLAYLMT(rpllmt));
3324
3325	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3326	t3_set_reg_field(adap, A_PCIE_CFG, 0,
3327			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3328			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3329}
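
/*
 * Example (hypothetical link configuration): an x8 link (log2_width = 3)
 * with a 512-byte max payload (pldsize = 2) picks ack_lat[3][2] = 86 and
 * rpl_tmr[3][2] = 258 as the base ACK-latency and replay-timer values,
 * before the fast-training-sequence adjustments above are added.
 */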
3330
3331/*
3332 * Initialize and configure T3 HW modules.  This performs the
3333 * initialization steps that need to be done once after a card is reset.
3334 * MAC and PHY initialization is handled separately whenever a port is enabled.
3335 *
3336 * fw_params are passed to FW and their value is platform dependent.  Only the
3337 * top 8 bits are available for use, the rest must be 0.
3338 */
3339int t3_init_hw(struct adapter *adapter, u32 fw_params)
3340{
3341	int err = -EIO, attempts, i;
3342	const struct vpd_params *vpd = &adapter->params.vpd;
3343
3344	if (adapter->params.rev > 0)
3345		calibrate_xgm_t3b(adapter);
3346	else if (calibrate_xgm(adapter))
3347		goto out_err;
3348
3349	if (vpd->mclk) {
3350		partition_mem(adapter, &adapter->params.tp);
3351
3352		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3353		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3354		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3355		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3356				adapter->params.mc5.nfilters,
3357				adapter->params.mc5.nroutes))
3358			goto out_err;
3359
3360		for (i = 0; i < 32; i++)
3361			if (clear_sge_ctxt(adapter, i, F_CQ))
3362				goto out_err;
3363	}
3364
3365	if (tp_init(adapter, &adapter->params.tp))
3366		goto out_err;
3367
3368	t3_tp_set_coalescing_size(adapter,
3369				  min(adapter->params.sge.max_pkt_size,
3370				      MAX_RX_COALESCING_LEN), 1);
3371	t3_tp_set_max_rxsize(adapter,
3372			     min(adapter->params.sge.max_pkt_size, 16384U));
3373	ulp_config(adapter, &adapter->params.tp);
3374
3375	if (is_pcie(adapter))
3376		config_pcie(adapter);
3377	else
3378		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3379				 F_DMASTOPEN | F_CLIDECEN);
3380
3381	if (adapter->params.rev == T3_REV_C)
3382		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3383				 F_CFG_CQE_SOP_MASK);
3384
3385	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3386	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3387	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3388	chan_init_hw(adapter, adapter->params.chan_map);
3389	t3_sge_init(adapter, &adapter->params.sge);
3390	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
3391
3392	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3393
3394	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3395	t3_write_reg(adapter, A_CIM_BOOT_CFG,
3396		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3397	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */
3398
3399	attempts = 100;
3400	do {			/* wait for uP to initialize */
3401		msleep(20);
3402	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3403	if (!attempts) {
3404		CH_ERR(adapter, "uP initialization timed out\n");
3405		goto out_err;
3406	}
3407
3408	err = 0;
3409out_err:
3410	return err;
3411}
3412
3413/**
3414 *	get_pci_mode - determine a card's PCI mode
3415 *	@adapter: the adapter
3416 *	@p: where to store the PCI settings
3417 *
3418 *	Determines a card's PCI mode and associated parameters, such as speed
3419 *	and width.
3420 */
3421static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3422{
3423	static unsigned short speed_map[] = { 33, 66, 100, 133 };
3424	u32 pci_mode;
3425
3426	if (pci_is_pcie(adapter->pdev)) {
3427		u16 val;
3428
3429		p->variant = PCI_VARIANT_PCIE;
3430		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
3431		p->width = (val >> 4) & 0x3f;
3432		return;
3433	}
3434
3435	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3436	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3437	p->width = (pci_mode & F_64BIT) ? 64 : 32;
3438	pci_mode = G_PCIXINITPAT(pci_mode);
3439	if (pci_mode == 0)
3440		p->variant = PCI_VARIANT_PCI;
3441	else if (pci_mode < 4)
3442		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3443	else if (pci_mode < 8)
3444		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3445	else
3446		p->variant = PCI_VARIANT_PCIX_266_MODE2;
3447}
3448
3449/**
3450 *	init_link_config - initialize a link's SW state
3451 *	@lc: structure holding the link state
3452 *	@caps: bit mask of the link's port capabilities
3453 *
3454 *	Initializes the SW state maintained for each link, including the link's
3455 *	capabilities and default speed/duplex/flow-control/autonegotiation
3456 *	settings.
3457 */
3458static void init_link_config(struct link_config *lc, unsigned int caps)
3459{
3460	lc->supported = caps;
3461	lc->requested_speed = lc->speed = SPEED_INVALID;
3462	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3463	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3464	if (lc->supported & SUPPORTED_Autoneg) {
3465		lc->advertising = lc->supported;
3466		lc->autoneg = AUTONEG_ENABLE;
3467		lc->requested_fc |= PAUSE_AUTONEG;
3468	} else {
3469		lc->advertising = 0;
3470		lc->autoneg = AUTONEG_DISABLE;
3471	}
3472}
3473
3474/**
3475 *	mc7_calc_size - calculate MC7 memory size
3476 *	@cfg: the MC7 configuration
3477 *
3478 *	Calculates the size of an MC7 memory in bytes from the value of its
3479 *	configuration register.
3480 */
3481static unsigned int mc7_calc_size(u32 cfg)
3482{
3483	unsigned int width = G_WIDTH(cfg);
3484	unsigned int banks = !!(cfg & F_BKS) + 1;
3485	unsigned int org = !!(cfg & F_ORG) + 1;
3486	unsigned int density = G_DEN(cfg);
3487	unsigned int MBs = ((256 << density) * banks) / (org << width);
3488
3489	return MBs << 20;
3490}
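
/*
 * Example (hypothetical CFG decoding): with density = 1, F_BKS set
 * (banks = 2), F_ORG set (org = 2) and width = 1, the size is
 * ((256 << 1) * 2) / (2 << 1) = 256, returned as 256 << 20 bytes (256 MB).
 */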
3491
3492static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3493		     unsigned int base_addr, const char *name)
3494{
3495	u32 cfg;
3496
3497	mc7->adapter = adapter;
3498	mc7->name = name;
3499	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3500	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3501	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3502	mc7->width = G_WIDTH(cfg);
3503}
3504
3505static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3506{
3507	u16 devid;
3508
3509	mac->adapter = adapter;
3510	pci_read_config_word(adapter->pdev, 0x2, &devid);
3511
3512	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
3513		index = 0;
3514	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3515	mac->nucast = 1;
3516
3517	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3518		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3519			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3520		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3521				 F_ENRGMII, 0);
3522	}
3523}
3524
3525static void early_hw_init(struct adapter *adapter,
3526			  const struct adapter_info *ai)
3527{
3528	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3529
3530	mi1_init(adapter, ai);
3531	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
3532		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3533	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3534		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3535	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3536	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3537
3538	if (adapter->params.rev == 0 || !uses_xaui(adapter))
3539		val |= F_ENRGMII;
3540
3541	/* Enable MAC clocks so we can access the registers */
3542	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3543	t3_read_reg(adapter, A_XGM_PORT_CFG);
3544
3545	val |= F_CLKDIVRESET_;
3546	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3547	t3_read_reg(adapter, A_XGM_PORT_CFG);
3548	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3549	t3_read_reg(adapter, A_XGM_PORT_CFG);
3550}
3551
3552/*
3553 * Reset the adapter.
3554 * Older PCIe cards lose their config space during reset; PCI-X
3555 * ones don't.
3556 */
3557int t3_reset_adapter(struct adapter *adapter)
3558{
3559	int i, save_and_restore_pcie =
3560	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3561	uint16_t devid = 0;
3562
3563	if (save_and_restore_pcie)
3564		pci_save_state(adapter->pdev);
3565	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3566
3567	/*
3568	 * Delay to give the device time to reset fully.
3569	 * XXX The delay time should be tuned.
3570	 */
3571	for (i = 0; i < 10; i++) {
3572		msleep(50);
3573		pci_read_config_word(adapter->pdev, 0x00, &devid);
3574		if (devid == 0x1425)
3575			break;
3576	}
3577
3578	if (devid != 0x1425)
3579		return -1;
3580
3581	if (save_and_restore_pcie)
3582		pci_restore_state(adapter->pdev);
3583	return 0;
3584}
3585
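/*
 * Scrub memories that may be read before they are first written so that
 * their parity bits start out valid: the SGE egress and response-queue
 * contexts and the CIM IBQ RAMs are all cleared to zero here.
 */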
3586static int init_parity(struct adapter *adap)
3587{
3588	int i, err, addr;
3589
3590	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3591		return -EBUSY;
3592
3593	for (err = i = 0; !err && i < 16; i++)
3594		err = clear_sge_ctxt(adap, i, F_EGRESS);
3595	for (i = 0xfff0; !err && i <= 0xffff; i++)
3596		err = clear_sge_ctxt(adap, i, F_EGRESS);
3597	for (i = 0; !err && i < SGE_QSETS; i++)
3598		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3599	if (err)
3600		return err;
3601
3602	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3603	for (i = 0; i < 4; i++)
3604		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3605			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3606				     F_IBQDBGWR | V_IBQDBGQID(i) |
3607				     V_IBQDBGADDR(addr));
3608			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3609					      F_IBQDBGBUSY, 0, 2, 1);
3610			if (err)
3611				return err;
3612		}
3613	return 0;
3614}
3615
3616/*
3617 * Initialize adapter SW state for the various HW modules, set initial values
3618 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3619 * interface.
3620 */
3621int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3622		    int reset)
3623{
3624	int ret;
3625	unsigned int i, j = -1;
3626
3627	get_pci_mode(adapter, &adapter->params.pci);
3628
3629	adapter->params.info = ai;
3630	adapter->params.nports = ai->nports0 + ai->nports1;
3631	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
3632	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3633	/*
3634	 * We used to only run the "adapter check task" once a second if
3635	 * we had PHYs which didn't support interrupts (we would check
3636	 * their link status once a second).  Now we check other conditions
3637	 * in that routine which could potentially impose a very high
3638	 * interrupt load on the system.  As such, we now always scan the
3639	 * adapter state once a second ...
3640	 */
3641	adapter->params.linkpoll_period = 10;
3642	adapter->params.stats_update_period = is_10G(adapter) ?
3643	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3644	adapter->params.pci.vpd_cap_addr =
3645	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3646	ret = get_vpd_params(adapter, &adapter->params.vpd);
3647	if (ret < 0)
3648		return ret;
3649
3650	if (reset && t3_reset_adapter(adapter))
3651		return -1;
3652
3653	t3_sge_prep(adapter, &adapter->params.sge);
3654
3655	if (adapter->params.vpd.mclk) {
3656		struct tp_params *p = &adapter->params.tp;
3657
3658		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3659		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3660		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3661
3662		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3663		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3664		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3665		p->cm_size = t3_mc7_size(&adapter->cm);
3666		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
3667		p->chan_tx_size = p->pmtx_size / p->nchan;
3668		p->rx_pg_size = 64 * 1024;
3669		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3670		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3671		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3672		p->ntimer_qs = p->cm_size >= (128 << 20) ||
3673		    adapter->params.rev > 0 ? 12 : 6;
3674	}
3675
3676	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3677				  t3_mc7_size(&adapter->pmtx) &&
3678				  t3_mc7_size(&adapter->cm);
3679
3680	if (is_offload(adapter)) {
3681		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3682		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3683		    DEFAULT_NFILTERS : 0;
3684		adapter->params.mc5.nroutes = 0;
3685		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3686
3687		init_mtus(adapter->params.mtus);
3688		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3689	}
3690
3691	early_hw_init(adapter, ai);
3692	ret = init_parity(adapter);
3693	if (ret)
3694		return ret;
3695
3696	for_each_port(adapter, i) {
3697		u8 hw_addr[6];
3698		const struct port_type_info *pti;
3699		struct port_info *p = adap2pinfo(adapter, i);
3700
3701		while (!adapter->params.vpd.port_type[++j])
3702			;
3703
3704		pti = &port_types[adapter->params.vpd.port_type[j]];
3705		if (!pti->phy_prep) {
3706			CH_ALERT(adapter, "Invalid port type index %d\n",
3707				 adapter->params.vpd.port_type[j]);
3708			return -EINVAL;
3709		}
3710
3711		p->phy.mdio.dev = adapter->port[i];
3712		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3713				    ai->mdio_ops);
3714		if (ret)
3715			return ret;
3716		mac_prep(&p->mac, adapter, j);
3717
3718		/*
3719		 * The VPD EEPROM stores the base Ethernet address for the
3720		 * card.  A port's address is derived from the base by adding
3721		 * the port's index to the base's low octet.
3722		 */
3723		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3724		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
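		/*
		 * E.g. with a hypothetical base of 00:07:43:00:00:a0, port 0
		 * uses ...:a0 and port 1 uses ...:a1.
		 */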
3725
3726		memcpy(adapter->port[i]->dev_addr, hw_addr,
3727		       ETH_ALEN);
3728		init_link_config(&p->link_config, p->phy.caps);
3729		p->phy.ops->power_down(&p->phy, 1);
3730
3731		/*
3732		 * If the PHY doesn't support interrupts for link status
3733		 * changes, schedule a scan of the adapter links at least
3734		 * once a second.
3735		 */
3736		if (!(p->phy.caps & SUPPORTED_IRQ) &&
3737		    adapter->params.linkpoll_period > 10)
3738			adapter->params.linkpoll_period = 10;
3739	}
3740
3741	return 0;
3742}
3743
3744void t3_led_ready(struct adapter *adapter)
3745{
3746	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3747			 F_GPIO0_OUT_VAL);
3748}
3749
3750int t3_replay_prep_adapter(struct adapter *adapter)
3751{
3752	const struct adapter_info *ai = adapter->params.info;
3753	unsigned int i, j = -1;
3754	int ret;
3755
3756	early_hw_init(adapter, ai);
3757	ret = init_parity(adapter);
3758	if (ret)
3759		return ret;
3760
3761	for_each_port(adapter, i) {
3762		const struct port_type_info *pti;
3763		struct port_info *p = adap2pinfo(adapter, i);
3764
3765		while (!adapter->params.vpd.port_type[++j])
3766			;
3767
3768		pti = &port_types[adapter->params.vpd.port_type[j]];
3769		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3770		if (ret)
3771			return ret;
3772		p->phy.ops->power_down(&p->phy, 1);
3773	}
3774
3775	return 0;
3776}
3777