/*
 *  linux/drivers/message/fusion/mptlan.c
 *      IP Over Fibre Channel device driver.
 *      For use with LSI Fibre Channel PCI chip/adapters
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
 *  Copyright (c) 2000-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Define statements used for debugging
 */
//#define MPT_LAN_IO_DEBUG

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define my_VERSION	MPT_LINUX_VERSION_COMMON
#define MYNAM		"mptlan"

MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT LAN message sizes without variable part.
 */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))

#define MPT_LAN_TRANSACTION32_SIZE \
	(sizeof(SGETransaction32_t) - sizeof(u32))

/*
 *  Fusion MPT LAN private structures
 */

struct BufferControl {
	struct sk_buff	*skb;
	dma_addr_t	dma;
	unsigned int	len;
};

struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;
	spinlock_t txfidx_lock;

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;
	spinlock_t rxfidx_lock;

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;
	u32 total_received;

	struct delayed_work post_buckets_task;
	struct net_device *dev;
	unsigned long post_buckets_active;
};

struct mpt_lan_ohdr {
	u16	dtype;
	u8	daddr[FC_ALEN];
	u16	stype;
	u8	saddr[FC_ALEN];
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

/*
 *  Forward protos...
 */
static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
		       MPT_FRAME_HDR *reply);
static int  mpt_lan_open(struct net_device *dev);
static int  mpt_lan_reset(struct net_device *dev);
static int  mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
					   int priority);
static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_receive_post_reply(struct net_device *dev,
				       LANReceivePostReply_t *pRecvRep);
static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_send_reply(struct net_device *dev,
			       LANSendReply_t *pSendRep);
static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
					 struct net_device *dev);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Fusion MPT LAN private data
 */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	lan_reply - Handle all data sent from the hardware.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
 *	@reply: Pointer to MPT reply frame
 *
 *	Returns 1 indicating original alloc'd request frame ptr
 *	should be freed, or 0 if it shouldn't.
 */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = ioc->netdev;
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callback here
		//  is now skipped for this case!
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
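/*
 * IOC reset callback. On pre-reset: stop the net queue, zero the
 * outstanding-bucket count and rebuild the Rx free-context list; on
 * post-reset: repost receive buckets and wake the queue again.
 */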
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	struct net_device *dev = ioc->netdev;
	struct mpt_lan_priv *priv;

	if (dev == NULL)
		return(1);
	else
		priv = netdev_priv(dev);

	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	if (priv->mpt_rxfidx == NULL)
		return (1);

	if (reset_phase == MPT_IOC_SETUP_RESET) {
		;
	} else if (reset_phase == MPT_IOC_PRE_RESET) {
		int i;
		unsigned long flags;

		netif_stop_queue(dev);

		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

		atomic_set(&priv->buckets_out, 0);

		/* Reset Rx Free Tail index and re-populate the queue. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx_tail = -1;
		for (i = 0; i < priv->max_buckets_out; i++)
			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
	} else {
		mpt_lan_post_receive_buckets(priv);
		netif_wake_queue(dev);
	}

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
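/*
 * MPT event callback. No LAN-specific handling is needed for any event
 * at the moment; AckRequired handling is done in mptbase.c.
 */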
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));

	switch (le32_to_cpu(pEvReply->Event)) {
	case MPI_EVENT_NONE:				/* 00 */
	case MPI_EVENT_LOG_DATA:			/* 01 */
	case MPI_EVENT_STATE_CHANGE:			/* 02 */
	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
	case MPI_EVENT_RESCAN:				/* 06 */
		/* Ok, do we need to do anything here? As far as
		   I can tell, this is when a new device gets added
		   to the loop. */
	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
	case MPI_EVENT_LOGOUT:				/* 09 */
	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
	default:
		break;
	}

	/*
	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
	 *  Do NOT do it here now!
	 */

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
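/*
 * net_device open handler: reset the FW LAN port, allocate the Tx/Rx
 * free-context lists and BufferControl arrays, post the initial receive
 * buckets, register for MPT event notifications and start the queue.
 */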
static int
mpt_lan_open(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	int i;

	if (mpt_lan_reset(dev) != 0) {
		MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

		if (mpt_dev->active)
			printk ("The ioc is active. Perhaps it needs to be"
				" reset?\n");
		else
			printk ("The ioc is inactive, most likely in the "
				"process of being reset. Please try again in "
				"a moment.\n");
	}

	priv->mpt_txfidx = kmalloc_array(priv->tx_max_out, sizeof(int),
					 GFP_KERNEL);
	if (priv->mpt_txfidx == NULL)
		goto out;
	priv->mpt_txfidx_tail = -1;

	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
				GFP_KERNEL);
	if (priv->SendCtl == NULL)
		goto out_mpt_txfidx;
	for (i = 0; i < priv->tx_max_out; i++)
		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;

	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

	priv->mpt_rxfidx = kmalloc_array(priv->max_buckets_out, sizeof(int),
					 GFP_KERNEL);
	if (priv->mpt_rxfidx == NULL)
		goto out_SendCtl;
	priv->mpt_rxfidx_tail = -1;

	priv->RcvCtl = kcalloc(priv->max_buckets_out,
			       sizeof(struct BufferControl),
			       GFP_KERNEL);
	if (priv->RcvCtl == NULL)
		goto out_mpt_rxfidx;
	for (i = 0; i < priv->max_buckets_out; i++)
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;

/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/	for (i = 0; i < priv->tx_max_out; i++)
/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/	dlprintk(("\n"));

	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

	mpt_lan_post_receive_buckets(priv);
	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
			" Notifications. This is a bad thing! We're not going "
			"to go ahead, but I'd be leery of system stability at "
			"this point.\n");
	}

	netif_start_queue(dev);
	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

	return 0;
out_mpt_rxfidx:
	kfree(priv->mpt_rxfidx);
	priv->mpt_rxfidx = NULL;
out_SendCtl:
	kfree(priv->SendCtl);
	priv->SendCtl = NULL;
out_mpt_txfidx:
	kfree(priv->mpt_txfidx);
	priv->mpt_txfidx = NULL;
out:	return -ENOMEM;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
   any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
	MPT_FRAME_HDR *mf;
	LANResetRequest_t *pResetReq;
	struct mpt_lan_priv *priv = netdev_priv(dev);

	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);

	if (mf == NULL) {
/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
		"Unable to allocate a request frame.\n"));
*/
		return -1;
	}

	pResetReq = (LANResetRequest_t *) mf;

	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
	pResetReq->ChainOffset	= 0;
	pResetReq->Reserved	= 0;
	pResetReq->PortNumber	= priv->pnum;
	pResetReq->MsgFlags	= 0;
	pResetReq->Reserved2	= 0;

	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
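/*
 * net_device stop handler: deregister events, reset the FW LAN port,
 * wait up to two seconds for outstanding buckets to drain, then unmap
 * and free all Rx/Tx buffers and their context lists.
 */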
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted, atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	if (mpt_dev->active) {
		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
		netif_wake_queue(dev);
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
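/*
 * Handle a turbo send completion: update Tx stats, unmap and free the
 * sent skb, and return its context to the Tx free list.
 */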
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, sent));

	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(sent);

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
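/*
 * Handle a full LANSend reply, which may complete several sends at
 * once: check IOCStatus, then unmap and free each completed skb and
 * recycle its Tx context.
 */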
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		dev->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		dev->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		goto out;

	default:
		dev->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		dev->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__func__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
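/*
 * ndo_start_xmit handler: claim a Tx context and request frame, DMA-map
 * the skb, and build a LANSendRequest with one transaction element and
 * a single 64-bit simple SGE covering the whole packet.
 */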
static netdev_tx_t
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	const unsigned char *mac;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
	u16 cur_naa = 0x1000;

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__func__, skb));

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %d\n",
			__func__, priv->mpt_txfidx_tail);
		return NETDEV_TX_BUSY;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__func__);
		return NETDEV_TX_BUSY;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
	skb_reset_mac_header(skb);
	skb_pull(skb, 12);

	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);

	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved    = 0;
	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2   = 0;
	pSendReq->MsgFlags    = 0;
	pSendReq->PortNumber  = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
	pTrans->ContextSize   = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags         = 0;
	pTrans->TransactionContext[0] = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

	mac = skb_mac_header(skb);

	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
						    (mac[0] <<  8) |
						    (mac[1] <<  0));
	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
						    (mac[3] << 16) |
						    (mac[4] <<  8) |
						    (mac[5] <<  0));

	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

	/* If we ever decide to send more than one Simple SGE per LANSend, then
	   we will need to make sure that LAST_ELEMENT only gets set on the
	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
	pSimple->FlagsLength = cpu_to_le32(
			((MPI_SGE_FLAGS_LAST_ELEMENT |
			  MPI_SGE_FLAGS_END_OF_BUFFER |
			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			  MPI_SGE_FLAGS_HOST_TO_IOC |
			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
			skb->len);
	pSimple->Address.Low = cpu_to_le32((u32) dma);
	if (sizeof(dma_addr_t) > sizeof(u32))
		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
	else
		pSimple->Address.High = 0;

	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
	netif_trans_update(dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			le32_to_cpu(pSimple->FlagsLength)));

	return NETDEV_TX_OK;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
 */
static void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
		if (priority) {
			schedule_delayed_work(&priv->post_buckets_task, 0);
		} else {
			schedule_delayed_work(&priv->post_buckets_task, 1);
			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
				   "timer.\n"));
		}
		dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
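/*
 * Hand a received skb up the stack and, if the number of buckets still
 * posted to the IOC has dropped below the threshold, schedule a repost.
 */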
static int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	skb->protocol = mpt_lan_type_trans(skb, dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
		 "delivered to upper level.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->dev = dev;
	netif_rx(skb);

	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
		 atomic_read(&priv->buckets_out)));

	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
		mpt_lan_wake_post_buckets_task(dev, 1);

	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
		  "remaining, %d received back since sod\n",
		  atomic_read(&priv->buckets_out), priv->total_received));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
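/*
 * Handle a turbo receive completion: for short packets, copy into a
 * fresh skb and leave the bucket mapped for reuse; otherwise unmap the
 * bucket and hand its skb up. Either way the context goes back on the
 * Rx free list.
 */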
//static inline int
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 ctx, len;

	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
	skb = priv->RcvCtl[ctx].skb;

	len = GET_LAN_PACKET_LENGTH(tmsg);

	if (len < MPT_LAN_RX_COPYBREAK) {
		old_skb = skb;

		skb = dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		goto out;
	}

	skb_put(skb, len);

	priv->RcvCtl[ctx].skb = NULL;

	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

out:
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_dec(&priv->buckets_out);
	priv->total_received++;

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
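/*
 * The IOC has returned buckets without data (e.g. after a LanReset):
 * unmap and free each returned bucket and recycle its Rx context.
 */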
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
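/*
 * Handle a full ReceivePostReply: reassemble multi-bucket packets into
 * one skb, copy small packets, or pass the bucket's skb straight up.
 * Also watches the firmware's BucketsRemaining count and issues a
 * LanReset if it drifts too far from the driver's own bookkeeping.
 */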
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
						    priv->RcvCtl[ctx].dma,
						    priv->RcvCtl[ctx].len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

			pci_dma_sync_single_for_device(mpt_dev->pcidev,
						       priv->RcvCtl[ctx].dma,
						       priv->RcvCtl[ctx].len,
						       PCI_DMA_FROMDEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {

		old_skb = skb;

		skb = dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
					    priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len,
					    PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev,
					       priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len,
					       PCI_DMA_FROMDEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb, len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		return -1;
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));

	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");

		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Simple SGE's only at the moment */

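/*
 * Replenish the IOC's bucket supply: for every free Rx context,
 * (re)allocate and DMA-map an skb if needed and chain it into
 * LANReceivePostRequest frames, as many buckets per frame as fit.
 */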
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, buckets, curr));

	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__func__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __func__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
		mpt_dev->RequestNB[i] = 0;
		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				printk (KERN_ERR "%s: Can't alloc context\n",
					__func__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__func__);
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext[0] = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__func__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __func__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
	__func__, priv->total_posted, priv->total_received));

	clear_bit(0, &priv->post_buckets_active);
}

static void
mpt_lan_post_receive_buckets_work(struct work_struct *work)
{
	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
						  post_buckets_task.work));
}

static const struct net_device_ops mpt_netdev_ops = {
	.ndo_open       = mpt_lan_open,
	.ndo_stop       = mpt_lan_close,
	.ndo_start_xmit = mpt_lan_sdu_send,
	.ndo_tx_timeout = mpt_lan_tx_timeout,
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
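/*
 * Allocate and initialize an FC net_device for one IOC LAN port, pull
 * the hardware address from the cached LANPage1 config data, and
 * register it with the network stack.
 */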
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
	struct net_device *dev;
	struct mpt_lan_priv *priv;
	u8 HWaddr[FC_ALEN], *a;

	dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
	if (!dev)
		return NULL;

	dev->mtu = MPT_LAN_MTU;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->mpt_dev = mpt_dev;
	priv->pnum = pnum;

	INIT_DELAYED_WORK(&priv->post_buckets_task,
			  mpt_lan_post_receive_buckets_work);
	priv->post_buckets_active = 0;

	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
			__LINE__, dev->mtu + dev->hard_header_len + 4));

	atomic_set(&priv->buckets_out, 0);
	priv->total_posted = 0;
	priv->total_received = 0;
	priv->max_buckets_out = max_buckets_out;
	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
			__LINE__,
			mpt_dev->pfacts[0].MaxLanBuckets,
			max_buckets_out,
			priv->max_buckets_out));

	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
	spin_lock_init(&priv->txfidx_lock);
	spin_lock_init(&priv->rxfidx_lock);

	/*  Grab pre-fetched LANPage1 stuff. :-) */
	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

	HWaddr[0] = a[5];
	HWaddr[1] = a[4];
	HWaddr[2] = a[3];
	HWaddr[3] = a[2];
	HWaddr[4] = a[1];
	HWaddr[5] = a[0];

	dev->addr_len = FC_ALEN;
	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
	memset(dev->broadcast, 0xff, FC_ALEN);

	/* The Tx queue is 127 deep on the 909.
	 * Give ourselves some breathing room.
	 */
	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;

	dev->netdev_ops = &mpt_netdev_ops;
	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

	/* MTU range: 96 - 65280 */
	dev->min_mtu = MPT_LAN_MIN_MTU;
	dev->max_mtu = MPT_LAN_MAX_MTU;

	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
		"and setting initial values\n"));

	if (register_netdev(dev) != 0) {
		free_netdev(dev);
		dev = NULL;
	}
	return dev;
}

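/*
 * PCI probe callback (routed via mptbase): register a LAN device for
 * the first adapter port that advertises the LAN protocol.
 */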
static int
mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev;
	int			i;

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
		       ioc->name, ioc->pfacts[i].PortNumber,
		       ioc->pfacts[i].ProtocolFlags,
		       MPT_PROTOCOL_FLAGS_c_c_c_c(
			       ioc->pfacts[i].ProtocolFlags));

		if (!(ioc->pfacts[i].ProtocolFlags &
					MPI_PORTFACTS_PROTOCOL_LAN)) {
			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
			       "seems to be disabled on this adapter port!\n",
			       ioc->name);
			continue;
		}

		dev = mpt_register_lan_device(ioc, i);
		if (!dev) {
			printk(KERN_ERR MYNAM ": %s: Unable to register "
			       "port%d as a LAN device\n", ioc->name,
			       ioc->pfacts[i].PortNumber);
			continue;
		}

		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
		       "registered as '%s'\n", ioc->name, dev->name);
		printk(KERN_INFO MYNAM ": %s/%s: "
		       "LanAddr = %pM\n",
		       IOC_AND_NETDEV_NAMES_s_s(dev),
		       dev->dev_addr);

		ioc->netdev = dev;

		return 0;
	}

	return -ENODEV;
}

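/* PCI remove callback: unregister and free the port's net_device. */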
static void
mptlan_remove(struct pci_dev *pdev)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev = ioc->netdev;

	if (dev != NULL) {
		unregister_netdev(dev);
		free_netdev(dev);
	}
}

static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};

static int __init mpt_lan_init (void)
{
	show_mptmod_ver(LANAME, LANVER);

	LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
				"lan_reply");
	if (LanCtx <= 0) {
		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));

	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
		       "handler with mptbase! The world is at an end! "
		       "Everything is fading to black! Goodbye.\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));

	mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
	return 0;
}

static void __exit mpt_lan_exit(void)
{
	mpt_device_driver_deregister(MPTLAN_DRIVER);
	mpt_reset_deregister(LanCtx);

	if (LanCtx) {
		mpt_deregister(LanCtx);
		LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
	}
}

module_init(mpt_lan_init);
module_exit(mpt_lan_exit);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
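/*
 * Determine skb->protocol and packet type for a received FC LAN frame,
 * working around a firmware byte-swap bug on broadcast frames and
 * stripping the LLC/SNAP header from IP and ARP packets.
 */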
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
	struct fcllc *fcllc;

	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(struct mpt_lan_ohdr));

	if (fch->dtype == htons(0xffff)) {
		u32 *p = (u32 *) fch;

		swab32s(p + 0);
		swab32s(p + 1);
		swab32s(p + 2);
		swab32s(p + 3);

		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
				NETDEV_PTR_TO_IOC_NAME_s(dev));
		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
				fch->saddr);
	}

	if (*fch->daddr & 1) {
		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
			skb->pkt_type = PACKET_BROADCAST;
		} else {
			skb->pkt_type = PACKET_MULTICAST;
		}
	} else {
		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
			skb->pkt_type = PACKET_OTHERHOST;
		} else {
			skb->pkt_type = PACKET_HOST;
		}
	}

	fcllc = (struct fcllc *)skb->data;

	/* Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (fcllc->dsap == EXTENDED_SAP &&
		(fcllc->ethertype == htons(ETH_P_IP) ||
		 fcllc->ethertype == htons(ETH_P_ARP))) {
		skb_pull(skb, sizeof(struct fcllc));
		return fcllc->ethertype;
	}

	return htons(ETH_P_802_2);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
v5.4
   1/*
   2 *  linux/drivers/message/fusion/mptlan.c
   3 *      IP Over Fibre Channel device driver.
   4 *      For use with LSI Fibre Channel PCI chip/adapters
   5 *      running LSI Fusion MPT (Message Passing Technology) firmware.
   6 *
   7 *  Copyright (c) 2000-2008 LSI Corporation
   8 *  (mailto:DL-MPTFusionLinux@lsi.com)
   9 *
  10 */
  11/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
  12/*
  13    This program is free software; you can redistribute it and/or modify
  14    it under the terms of the GNU General Public License as published by
  15    the Free Software Foundation; version 2 of the License.
  16
  17    This program is distributed in the hope that it will be useful,
  18    but WITHOUT ANY WARRANTY; without even the implied warranty of
  19    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20    GNU General Public License for more details.
  21
  22    NO WARRANTY
  23    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  24    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  25    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  26    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  27    solely responsible for determining the appropriateness of using and
  28    distributing the Program and assumes all risks associated with its
  29    exercise of rights under this Agreement, including but not limited to
  30    the risks and costs of program errors, damage to or loss of data,
  31    programs or equipment, and unavailability or interruption of operations.
  32
  33    DISCLAIMER OF LIABILITY
  34    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  35    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  36    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  37    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  38    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  39    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  40    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  41
  42    You should have received a copy of the GNU General Public License
  43    along with this program; if not, write to the Free Software
  44    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  45*/
  46
  47/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
  48/*
  49 * Define statements used for debugging
  50 */
  51//#define MPT_LAN_IO_DEBUG
  52
  53/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
  54
  55#include "mptlan.h"
  56#include <linux/init.h>
  57#include <linux/module.h>
  58#include <linux/fs.h>
  59#include <linux/sched.h>
  60#include <linux/slab.h>
  61
  62#define my_VERSION	MPT_LINUX_VERSION_COMMON
  63#define MYNAM		"mptlan"
  64
  65MODULE_LICENSE("GPL");
  66MODULE_VERSION(my_VERSION);
  67
  68/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
  69/*
  70 * MPT LAN message sizes without variable part.
  71 */
  72#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
  73	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
  74
  75#define MPT_LAN_TRANSACTION32_SIZE \
  76	(sizeof(SGETransaction32_t) - sizeof(u32))
  77
  78/*
  79 *  Fusion MPT LAN private structures
  80 */
  81
  82struct BufferControl {
  83	struct sk_buff	*skb;
  84	dma_addr_t	dma;
  85	unsigned int	len;
  86};
  87
  88struct mpt_lan_priv {
  89	MPT_ADAPTER *mpt_dev;
  90	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
  91
  92	atomic_t buckets_out;		/* number of unused buckets on IOC */
  93	int bucketthresh;		/* Send more when this many left */
  94
  95	int *mpt_txfidx; /* Free Tx Context list */
  96	int mpt_txfidx_tail;
  97	spinlock_t txfidx_lock;
  98
  99	int *mpt_rxfidx; /* Free Rx Context list */
 100	int mpt_rxfidx_tail;
 101	spinlock_t rxfidx_lock;
 102
 103	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
 104	struct BufferControl *SendCtl;	/* Send BufferControl structs */
 105
 106	int max_buckets_out;		/* Max buckets to send to IOC */
 107	int tx_max_out;			/* IOC's Tx queue len */
 108
 109	u32 total_posted;
 110	u32 total_received;
 111
 112	struct delayed_work post_buckets_task;
 113	struct net_device *dev;
 114	unsigned long post_buckets_active;
 115};
 116
 117struct mpt_lan_ohdr {
 118	u16	dtype;
 119	u8	daddr[FC_ALEN];
 120	u16	stype;
 121	u8	saddr[FC_ALEN];
 122};
 123
 124/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 125
 126/*
 127 *  Forward protos...
 128 */
 129static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
 130		       MPT_FRAME_HDR *reply);
 131static int  mpt_lan_open(struct net_device *dev);
 132static int  mpt_lan_reset(struct net_device *dev);
 133static int  mpt_lan_close(struct net_device *dev);
 134static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
 135static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
 136					   int priority);
 137static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
 138static int  mpt_lan_receive_post_reply(struct net_device *dev,
 139				       LANReceivePostReply_t *pRecvRep);
 140static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
 141static int  mpt_lan_send_reply(struct net_device *dev,
 142			       LANSendReply_t *pSendRep);
 143static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
 144static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
 145static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
 146					 struct net_device *dev);
 147
 148/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 149/*
 150 *  Fusion MPT LAN private data
 151 */
 152static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
 153
 154static u32 max_buckets_out = 127;
 155static u32 tx_max_out_p = 127 - 16;
 156
 157/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 158/**
 159 *	lan_reply - Handle all data sent from the hardware.
 160 *	@ioc: Pointer to MPT_ADAPTER structure
 161 *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
 162 *	@reply: Pointer to MPT reply frame
 163 *
 164 *	Returns 1 indicating original alloc'd request frame ptr
 165 *	should be freed, or 0 if it shouldn't.
 166 */
 167static int
 168lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
 169{
 170	struct net_device *dev = ioc->netdev;
 171	int FreeReqFrame = 0;
 172
 173	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
 174		  IOC_AND_NETDEV_NAMES_s_s(dev)));
 175
 176//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
 177//			mf, reply));
 178
 179	if (mf == NULL) {
 180		u32 tmsg = CAST_PTR_TO_U32(reply);
 181
 182		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
 183				IOC_AND_NETDEV_NAMES_s_s(dev),
 184				tmsg));
 185
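    		/*
    		 * A turbo reply delivers a 32-bit token instead of a reply
    		 * frame: the form bits say whether it completes a send or a
    		 * receive, and the low bits carry the buffer/bucket context.
    		 */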
 186		switch (GET_LAN_FORM(tmsg)) {
 187
 188		// NOTE!  (Optimization) First case here is now caught in
 189		//  mptbase.c::mpt_interrupt() routine and the callback here
 190		//  is now skipped for this case!
 191#if 0
 192		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
 193//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
 194//				  "MessageContext turbo reply received\n"));
 195			FreeReqFrame = 1;
 196			break;
 197#endif
 198
 199		case LAN_REPLY_FORM_SEND_SINGLE:
 200//			dioprintk((MYNAM "/lan_reply: "
 201//				  "calling mpt_lan_send_reply (turbo)\n"));
 202
 203			// Potential BUG here?
 204			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
 205			//  If mpt_lan_send_turbo() ever returned 1 here,
 206			//  the calling routine (mptbase.c|mpt_interrupt)
 207			//  would Oops, because mf has already been set
 208			//  to NULL.  After returning from this function,
 209			//  mpt_interrupt() would attempt to put the (NULL)
 210			//  mf ptr back onto its adapter FreeQ - Oops!:-(
 211			//  It's OK, since mpt_lan_send_turbo() *currently*
 212			//  always returns 0, but..., just in case:
 213
 214			(void) mpt_lan_send_turbo(dev, tmsg);
 215			FreeReqFrame = 0;
 216
 217			break;
 218
 219		case LAN_REPLY_FORM_RECEIVE_SINGLE:
 220//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
 221//				  "rcv-Turbo = %08x\n", tmsg));
 222			mpt_lan_receive_post_turbo(dev, tmsg);
 223			break;
 224
 225		default:
 226			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
 227				"that I don't know what to do with\n");
 228
 229			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
 230
 231			break;
 232		}
 233
 234		return FreeReqFrame;
 235	}
 236
 237//	msg = (u32 *) reply;
 238//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
 239//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
 240//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
 241//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
 242//		  reply->u.hdr.Function));
 243
 244	switch (reply->u.hdr.Function) {
 245
 246	case MPI_FUNCTION_LAN_SEND:
 247	{
 248		LANSendReply_t *pSendRep;
 249
 250		pSendRep = (LANSendReply_t *) reply;
 251		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
 252		break;
 253	}
 254
 255	case MPI_FUNCTION_LAN_RECEIVE:
 256	{
 257		LANReceivePostReply_t *pRecvRep;
 258
 259		pRecvRep = (LANReceivePostReply_t *) reply;
 260		if (pRecvRep->NumberOfContexts) {
 261			mpt_lan_receive_post_reply(dev, pRecvRep);
 262			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
 263				FreeReqFrame = 1;
 264		} else
 265			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
 266				  "ReceivePostReply received.\n"));
 267		break;
 268	}
 269
 270	case MPI_FUNCTION_LAN_RESET:
 271		/* Just a default reply. Might want to check it to
 272		 * make sure that everything went ok.
 273		 */
 274		FreeReqFrame = 1;
 275		break;
 276
 277	case MPI_FUNCTION_EVENT_NOTIFICATION:
 278	case MPI_FUNCTION_EVENT_ACK:
 279		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
 280		 *  Should be routed to mpt_lan_event_process(), but just in case...
 281		 */
 282		FreeReqFrame = 1;
 283		break;
 284
 285	default:
 286		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
 287			"reply that I don't know what to do with\n");
 288
 289		/* CHECKME!  We free the request frame here anyway; is that right? */
 290		FreeReqFrame = 1;
 291
 292		break;
 293	}
 294
 295	return FreeReqFrame;
 296}
 297
 298/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 299static int
 300mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 301{
 302	struct net_device *dev = ioc->netdev;
 303	struct mpt_lan_priv *priv;
 304
 305	if (dev == NULL)
 306		return 1;
 307
 308	priv = netdev_priv(dev);
 309
 310	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
 311			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
 312			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
 313
 314	if (priv->mpt_rxfidx == NULL)
 315		return (1);
 316
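    	/*
    	 * Three-phase reset: nothing to do at setup time; pre-reset stops
    	 * the queue and reclaims every Rx context (outstanding buckets will
    	 * not come back once the IOC resets); post-reset reposts fresh
    	 * buckets and restarts the queue.
    	 */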
 317	if (reset_phase == MPT_IOC_SETUP_RESET) {
 318		;
 319	} else if (reset_phase == MPT_IOC_PRE_RESET) {
 320		int i;
 321		unsigned long flags;
 322
 323		netif_stop_queue(dev);
 324
 325		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
 326
 327		atomic_set(&priv->buckets_out, 0);
 328
 329		/* Reset Rx Free Tail index and re-populate the queue. */
 330		spin_lock_irqsave(&priv->rxfidx_lock, flags);
 331		priv->mpt_rxfidx_tail = -1;
 332		for (i = 0; i < priv->max_buckets_out; i++)
 333			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
 334		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
 335	} else {
 336		mpt_lan_post_receive_buckets(priv);
 337		netif_wake_queue(dev);
 338	}
 339
 340	return 1;
 341}
 342
 343/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 344static int
 345mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
 346{
 347	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
 348
 349	switch (le32_to_cpu(pEvReply->Event)) {
 350	case MPI_EVENT_NONE:				/* 00 */
 351	case MPI_EVENT_LOG_DATA:			/* 01 */
 352	case MPI_EVENT_STATE_CHANGE:			/* 02 */
 353	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
 354	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
 355	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
 356	case MPI_EVENT_RESCAN:				/* 06 */
 357		/* Ok, do we need to do anything here? As far as
 358		   I can tell, this is when a new device gets added
 359		   to the loop. */
 360	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
 361	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
 362	case MPI_EVENT_LOGOUT:				/* 09 */
 363	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
 364	default:
 365		break;
 366	}
 367
 368	/*
 369	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
 370	 *  Do NOT do it here now!
 371	 */
 372
 373	return 1;
 374}
 375
 376/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 377static int
 378mpt_lan_open(struct net_device *dev)
 379{
 380	struct mpt_lan_priv *priv = netdev_priv(dev);
 381	int i;
 382
 383	if (mpt_lan_reset(dev) != 0) {
 384		MPT_ADAPTER *mpt_dev = priv->mpt_dev;
 385
 386		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed. ");
 387
 388		if (mpt_dev->active)
 389			printk ("The ioc is active. Perhaps it needs to be"
 390				" reset?\n");
 391		else
 392			printk ("The ioc in inactive, most likely in the "
 393				"process of being reset. Please try again in "
 394				"a moment.\n");
 395	}
 396
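    	/*
    	 * The free-context lists are simple stacks of indices:
    	 * mpt_txfidx_tail/mpt_rxfidx_tail index the top entry, -1 = empty.
    	 */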
 397	priv->mpt_txfidx = kmalloc_array(priv->tx_max_out, sizeof(int),
 398					 GFP_KERNEL);
 399	if (priv->mpt_txfidx == NULL)
 400		goto out;
 401	priv->mpt_txfidx_tail = -1;
 402
 403	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
 404				GFP_KERNEL);
 405	if (priv->SendCtl == NULL)
 406		goto out_mpt_txfidx;
 407	for (i = 0; i < priv->tx_max_out; i++)
 408		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
 409
 410	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
 411
 412	priv->mpt_rxfidx = kmalloc_array(priv->max_buckets_out, sizeof(int),
 413					 GFP_KERNEL);
 414	if (priv->mpt_rxfidx == NULL)
 415		goto out_SendCtl;
 416	priv->mpt_rxfidx_tail = -1;
 417
 418	priv->RcvCtl = kcalloc(priv->max_buckets_out,
 419			       sizeof(struct BufferControl),
 420			       GFP_KERNEL);
 421	if (priv->RcvCtl == NULL)
 422		goto out_mpt_rxfidx;
 423	for (i = 0; i < priv->max_buckets_out; i++)
 424		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
 425
 426/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
 427/**/	for (i = 0; i < priv->tx_max_out; i++)
 428/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
 429/**/	dlprintk(("\n"));
 430
 431	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
 432
 433	mpt_lan_post_receive_buckets(priv);
 434	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
 435			IOC_AND_NETDEV_NAMES_s_s(dev));
 436
 437	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
 438		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
 439			" Notifications. This is a bad thing! We're going to "
 440			"go ahead anyway, but I'd be leery of system stability at "
 441			"this point.\n");
 442	}
 443
 444	netif_start_queue(dev);
 445	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
 446
 447	return 0;
 448out_mpt_rxfidx:
 449	kfree(priv->mpt_rxfidx);
 450	priv->mpt_rxfidx = NULL;
 451out_SendCtl:
 452	kfree(priv->SendCtl);
 453	priv->SendCtl = NULL;
 454out_mpt_txfidx:
 455	kfree(priv->mpt_txfidx);
 456	priv->mpt_txfidx = NULL;
 457out:	return -ENOMEM;
 458}
 459
 460/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 461/* Send a LanReset message to the FW. This should result in the FW returning
 462   any buckets it still has. */
 463static int
 464mpt_lan_reset(struct net_device *dev)
 465{
 466	MPT_FRAME_HDR *mf;
 467	LANResetRequest_t *pResetReq;
 468	struct mpt_lan_priv *priv = netdev_priv(dev);
 469
 470	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
 471
 472	if (mf == NULL) {
 473/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
 474		"Unable to allocate a request frame.\n"));
 475*/
 476		return -1;
 477	}
 478
 479	pResetReq = (LANResetRequest_t *) mf;
 480
 481	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
 482	pResetReq->ChainOffset	= 0;
 483	pResetReq->Reserved	= 0;
 484	pResetReq->PortNumber	= priv->pnum;
 485	pResetReq->MsgFlags	= 0;
 486	pResetReq->Reserved2	= 0;
 487
 488	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
 489
 490	return 0;
 491}
 492
 493/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 494static int
 495mpt_lan_close(struct net_device *dev)
 496{
 497	struct mpt_lan_priv *priv = netdev_priv(dev);
 498	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
 499	unsigned long timeout;
 500	int i;
 501
 502	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
 503
 504	mpt_event_deregister(LanCtx);
 505
 506	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
 507		  "since driver was loaded, %d still out\n",
 508		  priv->total_posted,atomic_read(&priv->buckets_out)));
 509
 510	netif_stop_queue(dev);
 511
 512	mpt_lan_reset(dev);
 513
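    	/*
    	 * Give the IOC up to two seconds to return its outstanding buckets
    	 * in response to the LanReset before unmapping and freeing them
    	 * ourselves.
    	 */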
 514	timeout = jiffies + 2 * HZ;
 515	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
 516		schedule_timeout_interruptible(1);
 517
 518	for (i = 0; i < priv->max_buckets_out; i++) {
 519		if (priv->RcvCtl[i].skb != NULL) {
 520/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
 521/**/				  "is still out\n", i));
 522			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
 523					 priv->RcvCtl[i].len,
 524					 PCI_DMA_FROMDEVICE);
 525			dev_kfree_skb(priv->RcvCtl[i].skb);
 526		}
 527	}
 528
 529	kfree(priv->RcvCtl);
 530	kfree(priv->mpt_rxfidx);
 531
 532	for (i = 0; i < priv->tx_max_out; i++) {
 533		if (priv->SendCtl[i].skb != NULL) {
 534			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
 535					 priv->SendCtl[i].len,
 536					 PCI_DMA_TODEVICE);
 537			dev_kfree_skb(priv->SendCtl[i].skb);
 538		}
 539	}
 540
 541	kfree(priv->SendCtl);
 542	kfree(priv->mpt_txfidx);
 543
 544	atomic_set(&priv->buckets_out, 0);
 545
 546	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
 547			IOC_AND_NETDEV_NAMES_s_s(dev));
 548
 549	return 0;
 550}
 551
 552/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 553/* Tx timeout handler. */
 554static void
 555mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
 556{
 557	struct mpt_lan_priv *priv = netdev_priv(dev);
 558	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
 559
 560	if (mpt_dev->active) {
 561		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
 562		netif_wake_queue(dev);
 563	}
 564}
 565
 566/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 567//static inline int
 568static int
 569mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
 570{
 571	struct mpt_lan_priv *priv = netdev_priv(dev);
 572	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
 573	struct sk_buff *sent;
 574	unsigned long flags;
 575	u32 ctx;
 576
 577	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
 578	sent = priv->SendCtl[ctx].skb;
 579
 580	dev->stats.tx_packets++;
 581	dev->stats.tx_bytes += sent->len;
 582
 583	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
 584			IOC_AND_NETDEV_NAMES_s_s(dev),
 585			__func__, sent));
 586
 587	priv->SendCtl[ctx].skb = NULL;
 588	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
 589			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
 590	dev_kfree_skb_irq(sent);
 591
 592	spin_lock_irqsave(&priv->txfidx_lock, flags);
 593	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
 594	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
 595
 596	netif_wake_queue(dev);
 597	return 0;
 598}
 599
 600/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 601static int
 602mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
 603{
 604	struct mpt_lan_priv *priv = netdev_priv(dev);
 605	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
 606	struct sk_buff *sent;
 607	unsigned long flags;
 608	int FreeReqFrame = 0;
 609	u32 *pContext;
 610	u32 ctx;
 611	u8 count;
 612
 613	count = pSendRep->NumberOfContexts;
 614
 615	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
 616		 le16_to_cpu(pSendRep->IOCStatus)));
 617
 618	/* Add check for Loginfo Flag in IOCStatus */
 619
 620	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
 621	case MPI_IOCSTATUS_SUCCESS:
 622		dev->stats.tx_packets += count;
 623		break;
 624
 625	case MPI_IOCSTATUS_LAN_CANCELED:
 626	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
 627		break;
 628
 629	case MPI_IOCSTATUS_INVALID_SGL:
 630		dev->stats.tx_errors += count;
 631		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
 632				IOC_AND_NETDEV_NAMES_s_s(dev));
 633		goto out;
 634
 635	default:
 636		dev->stats.tx_errors += count;
 637		break;
 638	}
 639
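    	/*
    	 * BufferContext is the first of NumberOfContexts consecutive
    	 * completed Tx contexts in this reply.
    	 */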
 640	pContext = &pSendRep->BufferContext;
 641
 642	spin_lock_irqsave(&priv->txfidx_lock, flags);
 643	while (count > 0) {
 644		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
 645
 646		sent = priv->SendCtl[ctx].skb;
 647		dev->stats.tx_bytes += sent->len;
 648
 649		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
 650				IOC_AND_NETDEV_NAMES_s_s(dev),
 651				__func__, sent));
 652
 653		priv->SendCtl[ctx].skb = NULL;
 654		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
 655				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
 656		dev_kfree_skb_irq(sent);
 657
 658		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
 659
 660		pContext++;
 661		count--;
 662	}
 663	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
 664
 665out:
 666	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
 667		FreeReqFrame = 1;
 668
 669	netif_wake_queue(dev);
 670	return FreeReqFrame;
 671}
 672
 673/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 674static netdev_tx_t
 675mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
 676{
 677	struct mpt_lan_priv *priv = netdev_priv(dev);
 678	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
 679	MPT_FRAME_HDR *mf;
 680	LANSendRequest_t *pSendReq;
 681	SGETransaction32_t *pTrans;
 682	SGESimple64_t *pSimple;
 683	const unsigned char *mac;
 684	dma_addr_t dma;
 685	unsigned long flags;
 686	int ctx;
 687	u16 cur_naa = 0x1000;
 688
 689	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
 690			__func__, skb));
 691
 692	spin_lock_irqsave(&priv->txfidx_lock, flags);
 693	if (priv->mpt_txfidx_tail < 0) {
 694		netif_stop_queue(dev);
 695		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
 696
 697		printk (KERN_ERR "%s: no tx context available: %u\n",
 698			__func__, priv->mpt_txfidx_tail);
 699		return NETDEV_TX_BUSY;
 700	}
 701
 702	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
 703	if (mf == NULL) {
 704		netif_stop_queue(dev);
 705		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
 706
 707		printk (KERN_ERR "%s: Unable to alloc request frame\n",
 708			__func__);
 709		return NETDEV_TX_BUSY;
 710	}
 711
 712	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
 713	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
 714
 715//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
 716//			IOC_AND_NETDEV_NAMES_s_s(dev)));
 717
 718	pSendReq = (LANSendRequest_t *) mf;
 719
 720	/* Set the mac.raw pointer, since this apparently isn't getting
 721	 * done before we get the skb. Pull the data pointer past the mac data.
 722	 */
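    	/* 12 = 2 * FC_ALEN: the destination address travels in the
    	 * transaction details below instead, and the source is presumably
    	 * implied by the port.
    	 */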
 723	skb_reset_mac_header(skb);
 724	skb_pull(skb, 12);
 725
 726	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
 727			     PCI_DMA_TODEVICE);
 728
 729	priv->SendCtl[ctx].skb = skb;
 730	priv->SendCtl[ctx].dma = dma;
 731	priv->SendCtl[ctx].len = skb->len;
 732
 733	/* Message Header */
 734	pSendReq->Reserved    = 0;
 735	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
 736	pSendReq->ChainOffset = 0;
 737	pSendReq->Reserved2   = 0;
 738	pSendReq->MsgFlags    = 0;
 739	pSendReq->PortNumber  = priv->pnum;
 740
 741	/* Transaction Context Element */
 742	pTrans = (SGETransaction32_t *) pSendReq->SG_List;
 743
 744	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
 745	pTrans->ContextSize   = sizeof(u32);
 746	pTrans->DetailsLength = 2 * sizeof(u32);
 747	pTrans->Flags         = 0;
 748	pTrans->TransactionContext[0] = cpu_to_le32(ctx);
 749
 750//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
 751//			IOC_AND_NETDEV_NAMES_s_s(dev),
 752//			ctx, skb, skb->data));
 753
 754	mac = skb_mac_header(skb);
 755
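    	/*
    	 * Pack the destination NAA (cur_naa, 0x1000 = IEEE 48-bit MAC) and
    	 * the six destination MAC bytes into the eight bytes of
    	 * TransactionDetails.
    	 */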
 756	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
 757						    (mac[0] <<  8) |
 758						    (mac[1] <<  0));
 759	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
 760						    (mac[3] << 16) |
 761						    (mac[4] <<  8) |
 762						    (mac[5] <<  0));
 763
 764	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
 765
 766	/* If we ever decide to send more than one Simple SGE per LANSend, then
 767	   we will need to make sure that LAST_ELEMENT only gets set on the
 768	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
 769	pSimple->FlagsLength = cpu_to_le32(
 770			((MPI_SGE_FLAGS_LAST_ELEMENT |
 771			  MPI_SGE_FLAGS_END_OF_BUFFER |
 772			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
 773			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
 774			  MPI_SGE_FLAGS_HOST_TO_IOC |
 775			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
 776			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
 777			skb->len);
 778	pSimple->Address.Low = cpu_to_le32((u32) dma);
 779	if (sizeof(dma_addr_t) > sizeof(u32))
 780		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
 781	else
 782		pSimple->Address.High = 0;
 783
 784	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
 785	netif_trans_update(dev);
 786
 787	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
 788			IOC_AND_NETDEV_NAMES_s_s(dev),
 789			le32_to_cpu(pSimple->FlagsLength)));
 790
 791	return NETDEV_TX_OK;
 792}
 793
 794/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 795/*
 796 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
 797 */
 798static void
 799mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
 800{
 801	struct mpt_lan_priv *priv = netdev_priv(dev);
 802
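    	/*
    	 * Only one poster may be queued at a time; the bit is cleared by
    	 * mpt_lan_post_receive_buckets() when it finishes.
    	 */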
 803	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
 804		if (priority) {
 805			schedule_delayed_work(&priv->post_buckets_task, 0);
 806		} else {
 807			schedule_delayed_work(&priv->post_buckets_task, 1);
 808			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
 809				   "timer.\n"));
 810		}
 811		dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
 812			   IOC_AND_NETDEV_NAMES_s_s(dev)));
 813	}
 814}
 815
 816/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 817static int
 818mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
 819{
 820	struct mpt_lan_priv *priv = netdev_priv(dev);
 821
 822	skb->protocol = mpt_lan_type_trans(skb, dev);
 823
 824	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
 825		 "delivered to upper level.\n",
 826			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
 827
 828	dev->stats.rx_bytes += skb->len;
 829	dev->stats.rx_packets++;
 830
 831	skb->dev = dev;
 832	netif_rx(skb);
 833
 834	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
 835		 atomic_read(&priv->buckets_out)));
 836
 837	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
 838		mpt_lan_wake_post_buckets_task(dev, 1);
 839
 840	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
 841		  "remaining, %d received back since sod\n",
 842		  atomic_read(&priv->buckets_out), priv->total_received));
 843
 844	return 0;
 845}
 846
 847/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 848//static inline int
 849static int
 850mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
 851{
 852	struct mpt_lan_priv *priv = netdev_priv(dev);
 853	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
 854	struct sk_buff *skb, *old_skb;
 855	unsigned long flags;
 856	u32 ctx, len;
 857
 858	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
 859	skb = priv->RcvCtl[ctx].skb;
 860
 861	len = GET_LAN_PACKET_LENGTH(tmsg);
 862
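    	/*
    	 * Small packets are copied into a fresh skb so the large, already
    	 * mapped bucket can be reposted as-is; larger packets hand the
    	 * bucket's skb up the stack and unmap it.
    	 */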
 863	if (len < MPT_LAN_RX_COPYBREAK) {
 864		old_skb = skb;
 865
 866		skb = dev_alloc_skb(len);
 867		if (!skb) {
 868			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
 869					IOC_AND_NETDEV_NAMES_s_s(dev),
 870					__FILE__, __LINE__);
 871			return -ENOMEM;
 872		}
 873
 874		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
 875					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
 876
 877		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
 878
 879		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
 880					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
 881		goto out;
 882	}
 883
 884	skb_put(skb, len);
 885
 886	priv->RcvCtl[ctx].skb = NULL;
 887
 888	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
 889			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
 890
 891out:
 892	spin_lock_irqsave(&priv->rxfidx_lock, flags);
 893	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
 894	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
 895
 896	atomic_dec(&priv->buckets_out);
 897	priv->total_received++;
 898
 899	return mpt_lan_receive_skb(dev, skb);
 900}
 901
 902/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 903static int
 904mpt_lan_receive_post_free(struct net_device *dev,
 905			  LANReceivePostReply_t *pRecvRep)
 906{
 907	struct mpt_lan_priv *priv = netdev_priv(dev);
 908	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
 909	unsigned long flags;
 910	struct sk_buff *skb;
 911	u32 ctx;
 912	int count;
 913	int i;
 914
 915	count = pRecvRep->NumberOfContexts;
 916
 917/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
 918		  "IOC returned %d buckets, freeing them...\n", count));
 919
 920	spin_lock_irqsave(&priv->rxfidx_lock, flags);
 921	for (i = 0; i < count; i++) {
 922		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
 923
 924		skb = priv->RcvCtl[ctx].skb;
 925
 926//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
 927//				IOC_AND_NETDEV_NAMES_s_s(dev)));
 928//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
 929//				priv, &(priv->buckets_out)));
 930//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
 931
 932		priv->RcvCtl[ctx].skb = NULL;
 933		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
 934				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
 935		dev_kfree_skb_any(skb);
 936
 937		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
 938	}
 939	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
 940
 941	atomic_sub(count, &priv->buckets_out);
 942
 943//	for (i = 0; i < priv->max_buckets_out; i++)
 944//		if (priv->RcvCtl[i].skb != NULL)
 945//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
 946//				  "is still out\n", i));
 947
 948/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
 949		  count));
 950*/
 951/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
 952/**/		  "remaining, %d received back since sod.\n",
 953/**/		  atomic_read(&priv->buckets_out), priv->total_received));
 954	return 0;
 955}
 956
 957/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 958static int
 959mpt_lan_receive_post_reply(struct net_device *dev,
 960			   LANReceivePostReply_t *pRecvRep)
 961{
 962	struct mpt_lan_priv *priv = netdev_priv(dev);
 963	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
 964	struct sk_buff *skb, *old_skb;
 965	unsigned long flags;
 966	u32 len, ctx, offset;
 967	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
 968	int count;
 969	int i, l;
 970
 971	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
 972	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
 973		 le16_to_cpu(pRecvRep->IOCStatus)));
 974
 975	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
 976						MPI_IOCSTATUS_LAN_CANCELED)
 977		return mpt_lan_receive_post_free(dev, pRecvRep);
 978
 979	len = le32_to_cpu(pRecvRep->PacketLength);
 980	if (len == 0) {
 981		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
 982			"ReceivePostReply w/ PacketLength zero!\n",
 983				IOC_AND_NETDEV_NAMES_s_s(dev));
 984		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
 985				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
 986		return -1;
 987	}
 988
 989	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
 990	count  = pRecvRep->NumberOfContexts;
 991	skb    = priv->RcvCtl[ctx].skb;
 992
 993	offset = le32_to_cpu(pRecvRep->PacketOffset);
 994//	if (offset != 0) {
 995//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
 996//			"w/ PacketOffset %u\n",
 997//				IOC_AND_NETDEV_NAMES_s_s(dev),
 998//				offset);
 999//	}
1000
1001	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1002			IOC_AND_NETDEV_NAMES_s_s(dev),
1003			offset, len));
1004
1005	if (count > 1) {
1006		int szrem = len;
1007
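    		/*
    		 * A packet larger than one bucket comes back in several
    		 * contexts; copy the fragments, in order, into a single skb,
    		 * leaving each bucket mapped so it can be reposted.
    		 */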
1008//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1009//			"for single packet, concatenating...\n",
1010//				IOC_AND_NETDEV_NAMES_s_s(dev)));
1011
1012		skb = dev_alloc_skb(len);
1013		if (!skb) {
1014			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1015					IOC_AND_NETDEV_NAMES_s_s(dev),
1016					__FILE__, __LINE__);
1017			return -ENOMEM;
1018		}
1019
1020		spin_lock_irqsave(&priv->rxfidx_lock, flags);
1021		for (i = 0; i < count; i++) {
1022
1023			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1024			old_skb = priv->RcvCtl[ctx].skb;
1025
1026			l = priv->RcvCtl[ctx].len;
1027			if (szrem < l)
1028				l = szrem;
1029
1030//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1031//					IOC_AND_NETDEV_NAMES_s_s(dev),
1032//					i, l));
1033
1034			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1035						    priv->RcvCtl[ctx].dma,
1036						    priv->RcvCtl[ctx].len,
1037						    PCI_DMA_FROMDEVICE);
1038			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
1039
1040			pci_dma_sync_single_for_device(mpt_dev->pcidev,
1041						       priv->RcvCtl[ctx].dma,
1042						       priv->RcvCtl[ctx].len,
1043						       PCI_DMA_FROMDEVICE);
1044
1045			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1046			szrem -= l;
1047		}
1048		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1049
1050	} else if (len < MPT_LAN_RX_COPYBREAK) {
1051
1052		old_skb = skb;
1053
1054		skb = dev_alloc_skb(len);
1055		if (!skb) {
1056			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1057					IOC_AND_NETDEV_NAMES_s_s(dev),
1058					__FILE__, __LINE__);
1059			return -ENOMEM;
1060		}
1061
1062		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1063					    priv->RcvCtl[ctx].dma,
1064					    priv->RcvCtl[ctx].len,
1065					    PCI_DMA_FROMDEVICE);
1066
1067		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
1068
1069		pci_dma_sync_single_for_device(mpt_dev->pcidev,
1070					       priv->RcvCtl[ctx].dma,
1071					       priv->RcvCtl[ctx].len,
1072					       PCI_DMA_FROMDEVICE);
1073
1074		spin_lock_irqsave(&priv->rxfidx_lock, flags);
1075		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1076		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1077
1078	} else {
1079		spin_lock_irqsave(&priv->rxfidx_lock, flags);
1080
1081		priv->RcvCtl[ctx].skb = NULL;
1082
1083		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1084				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1085		priv->RcvCtl[ctx].dma = 0;
1086
1087		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1088		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1089
1090		skb_put(skb, len);
1091	}
1092
1093	atomic_sub(count, &priv->buckets_out);
1094	priv->total_received += count;
1095
1096	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1097		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1098			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1099				IOC_AND_NETDEV_NAMES_s_s(dev),
1100				priv->mpt_rxfidx_tail,
1101				MPT_LAN_MAX_BUCKETS_OUT);
1102
1103		return -1;
1104	}
1105
1106	if (remaining == 0)
1107		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1108			"(priv->buckets_out = %d)\n",
1109			IOC_AND_NETDEV_NAMES_s_s(dev),
1110			atomic_read(&priv->buckets_out));
1111	else if (remaining < 10)
1112		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1113			"(priv->buckets_out = %d)\n",
1114			IOC_AND_NETDEV_NAMES_s_s(dev),
1115			remaining, atomic_read(&priv->buckets_out));
1116
1117	if ((remaining < priv->bucketthresh) &&
1118	    ((atomic_read(&priv->buckets_out) - remaining) >
1119	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1120
1121		printk (KERN_WARNING MYNAM ": Mismatch between driver's "
1122			"buckets_out count and fw's BucketsRemaining "
1123			"count has crossed the threshold, issuing a "
1124			"LanReset to clear the fw's hashtable. You may "
1125			"want to check your /var/log/messages for \"CRC "
1126			"error\" event notifications.\n");
1127
1128		mpt_lan_reset(dev);
1129		mpt_lan_wake_post_buckets_task(dev, 0);
1130	}
1131
1132	return mpt_lan_receive_skb(dev, skb);
1133}
1134
1135/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1136/* Simple SGE's only at the moment */
1137
1138static void
1139mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
1140{
1141	struct net_device *dev = priv->dev;
1142	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1143	MPT_FRAME_HDR *mf;
1144	LANReceivePostRequest_t *pRecvReq;
1145	SGETransaction32_t *pTrans;
1146	SGESimple64_t *pSimple;
1147	struct sk_buff *skb;
1148	dma_addr_t dma;
1149	u32 curr, buckets, count, max;
1150	u32 len = (dev->mtu + dev->hard_header_len + 4);
1151	unsigned long flags;
1152	int i;
1153
1154	curr = atomic_read(&priv->buckets_out);
1155	buckets = (priv->max_buckets_out - curr);
1156
1157	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1158			IOC_AND_NETDEV_NAMES_s_s(dev),
1159			__func__, buckets, curr));
1160
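    	/*
    	 * Each posted bucket costs one 32-bit-context transaction element
    	 * plus one 64-bit simple SGE, so 'max' is how many buckets fit in
    	 * a single request frame after the fixed header.
    	 */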
1161	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1162			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
1163
1164	while (buckets) {
1165		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
1166		if (mf == NULL) {
1167			printk (KERN_ERR "%s: Unable to alloc request frame\n",
1168				__func__);
1169			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1170				 __func__, buckets));
1171			goto out;
1172		}
1173		pRecvReq = (LANReceivePostRequest_t *) mf;
1174
1175		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
1176		mpt_dev->RequestNB[i] = 0;
1177		count = buckets;
1178		if (count > max)
1179			count = max;
1180
1181		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
1182		pRecvReq->ChainOffset = 0;
1183		pRecvReq->MsgFlags    = 0;
1184		pRecvReq->PortNumber  = priv->pnum;
1185
1186		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
1187		pSimple = NULL;
1188
1189		for (i = 0; i < count; i++) {
1190			int ctx;
1191
1192			spin_lock_irqsave(&priv->rxfidx_lock, flags);
1193			if (priv->mpt_rxfidx_tail < 0) {
1194				printk (KERN_ERR "%s: Can't alloc context\n",
1195					__func__);
1196				spin_unlock_irqrestore(&priv->rxfidx_lock,
1197						       flags);
1198				break;
1199			}
1200
1201			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
1202
1203			skb = priv->RcvCtl[ctx].skb;
1204			if (skb && (priv->RcvCtl[ctx].len != len)) {
1205				pci_unmap_single(mpt_dev->pcidev,
1206						 priv->RcvCtl[ctx].dma,
1207						 priv->RcvCtl[ctx].len,
1208						 PCI_DMA_FROMDEVICE);
1209				dev_kfree_skb(priv->RcvCtl[ctx].skb);
1210				skb = priv->RcvCtl[ctx].skb = NULL;
1211			}
1212
1213			if (skb == NULL) {
1214				skb = dev_alloc_skb(len);
1215				if (skb == NULL) {
1216					printk (KERN_WARNING
1217						MYNAM "/%s: Can't alloc skb\n",
1218						__func__);
1219					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1220					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1221					break;
1222				}
1223
1224				dma = pci_map_single(mpt_dev->pcidev, skb->data,
1225						     len, PCI_DMA_FROMDEVICE);
1226
1227				priv->RcvCtl[ctx].skb = skb;
1228				priv->RcvCtl[ctx].dma = dma;
1229				priv->RcvCtl[ctx].len = len;
1230			}
1231
1232			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1233
1234			pTrans->ContextSize   = sizeof(u32);
1235			pTrans->DetailsLength = 0;
1236			pTrans->Flags         = 0;
1237			pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1238
1239			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
1240
1241			pSimple->FlagsLength = cpu_to_le32(
1242				((MPI_SGE_FLAGS_END_OF_BUFFER |
1243				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1244				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1245			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
1246			if (sizeof(dma_addr_t) > sizeof(u32))
1247				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1248			else
1249				pSimple->Address.High = 0;
1250
1251			pTrans = (SGETransaction32_t *) (pSimple + 1);
1252		}
1253
1254		if (pSimple == NULL) {
1255/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1256/**/				__func__);
1257			mpt_free_msg_frame(mpt_dev, mf);
1258			goto out;
1259		}
1260
1261		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1262
1263		pRecvReq->BucketCount = cpu_to_le32(i);
1264
1265/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
1266 *	for (i = 0; i < j + 2; i ++)
1267 *	    printk (" %08x", le32_to_cpu(msg[i]));
1268 *	printk ("\n");
1269 */
1270
1271		mpt_put_msg_frame(LanCtx, mpt_dev, mf);
1272
1273		priv->total_posted += i;
1274		buckets -= i;
1275		atomic_add(i, &priv->buckets_out);
1276	}
1277
1278out:
1279	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1280		  __func__, buckets, atomic_read(&priv->buckets_out)));
1281	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1282		  __func__, priv->total_posted, priv->total_received));
1283
1284	clear_bit(0, &priv->post_buckets_active);
1285}
1286
1287static void
1288mpt_lan_post_receive_buckets_work(struct work_struct *work)
1289{
1290	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
1291						  post_buckets_task.work));
1292}
1293
1294static const struct net_device_ops mpt_netdev_ops = {
1295	.ndo_open       = mpt_lan_open,
1296	.ndo_stop       = mpt_lan_close,
1297	.ndo_start_xmit = mpt_lan_sdu_send,
1298	.ndo_tx_timeout = mpt_lan_tx_timeout,
1299};
1300
1301/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1302static struct net_device *
1303mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1304{
1305	struct net_device *dev;
1306	struct mpt_lan_priv *priv;
1307	u8 HWaddr[FC_ALEN], *a;
1308
1309	dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1310	if (!dev)
1311		return NULL;
1312
1313	dev->mtu = MPT_LAN_MTU;
1314
1315	priv = netdev_priv(dev);
1316
1317	priv->dev = dev;
1318	priv->mpt_dev = mpt_dev;
1319	priv->pnum = pnum;
1320
1321	INIT_DELAYED_WORK(&priv->post_buckets_task,
1322			  mpt_lan_post_receive_buckets_work);
1323	priv->post_buckets_active = 0;
1324
1325	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1326			__LINE__, dev->mtu + dev->hard_header_len + 4));
1327
1328	atomic_set(&priv->buckets_out, 0);
1329	priv->total_posted = 0;
1330	priv->total_received = 0;
1331	priv->max_buckets_out = max_buckets_out;
1332	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1333		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1334
1335	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1336			__LINE__,
1337			mpt_dev->pfacts[0].MaxLanBuckets,
1338			max_buckets_out,
1339			priv->max_buckets_out));
1340
1341	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1342	spin_lock_init(&priv->txfidx_lock);
1343	spin_lock_init(&priv->rxfidx_lock);
1344
1345	/*  Grab pre-fetched LANPage1 stuff. :-) */
1346	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1347
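    	/*
    	 * The address appears to be stored low-byte first in LANPage1;
    	 * flip it into canonical order.
    	 */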
1348	HWaddr[0] = a[5];
1349	HWaddr[1] = a[4];
1350	HWaddr[2] = a[3];
1351	HWaddr[3] = a[2];
1352	HWaddr[4] = a[1];
1353	HWaddr[5] = a[0];
1354
1355	dev->addr_len = FC_ALEN;
1356	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1357	memset(dev->broadcast, 0xff, FC_ALEN);
1358
1359	/* The Tx queue is 127 deep on the 909.
1360	 * Give ourselves some breathing room.
1361	 */
1362	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1363			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1364
1365	dev->netdev_ops = &mpt_netdev_ops;
1366	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1367
1368	/* MTU range: 96 - 65280 */
1369	dev->min_mtu = MPT_LAN_MIN_MTU;
1370	dev->max_mtu = MPT_LAN_MAX_MTU;
1371
1372	dlprintk((KERN_INFO MYNAM ": Finished setting initial values; "
1373		"registering dev\n"));
1374
1375	if (register_netdev(dev) != 0) {
1376		free_netdev(dev);
1377		dev = NULL;
1378	}
1379	return dev;
1380}
1381
1382static int
1383mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1384{
1385	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
1386	struct net_device	*dev;
1387	int			i;
1388
1389	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
1390		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
1391		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
1392		       ioc->name, ioc->pfacts[i].PortNumber,
1393		       ioc->pfacts[i].ProtocolFlags,
1394		       MPT_PROTOCOL_FLAGS_c_c_c_c(
1395			       ioc->pfacts[i].ProtocolFlags));
1396
1397		if (!(ioc->pfacts[i].ProtocolFlags &
1398					MPI_PORTFACTS_PROTOCOL_LAN)) {
1399			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
1400			       "seems to be disabled on this adapter port!\n",
1401			       ioc->name);
1402			continue;
1403		}
1404
1405		dev = mpt_register_lan_device(ioc, i);
1406		if (!dev) {
1407			printk(KERN_ERR MYNAM ": %s: Unable to register "
1408			       "port%d as a LAN device\n", ioc->name,
1409			       ioc->pfacts[i].PortNumber);
1410			continue;
1411		}
1412
1413		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
1414		       "registered as '%s'\n", ioc->name, dev->name);
1415		printk(KERN_INFO MYNAM ": %s/%s: "
1416		       "LanAddr = %pM\n",
1417		       IOC_AND_NETDEV_NAMES_s_s(dev),
1418		       dev->dev_addr);
1419
1420		ioc->netdev = dev;
1421
1422		return 0;
1423	}
1424
1425	return -ENODEV;
1426}
1427
1428static void
1429mptlan_remove(struct pci_dev *pdev)
1430{
1431	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
1432	struct net_device	*dev = ioc->netdev;
1433
1434	if (dev != NULL) {
1435		unregister_netdev(dev);
1436		free_netdev(dev);
1437	}
1438}
1439
1440static struct mpt_pci_driver mptlan_driver = {
1441	.probe		= mptlan_probe,
1442	.remove		= mptlan_remove,
1443};
1444
1445static int __init mpt_lan_init (void)
1446{
1447	show_mptmod_ver(LANAME, LANVER);
1448
1449	LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
1450				"lan_reply");
1451	if (LanCtx <= 0) {
1452		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1453		return -EBUSY;
1454	}
1455
1456	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1457
1458	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1459		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1460		       "handler with mptbase! The world is at an end! "
1461		       "Everything is fading to black! Goodbye.\n");
1462		return -EBUSY;
1463	}
1464
1465	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1466
1467	mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
1468	return 0;
1469}
1470
1471static void __exit mpt_lan_exit(void)
1472{
1473	mpt_device_driver_deregister(MPTLAN_DRIVER);
1474	mpt_reset_deregister(LanCtx);
1475
1476	if (LanCtx) {
1477		mpt_deregister(LanCtx);
1478		LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
1479	}
1480}
1481
1482module_init(mpt_lan_init);
1483module_exit(mpt_lan_exit);
1484
1485/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1486static unsigned short
1487mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1488{
1489	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1490	struct fcllc *fcllc;
1491
1492	skb_reset_mac_header(skb);
1493	skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1494
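    	/*
    	 * dtype should never be 0xffff; if it is, the sender's firmware
    	 * apparently byte-swapped the header (part of the all-ones
    	 * broadcast address landed in dtype), so swap the four header
    	 * words back.
    	 */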
1495	if (fch->dtype == htons(0xffff)) {
1496		u32 *p = (u32 *) fch;
1497
1498		swab32s(p + 0);
1499		swab32s(p + 1);
1500		swab32s(p + 2);
1501		swab32s(p + 3);
1502
1503		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1504				NETDEV_PTR_TO_IOC_NAME_s(dev));
1505		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
1506				fch->saddr);
1507	}
1508
1509	if (*fch->daddr & 1) {
1510		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1511			skb->pkt_type = PACKET_BROADCAST;
1512		} else {
1513			skb->pkt_type = PACKET_MULTICAST;
1514		}
1515	} else {
1516		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1517			skb->pkt_type = PACKET_OTHERHOST;
1518		} else {
1519			skb->pkt_type = PACKET_HOST;
1520		}
1521	}
1522
1523	fcllc = (struct fcllc *)skb->data;
1524
1525	/* Strip the SNAP header from IP and ARP packets since we don't
1526	 * pass them through to the 802.2/SNAP layers.
1527	 */
1528	if (fcllc->dsap == EXTENDED_SAP &&
1529		(fcllc->ethertype == htons(ETH_P_IP) ||
1530		 fcllc->ethertype == htons(ETH_P_ARP))) {
1531		skb_pull(skb, sizeof(struct fcllc));
1532		return fcllc->ethertype;
1533	}
1534
1535	return htons(ETH_P_802_2);
1536}
1537
1538/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/