   1/* cnic.c: Broadcom CNIC core network driver.
   2 *
   3 * Copyright (c) 2006-2011 Broadcom Corporation
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation.
   8 *
   9 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
  10 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/module.h>
  16
  17#include <linux/kernel.h>
  18#include <linux/errno.h>
  19#include <linux/list.h>
  20#include <linux/slab.h>
  21#include <linux/pci.h>
  22#include <linux/init.h>
  23#include <linux/netdevice.h>
  24#include <linux/uio_driver.h>
  25#include <linux/in.h>
  26#include <linux/dma-mapping.h>
  27#include <linux/delay.h>
  28#include <linux/ethtool.h>
  29#include <linux/if_vlan.h>
  30#include <linux/prefetch.h>
  31#include <linux/random.h>
  32#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
  33#define BCM_VLAN 1
  34#endif
  35#include <net/ip.h>
  36#include <net/tcp.h>
  37#include <net/route.h>
  38#include <net/ipv6.h>
  39#include <net/ip6_route.h>
  40#include <net/ip6_checksum.h>
  41#include <scsi/iscsi_if.h>
  42
  43#include "cnic_if.h"
  44#include "bnx2.h"
  45#include "bnx2x/bnx2x_reg.h"
  46#include "bnx2x/bnx2x_fw_defs.h"
  47#include "bnx2x/bnx2x_hsi.h"
  48#include "../scsi/bnx2i/57xx_iscsi_constants.h"
  49#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
  50#include "cnic.h"
  51#include "cnic_defs.h"
  52
  53#define DRV_MODULE_NAME		"cnic"
  54
  55static char version[] __devinitdata =
  56	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
  57
  58MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
  59	      "Chen (zongxi@broadcom.com)");
  60MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
  61MODULE_LICENSE("GPL");
  62MODULE_VERSION(CNIC_MODULE_VERSION);
  63
  64/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
  65static LIST_HEAD(cnic_dev_list);
  66static LIST_HEAD(cnic_udev_list);
  67static DEFINE_RWLOCK(cnic_dev_lock);
  68static DEFINE_MUTEX(cnic_lock);
  69
  70static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
  71
  72/* helper function, assuming cnic_lock is held */
  73static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
  74{
  75	return rcu_dereference_protected(cnic_ulp_tbl[type],
  76					 lockdep_is_held(&cnic_lock));
  77}
  78
  79static int cnic_service_bnx2(void *, void *);
  80static int cnic_service_bnx2x(void *, void *);
  81static int cnic_ctl(void *, struct cnic_ctl_info *);
  82
  83static struct cnic_ops cnic_bnx2_ops = {
  84	.cnic_owner	= THIS_MODULE,
  85	.cnic_handler	= cnic_service_bnx2,
  86	.cnic_ctl	= cnic_ctl,
  87};
  88
  89static struct cnic_ops cnic_bnx2x_ops = {
  90	.cnic_owner	= THIS_MODULE,
  91	.cnic_handler	= cnic_service_bnx2x,
  92	.cnic_ctl	= cnic_ctl,
  93};
  94
  95static struct workqueue_struct *cnic_wq;
  96
  97static void cnic_shutdown_rings(struct cnic_dev *);
  98static void cnic_init_rings(struct cnic_dev *);
  99static int cnic_cm_set_pg(struct cnic_sock *);
 100
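/* UIO open handler: require CAP_NET_ADMIN, allow only one opener at a
 * time, record the minor that claimed the device and reset the L2 rings
 * for the user-space driver.
 */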
 101static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
 102{
 103	struct cnic_uio_dev *udev = uinfo->priv;
 104	struct cnic_dev *dev;
 105
 106	if (!capable(CAP_NET_ADMIN))
 107		return -EPERM;
 108
 109	if (udev->uio_dev != -1)
 110		return -EBUSY;
 111
 112	rtnl_lock();
 113	dev = udev->dev;
 114
 115	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
 116		rtnl_unlock();
 117		return -ENODEV;
 118	}
 119
 120	udev->uio_dev = iminor(inode);
 121
 122	cnic_shutdown_rings(dev);
 123	cnic_init_rings(dev);
 124	rtnl_unlock();
 125
 126	return 0;
 127}
 128
 129static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
 130{
 131	struct cnic_uio_dev *udev = uinfo->priv;
 132
 133	udev->uio_dev = -1;
 134	return 0;
 135}
 136
 137static inline void cnic_hold(struct cnic_dev *dev)
 138{
 139	atomic_inc(&dev->ref_count);
 140}
 141
 142static inline void cnic_put(struct cnic_dev *dev)
 143{
 144	atomic_dec(&dev->ref_count);
 145}
 146
 147static inline void csk_hold(struct cnic_sock *csk)
 148{
 149	atomic_inc(&csk->ref_count);
 150}
 151
 152static inline void csk_put(struct cnic_sock *csk)
 153{
 154	atomic_dec(&csk->ref_count);
 155}
 156
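/* Find the cnic_dev bound to @netdev.  Takes a reference with cnic_hold()
 * on a match; returns NULL if none is found.
 */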
 157static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
 158{
 159	struct cnic_dev *cdev;
 160
 161	read_lock(&cnic_dev_lock);
 162	list_for_each_entry(cdev, &cnic_dev_list, list) {
 163		if (netdev == cdev->netdev) {
 164			cnic_hold(cdev);
 165			read_unlock(&cnic_dev_lock);
 166			return cdev;
 167		}
 168	}
 169	read_unlock(&cnic_dev_lock);
 170	return NULL;
 171}
 172
 173static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
 174{
 175	atomic_inc(&ulp_ops->ref_count);
 176}
 177
 178static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
 179{
 180	atomic_dec(&ulp_ops->ref_count);
 181}
 182
 183static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 184{
 185	struct cnic_local *cp = dev->cnic_priv;
 186	struct cnic_eth_dev *ethdev = cp->ethdev;
 187	struct drv_ctl_info info;
 188	struct drv_ctl_io *io = &info.data.io;
 189
 190	info.cmd = DRV_CTL_CTX_WR_CMD;
 191	io->cid_addr = cid_addr;
 192	io->offset = off;
 193	io->data = val;
 194	ethdev->drv_ctl(dev->netdev, &info);
 195}
 196
 197static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
 198{
 199	struct cnic_local *cp = dev->cnic_priv;
 200	struct cnic_eth_dev *ethdev = cp->ethdev;
 201	struct drv_ctl_info info;
 202	struct drv_ctl_io *io = &info.data.io;
 203
 204	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
 205	io->offset = off;
 206	io->dma_addr = addr;
 207	ethdev->drv_ctl(dev->netdev, &info);
 208}
 209
 210static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
 211{
 212	struct cnic_local *cp = dev->cnic_priv;
 213	struct cnic_eth_dev *ethdev = cp->ethdev;
 214	struct drv_ctl_info info;
 215	struct drv_ctl_l2_ring *ring = &info.data.ring;
 216
 217	if (start)
 218		info.cmd = DRV_CTL_START_L2_CMD;
 219	else
 220		info.cmd = DRV_CTL_STOP_L2_CMD;
 221
 222	ring->cid = cid;
 223	ring->client_id = cl_id;
 224	ethdev->drv_ctl(dev->netdev, &info);
 225}
 226
 227static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
 228{
 229	struct cnic_local *cp = dev->cnic_priv;
 230	struct cnic_eth_dev *ethdev = cp->ethdev;
 231	struct drv_ctl_info info;
 232	struct drv_ctl_io *io = &info.data.io;
 233
 234	info.cmd = DRV_CTL_IO_WR_CMD;
 235	io->offset = off;
 236	io->data = val;
 237	ethdev->drv_ctl(dev->netdev, &info);
 238}
 239
 240static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
 241{
 242	struct cnic_local *cp = dev->cnic_priv;
 243	struct cnic_eth_dev *ethdev = cp->ethdev;
 244	struct drv_ctl_info info;
 245	struct drv_ctl_io *io = &info.data.io;
 246
 247	info.cmd = DRV_CTL_IO_RD_CMD;
 248	io->offset = off;
 249	ethdev->drv_ctl(dev->netdev, &info);
 250	return io->data;
 251}
 252
 253static int cnic_in_use(struct cnic_sock *csk)
 254{
 255	return test_bit(SK_F_INUSE, &csk->flags);
 256}
 257
 258static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
 259{
 260	struct cnic_local *cp = dev->cnic_priv;
 261	struct cnic_eth_dev *ethdev = cp->ethdev;
 262	struct drv_ctl_info info;
 263
 264	info.cmd = cmd;
 265	info.data.credit.credit_count = count;
 266	ethdev->drv_ctl(dev->netdev, &info);
 267}
 268
 269static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
 270{
 271	u32 i;
 272
 273	for (i = 0; i < cp->max_cid_space; i++) {
 274		if (cp->ctx_tbl[i].cid == cid) {
 275			*l5_cid = i;
 276			return 0;
 277		}
 278	}
 279	return -EINVAL;
 280}
 281
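/* Deliver an event to the user-space iSCSI daemon through the ULP's
 * iscsi_nl_send_msg() hook: either ISCSI_KEVENT_IF_DOWN, or an
 * ISCSI_KEVENT_PATH_REQ built from @csk (destination IP, VLAN, path MTU).
 * A PATH_REQ that fails to send is retried up to three times.
 */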
 282static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 283			   struct cnic_sock *csk)
 284{
 285	struct iscsi_path path_req;
 286	char *buf = NULL;
 287	u16 len = 0;
 288	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
 289	struct cnic_ulp_ops *ulp_ops;
 290	struct cnic_uio_dev *udev = cp->udev;
 291	int rc = 0, retry = 0;
 292
 293	if (!udev || udev->uio_dev == -1)
 294		return -ENODEV;
 295
 296	if (csk) {
 297		len = sizeof(path_req);
 298		buf = (char *) &path_req;
 299		memset(&path_req, 0, len);
 300
 301		msg_type = ISCSI_KEVENT_PATH_REQ;
 302		path_req.handle = (u64) csk->l5_cid;
 303		if (test_bit(SK_F_IPV6, &csk->flags)) {
 304			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
 305			       sizeof(struct in6_addr));
 306			path_req.ip_addr_len = 16;
 307		} else {
 308			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
 309			       sizeof(struct in_addr));
 310			path_req.ip_addr_len = 4;
 311		}
 312		path_req.vlan_id = csk->vlan_id;
 313		path_req.pmtu = csk->mtu;
 314	}
 315
 316	while (retry < 3) {
 317		rc = 0;
 318		rcu_read_lock();
 319		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
 320		if (ulp_ops)
 321			rc = ulp_ops->iscsi_nl_send_msg(
 322				cp->ulp_handle[CNIC_ULP_ISCSI],
 323				msg_type, buf, len);
 324		rcu_read_unlock();
 325		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
 326			break;
 327
 328		msleep(100);
 329		retry++;
 330	}
 331	return rc;
 332}
 333
 334static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
 335
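/* Handle messages coming back from user space.  For
 * ISCSI_UEVENT_PATH_UPDATE, copy the resolved next-hop MAC and source IP
 * into the socket and start PG setup, or report the failed connect via
 * an upcall when no valid MAC address was returned.
 */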
 336static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 337				  char *buf, u16 len)
 338{
 339	int rc = -EINVAL;
 340
 341	switch (msg_type) {
 342	case ISCSI_UEVENT_PATH_UPDATE: {
 343		struct cnic_local *cp;
 344		u32 l5_cid;
 345		struct cnic_sock *csk;
 346		struct iscsi_path *path_resp;
 347
 348		if (len < sizeof(*path_resp))
 349			break;
 350
 351		path_resp = (struct iscsi_path *) buf;
 352		cp = dev->cnic_priv;
 353		l5_cid = (u32) path_resp->handle;
 354		if (l5_cid >= MAX_CM_SK_TBL_SZ)
 355			break;
 356
 357		rcu_read_lock();
 358		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
 359			rc = -ENODEV;
 360			rcu_read_unlock();
 361			break;
 362		}
 363		csk = &cp->csk_tbl[l5_cid];
 364		csk_hold(csk);
 365		if (cnic_in_use(csk) &&
 366		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
 367
 368			memcpy(csk->ha, path_resp->mac_addr, 6);
 369			if (test_bit(SK_F_IPV6, &csk->flags))
 370				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
 371				       sizeof(struct in6_addr));
 372			else
 373				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
 374				       sizeof(struct in_addr));
 375
 376			if (is_valid_ether_addr(csk->ha)) {
 377				cnic_cm_set_pg(csk);
 378			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
 379				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 380
 381				cnic_cm_upcall(cp, csk,
 382					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
 383				clear_bit(SK_F_CONNECT_START, &csk->flags);
 384			}
 385		}
 386		csk_put(csk);
 387		rcu_read_unlock();
 388		rc = 0;
 389	}
 390	}
 391
 392	return rc;
 393}
 394
 395static int cnic_offld_prep(struct cnic_sock *csk)
 396{
 397	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 398		return 0;
 399
 400	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
 401		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
 402		return 0;
 403	}
 404
 405	return 1;
 406}
 407
 408static int cnic_close_prep(struct cnic_sock *csk)
 409{
 410	clear_bit(SK_F_CONNECT_START, &csk->flags);
 411	smp_mb__after_clear_bit();
 412
 413	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 414		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 415			msleep(1);
 416
 417		return 1;
 418	}
 419	return 0;
 420}
 421
 422static int cnic_abort_prep(struct cnic_sock *csk)
 423{
 424	clear_bit(SK_F_CONNECT_START, &csk->flags);
 425	smp_mb__after_clear_bit();
 426
 427	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 428		msleep(1);
 429
 430	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 431		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
 432		return 1;
 433	}
 434
 435	return 0;
 436}
 437
 438int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 439{
 440	struct cnic_dev *dev;
 441
 442	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 443		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 444		return -EINVAL;
 445	}
 446	mutex_lock(&cnic_lock);
 447	if (cnic_ulp_tbl_prot(ulp_type)) {
 448		pr_err("%s: Type %d has already been registered\n",
 449		       __func__, ulp_type);
 450		mutex_unlock(&cnic_lock);
 451		return -EBUSY;
 452	}
 453
 454	read_lock(&cnic_dev_lock);
 455	list_for_each_entry(dev, &cnic_dev_list, list) {
 456		struct cnic_local *cp = dev->cnic_priv;
 457
 458		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
 459	}
 460	read_unlock(&cnic_dev_lock);
 461
 462	atomic_set(&ulp_ops->ref_count, 0);
 463	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
 464	mutex_unlock(&cnic_lock);
 465
 466	/* Prevent race conditions with netdev_event */
 467	rtnl_lock();
 468	list_for_each_entry(dev, &cnic_dev_list, list) {
 469		struct cnic_local *cp = dev->cnic_priv;
 470
 471		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
 472			ulp_ops->cnic_init(dev);
 473	}
 474	rtnl_unlock();
 475
 476	return 0;
 477}
 478
 479int cnic_unregister_driver(int ulp_type)
 480{
 481	struct cnic_dev *dev;
 482	struct cnic_ulp_ops *ulp_ops;
 483	int i = 0;
 484
 485	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 486		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 487		return -EINVAL;
 488	}
 489	mutex_lock(&cnic_lock);
 490	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 491	if (!ulp_ops) {
 492		pr_err("%s: Type %d has not been registered\n",
 493		       __func__, ulp_type);
 494		goto out_unlock;
 495	}
 496	read_lock(&cnic_dev_lock);
 497	list_for_each_entry(dev, &cnic_dev_list, list) {
 498		struct cnic_local *cp = dev->cnic_priv;
 499
 500		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 501			pr_err("%s: Type %d still has devices registered\n",
 502			       __func__, ulp_type);
 503			read_unlock(&cnic_dev_lock);
 504			goto out_unlock;
 505		}
 506	}
 507	read_unlock(&cnic_dev_lock);
 508
 509	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
 510
 511	mutex_unlock(&cnic_lock);
 512	synchronize_rcu();
 513	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
 514		msleep(100);
 515		i++;
 516	}
 517
 518	if (atomic_read(&ulp_ops->ref_count) != 0)
 519		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
 520	return 0;
 521
 522out_unlock:
 523	mutex_unlock(&cnic_lock);
 524	return -EINVAL;
 525}
 526
 527static int cnic_start_hw(struct cnic_dev *);
 528static void cnic_stop_hw(struct cnic_dev *);
 529
 530static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 531				void *ulp_ctx)
 532{
 533	struct cnic_local *cp = dev->cnic_priv;
 534	struct cnic_ulp_ops *ulp_ops;
 535
 536	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 537		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 538		return -EINVAL;
 539	}
 540	mutex_lock(&cnic_lock);
 541	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
 542		pr_err("%s: Driver with type %d has not been registered\n",
 543		       __func__, ulp_type);
 544		mutex_unlock(&cnic_lock);
 545		return -EAGAIN;
 546	}
 547	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 548		pr_err("%s: Type %d has already been registered to this device\n",
 549		       __func__, ulp_type);
 550		mutex_unlock(&cnic_lock);
 551		return -EBUSY;
 552	}
 553
 554	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
 555	cp->ulp_handle[ulp_type] = ulp_ctx;
 556	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 557	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
 558	cnic_hold(dev);
 559
 560	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
 561		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
 562			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
 563
 564	mutex_unlock(&cnic_lock);
 565
 566	return 0;
 567
 568}
 569EXPORT_SYMBOL(cnic_register_driver);
 570
 571static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 572{
 573	struct cnic_local *cp = dev->cnic_priv;
 574	int i = 0;
 575
 576	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 577		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 578		return -EINVAL;
 579	}
 580	mutex_lock(&cnic_lock);
 581	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 582		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
 583		cnic_put(dev);
 584	} else {
 585		pr_err("%s: device not registered to this ulp type %d\n",
 586		       __func__, ulp_type);
 587		mutex_unlock(&cnic_lock);
 588		return -EINVAL;
 589	}
 590	mutex_unlock(&cnic_lock);
 591
 592	if (ulp_type == CNIC_ULP_ISCSI)
 593		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 594
 595	synchronize_rcu();
 596
 597	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
 598	       i < 20) {
 599		msleep(100);
 600		i++;
 601	}
 602	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
 603		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 604
 605	return 0;
 606}
 607EXPORT_SYMBOL(cnic_unregister_driver);
 608
 609static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
 610			    u32 next)
 611{
 612	id_tbl->start = start_id;
 613	id_tbl->max = size;
 614	id_tbl->next = next;
 615	spin_lock_init(&id_tbl->lock);
 616	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
 617	if (!id_tbl->table)
 618		return -ENOMEM;
 619
 620	return 0;
 621}
 622
 623static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
 624{
 625	kfree(id_tbl->table);
 626	id_tbl->table = NULL;
 627}
 628
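/* Reserve a specific id in the table.  Returns 0 if the id was free and
 * is now marked as used, -1 if it is out of range or already taken.
 */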
 629static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
 630{
 631	int ret = -1;
 632
 633	id -= id_tbl->start;
 634	if (id >= id_tbl->max)
 635		return ret;
 636
 637	spin_lock(&id_tbl->lock);
 638	if (!test_bit(id, id_tbl->table)) {
 639		set_bit(id, id_tbl->table);
 640		ret = 0;
 641	}
 642	spin_unlock(&id_tbl->lock);
 643	return ret;
 644}
 645
 646/* Returns -1 if not successful */
 647static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
 648{
 649	u32 id;
 650
 651	spin_lock(&id_tbl->lock);
 652	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
 653	if (id >= id_tbl->max) {
 654		id = -1;
 655		if (id_tbl->next != 0) {
 656			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
 657			if (id >= id_tbl->next)
 658				id = -1;
 659		}
 660	}
 661
 662	if (id < id_tbl->max) {
 663		set_bit(id, id_tbl->table);
 664		id_tbl->next = (id + 1) & (id_tbl->max - 1);
 665		id += id_tbl->start;
 666	}
 667
 668	spin_unlock(&id_tbl->lock);
 669
 670	return id;
 671}
 672
 673static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
 674{
 675	if (id == -1)
 676		return;
 677
 678	id -= id_tbl->start;
 679	if (id >= id_tbl->max)
 680		return;
 681
 682	clear_bit(id, id_tbl->table);
 683}
 684
 685static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 686{
 687	int i;
 688
 689	if (!dma->pg_arr)
 690		return;
 691
 692	for (i = 0; i < dma->num_pages; i++) {
 693		if (dma->pg_arr[i]) {
 694			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
 695					  dma->pg_arr[i], dma->pg_map_arr[i]);
 696			dma->pg_arr[i] = NULL;
 697		}
 698	}
 699	if (dma->pgtbl) {
 700		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 701				  dma->pgtbl, dma->pgtbl_map);
 702		dma->pgtbl = NULL;
 703	}
 704	kfree(dma->pg_arr);
 705	dma->pg_arr = NULL;
 706	dma->num_pages = 0;
 707}
 708
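/* Write the page table for a DMA area: each 64-bit bus address is stored
 * as two 32-bit words, high word first (big-endian entry layout).  The
 * _le variant below stores the low word first.
 */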
 709static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 710{
 711	int i;
 712	__le32 *page_table = (__le32 *) dma->pgtbl;
 713
 714	for (i = 0; i < dma->num_pages; i++) {
 715		/* Each entry needs to be in big endian format. */
 716		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 717		page_table++;
 718		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 719		page_table++;
 720	}
 721}
 722
 723static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
 724{
 725	int i;
 726	__le32 *page_table = (__le32 *) dma->pgtbl;
 727
 728	for (i = 0; i < dma->num_pages; i++) {
 729		/* Each entry needs to be in little endian format. */
 730		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 731		page_table++;
 732		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 733		page_table++;
 734	}
 735}
 736
 737static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 738			  int pages, int use_pg_tbl)
 739{
 740	int i, size;
 741	struct cnic_local *cp = dev->cnic_priv;
 742
 743	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
 744	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
 745	if (dma->pg_arr == NULL)
 746		return -ENOMEM;
 747
 748	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
 749	dma->num_pages = pages;
 750
 751	for (i = 0; i < pages; i++) {
 752		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
 753						    BCM_PAGE_SIZE,
 754						    &dma->pg_map_arr[i],
 755						    GFP_ATOMIC);
 756		if (dma->pg_arr[i] == NULL)
 757			goto error;
 758	}
 759	if (!use_pg_tbl)
 760		return 0;
 761
 762	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
 763			  ~(BCM_PAGE_SIZE - 1);
 764	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 765					&dma->pgtbl_map, GFP_ATOMIC);
 766	if (dma->pgtbl == NULL)
 767		goto error;
 768
 769	cp->setup_pgtbl(dev, dma);
 770
 771	return 0;
 772
 773error:
 774	cnic_free_dma(dev, dma);
 775	return -ENOMEM;
 776}
 777
 778static void cnic_free_context(struct cnic_dev *dev)
 779{
 780	struct cnic_local *cp = dev->cnic_priv;
 781	int i;
 782
 783	for (i = 0; i < cp->ctx_blks; i++) {
 784		if (cp->ctx_arr[i].ctx) {
 785			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
 786					  cp->ctx_arr[i].ctx,
 787					  cp->ctx_arr[i].mapping);
 788			cp->ctx_arr[i].ctx = NULL;
 789		}
 790	}
 791}
 792
 793static void __cnic_free_uio(struct cnic_uio_dev *udev)
 794{
 795	uio_unregister_device(&udev->cnic_uinfo);
 796
 797	if (udev->l2_buf) {
 798		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
 799				  udev->l2_buf, udev->l2_buf_map);
 800		udev->l2_buf = NULL;
 801	}
 802
 803	if (udev->l2_ring) {
 804		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
 805				  udev->l2_ring, udev->l2_ring_map);
 806		udev->l2_ring = NULL;
 807	}
 808
 809	pci_dev_put(udev->pdev);
 810	kfree(udev);
 811}
 812
 813static void cnic_free_uio(struct cnic_uio_dev *udev)
 814{
 815	if (!udev)
 816		return;
 817
 818	write_lock(&cnic_dev_lock);
 819	list_del_init(&udev->list);
 820	write_unlock(&cnic_dev_lock);
 821	__cnic_free_uio(udev);
 822}
 823
 824static void cnic_free_resc(struct cnic_dev *dev)
 825{
 826	struct cnic_local *cp = dev->cnic_priv;
 827	struct cnic_uio_dev *udev = cp->udev;
 828
 829	if (udev) {
 830		udev->dev = NULL;
 831		cp->udev = NULL;
 832	}
 833
 834	cnic_free_context(dev);
 835	kfree(cp->ctx_arr);
 836	cp->ctx_arr = NULL;
 837	cp->ctx_blks = 0;
 838
 839	cnic_free_dma(dev, &cp->gbl_buf_info);
 840	cnic_free_dma(dev, &cp->kwq_info);
 841	cnic_free_dma(dev, &cp->kwq_16_data_info);
 842	cnic_free_dma(dev, &cp->kcq2.dma);
 843	cnic_free_dma(dev, &cp->kcq1.dma);
 844	kfree(cp->iscsi_tbl);
 845	cp->iscsi_tbl = NULL;
 846	kfree(cp->ctx_tbl);
 847	cp->ctx_tbl = NULL;
 848
 849	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
 850	cnic_free_id_tbl(&cp->cid_tbl);
 851}
 852
 853static int cnic_alloc_context(struct cnic_dev *dev)
 854{
 855	struct cnic_local *cp = dev->cnic_priv;
 856
 857	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
 858		int i, k, arr_size;
 859
 860		cp->ctx_blk_size = BCM_PAGE_SIZE;
 861		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
 862		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
 863			   sizeof(struct cnic_ctx);
 864		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
 865		if (cp->ctx_arr == NULL)
 866			return -ENOMEM;
 867
 868		k = 0;
 869		for (i = 0; i < 2; i++) {
 870			u32 j, reg, off, lo, hi;
 871
 872			if (i == 0)
 873				off = BNX2_PG_CTX_MAP;
 874			else
 875				off = BNX2_ISCSI_CTX_MAP;
 876
 877			reg = cnic_reg_rd_ind(dev, off);
 878			lo = reg >> 16;
 879			hi = reg & 0xffff;
 880			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
 881				cp->ctx_arr[k].cid = j;
 882		}
 883
 884		cp->ctx_blks = k;
 885		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
 886			cp->ctx_blks = 0;
 887			return -ENOMEM;
 888		}
 889
 890		for (i = 0; i < cp->ctx_blks; i++) {
 891			cp->ctx_arr[i].ctx =
 892				dma_alloc_coherent(&dev->pcidev->dev,
 893						   BCM_PAGE_SIZE,
 894						   &cp->ctx_arr[i].mapping,
 895						   GFP_KERNEL);
 896			if (cp->ctx_arr[i].ctx == NULL)
 897				return -ENOMEM;
 898		}
 899	}
 900	return 0;
 901}
 902
 903static u16 cnic_bnx2_next_idx(u16 idx)
 904{
 905	return idx + 1;
 906}
 907
 908static u16 cnic_bnx2_hw_idx(u16 idx)
 909{
 910	return idx;
 911}
 912
 913static u16 cnic_bnx2x_next_idx(u16 idx)
 914{
 915	idx++;
 916	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
 917		idx++;
 918
 919	return idx;
 920}
 921
 922static u16 cnic_bnx2x_hw_idx(u16 idx)
 923{
 924	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
 925		idx++;
 926	return idx;
 927}
 928
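/* Allocate the pages of a kernel completion queue.  With a page table
 * (bnx2) the chip walks the table and the plain index helpers are used;
 * without one (bnx2x) each page is terminated by a bnx2x_bd_chain_next
 * entry pointing at the next page.
 */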
 929static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
 930			  bool use_pg_tbl)
 931{
 932	int err, i, use_page_tbl = 0;
 933	struct kcqe **kcq;
 934
 935	if (use_pg_tbl)
 936		use_page_tbl = 1;
 937
 938	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
 939	if (err)
 940		return err;
 941
 942	kcq = (struct kcqe **) info->dma.pg_arr;
 943	info->kcq = kcq;
 944
 945	info->next_idx = cnic_bnx2_next_idx;
 946	info->hw_idx = cnic_bnx2_hw_idx;
 947	if (use_pg_tbl)
 948		return 0;
 949
 950	info->next_idx = cnic_bnx2x_next_idx;
 951	info->hw_idx = cnic_bnx2x_hw_idx;
 952
 953	for (i = 0; i < KCQ_PAGE_CNT; i++) {
 954		struct bnx2x_bd_chain_next *next =
 955			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
 956		int j = i + 1;
 957
 958		if (j >= KCQ_PAGE_CNT)
 959			j = 0;
 960		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
 961		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
 962	}
 963	return 0;
 964}
 965
 966static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 967{
 968	struct cnic_local *cp = dev->cnic_priv;
 969	struct cnic_uio_dev *udev;
 970
 971	read_lock(&cnic_dev_lock);
 972	list_for_each_entry(udev, &cnic_udev_list, list) {
 973		if (udev->pdev == dev->pcidev) {
 974			udev->dev = dev;
 975			cp->udev = udev;
 976			read_unlock(&cnic_dev_lock);
 977			return 0;
 978		}
 979	}
 980	read_unlock(&cnic_dev_lock);
 981
 982	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
 983	if (!udev)
 984		return -ENOMEM;
 985
 986	udev->uio_dev = -1;
 987
 988	udev->dev = dev;
 989	udev->pdev = dev->pcidev;
 990	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
 991	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
 992					   &udev->l2_ring_map,
 993					   GFP_KERNEL | __GFP_COMP);
 994	if (!udev->l2_ring)
 995		goto err_udev;
 996
 997	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
 998	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
 999	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1000					  &udev->l2_buf_map,
1001					  GFP_KERNEL | __GFP_COMP);
1002	if (!udev->l2_buf)
1003		goto err_dma;
1004
1005	write_lock(&cnic_dev_lock);
1006	list_add(&udev->list, &cnic_udev_list);
1007	write_unlock(&cnic_dev_lock);
1008
1009	pci_dev_get(udev->pdev);
1010
1011	cp->udev = udev;
1012
1013	return 0;
1014 err_dma:
1015	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
1016			  udev->l2_ring, udev->l2_ring_map);
1017 err_udev:
1018	kfree(udev);
1019	return -ENOMEM;
1020}
1021
1022static int cnic_init_uio(struct cnic_dev *dev)
1023{
1024	struct cnic_local *cp = dev->cnic_priv;
1025	struct cnic_uio_dev *udev = cp->udev;
1026	struct uio_info *uinfo;
1027	int ret = 0;
1028
1029	if (!udev)
1030		return -ENOMEM;
1031
1032	uinfo = &udev->cnic_uinfo;
1033
1034	uinfo->mem[0].addr = dev->netdev->base_addr;
1035	uinfo->mem[0].internal_addr = dev->regview;
1036	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
1037	uinfo->mem[0].memtype = UIO_MEM_PHYS;
1038
1039	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1040		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
1041					PAGE_MASK;
1042		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
1043			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
1044		else
1045			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
1046
1047		uinfo->name = "bnx2_cnic";
1048	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1049		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
1050			PAGE_MASK;
1051		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
1052
1053		uinfo->name = "bnx2x_cnic";
1054	}
1055
1056	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
1057
1058	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
1059	uinfo->mem[2].size = udev->l2_ring_size;
1060	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
1061
1062	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
1063	uinfo->mem[3].size = udev->l2_buf_size;
1064	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
1065
1066	uinfo->version = CNIC_MODULE_VERSION;
1067	uinfo->irq = UIO_IRQ_CUSTOM;
1068
1069	uinfo->open = cnic_uio_open;
1070	uinfo->release = cnic_uio_close;
1071
1072	if (udev->uio_dev == -1) {
1073		if (!uinfo->priv) {
1074			uinfo->priv = udev;
1075
1076			ret = uio_register_device(&udev->pdev->dev, uinfo);
1077		}
1078	} else {
1079		cnic_init_rings(dev);
1080	}
1081
1082	return ret;
1083}
1084
1085static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
1086{
1087	struct cnic_local *cp = dev->cnic_priv;
1088	int ret;
1089
1090	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
1091	if (ret)
1092		goto error;
1093	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
1094
1095	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
1096	if (ret)
1097		goto error;
1098
1099	ret = cnic_alloc_context(dev);
1100	if (ret)
1101		goto error;
1102
1103	ret = cnic_alloc_uio_rings(dev, 2);
1104	if (ret)
1105		goto error;
1106
1107	ret = cnic_init_uio(dev);
1108	if (ret)
1109		goto error;
1110
1111	return 0;
1112
1113error:
1114	cnic_free_resc(dev);
1115	return ret;
1116}
1117
1118static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1119{
1120	struct cnic_local *cp = dev->cnic_priv;
1121	int ctx_blk_size = cp->ethdev->ctx_blk_size;
1122	int total_mem, blks, i;
1123
1124	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
1125	blks = total_mem / ctx_blk_size;
1126	if (total_mem % ctx_blk_size)
1127		blks++;
1128
1129	if (blks > cp->ethdev->ctx_tbl_len)
1130		return -ENOMEM;
1131
1132	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
1133	if (cp->ctx_arr == NULL)
1134		return -ENOMEM;
1135
1136	cp->ctx_blks = blks;
1137	cp->ctx_blk_size = ctx_blk_size;
1138	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
1139		cp->ctx_align = 0;
1140	else
1141		cp->ctx_align = ctx_blk_size;
1142
1143	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
1144
1145	for (i = 0; i < blks; i++) {
1146		cp->ctx_arr[i].ctx =
1147			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
1148					   &cp->ctx_arr[i].mapping,
1149					   GFP_KERNEL);
1150		if (cp->ctx_arr[i].ctx == NULL)
1151			return -ENOMEM;
1152
1153		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
1154			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
1155				cnic_free_context(dev);
1156				cp->ctx_blk_size += cp->ctx_align;
1157				i = -1;
1158				continue;
1159			}
1160		}
1161	}
1162	return 0;
1163}
1164
1165static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1166{
1167	struct cnic_local *cp = dev->cnic_priv;
1168	struct cnic_eth_dev *ethdev = cp->ethdev;
1169	u32 start_cid = ethdev->starting_cid;
1170	int i, j, n, ret, pages;
1171	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1172
1173	cp->iro_arr = ethdev->iro_arr;
1174
1175	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1176	cp->iscsi_start_cid = start_cid;
1177	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
1178
1179	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
1180		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
1181		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
1182		if (!cp->fcoe_init_cid)
1183			cp->fcoe_init_cid = 0x10;
1184	}
1185
1186	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
1187				GFP_KERNEL);
1188	if (!cp->iscsi_tbl)
1189		goto error;
1190
1191	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
1192				cp->max_cid_space, GFP_KERNEL);
1193	if (!cp->ctx_tbl)
1194		goto error;
1195
1196	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1197		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
1198		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1199	}
1200
1201	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
1202		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
1203
1204	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1205		PAGE_SIZE;
1206
1207	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1208	if (ret)
1209		return -ENOMEM;
1210
1211	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1212	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
1213		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1214
1215		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
1216		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
1217						   off;
1218
1219		if ((i % n) == (n - 1))
1220			j++;
1221	}
1222
1223	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
1224	if (ret)
1225		goto error;
1226
1227	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
1228		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1229		if (ret)
1230			goto error;
1231	}
1232
1233	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
1234	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1235	if (ret)
1236		goto error;
1237
1238	ret = cnic_alloc_bnx2x_context(dev);
1239	if (ret)
1240		goto error;
1241
1242	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1243
1244	cp->l2_rx_ring_size = 15;
1245
1246	ret = cnic_alloc_uio_rings(dev, 4);
1247	if (ret)
1248		goto error;
1249
1250	ret = cnic_init_uio(dev);
1251	if (ret)
1252		goto error;
1253
1254	return 0;
1255
1256error:
1257	cnic_free_resc(dev);
1258	return -ENOMEM;
1259}
1260
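/* Number of free slots left in the bnx2 kernel work queue ring. */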
1261static inline u32 cnic_kwq_avail(struct cnic_local *cp)
1262{
1263	return cp->max_kwq_idx -
1264		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
1265}
1266
1267static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1268				  u32 num_wqes)
1269{
1270	struct cnic_local *cp = dev->cnic_priv;
1271	struct kwqe *prod_qe;
1272	u16 prod, sw_prod, i;
1273
1274	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1275		return -EAGAIN;		/* bnx2 is down */
1276
1277	spin_lock_bh(&cp->cnic_ulp_lock);
1278	if (num_wqes > cnic_kwq_avail(cp) &&
1279	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
1280		spin_unlock_bh(&cp->cnic_ulp_lock);
1281		return -EAGAIN;
1282	}
1283
1284	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
1285
1286	prod = cp->kwq_prod_idx;
1287	sw_prod = prod & MAX_KWQ_IDX;
1288	for (i = 0; i < num_wqes; i++) {
1289		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
1290		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
1291		prod++;
1292		sw_prod = prod & MAX_KWQ_IDX;
1293	}
1294	cp->kwq_prod_idx = prod;
1295
1296	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
1297
1298	spin_unlock_bh(&cp->cnic_ulp_lock);
1299	return 0;
1300}
1301
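/* Return the per-connection 16-byte KWQE data buffer for @l5_cid and
 * report its DMA address through @l5_data.
 */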
1302static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
1303				   union l5cm_specific_data *l5_data)
1304{
1305	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1306	dma_addr_t map;
1307
1308	map = ctx->kwqe_data_mapping;
1309	l5_data->phy_address.lo = (u64) map & 0xffffffff;
1310	l5_data->phy_address.hi = (u64) map >> 32;
1311	return ctx->kwqe_data;
1312}
1313
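/* Build a single slow-path element (l5cm_spe) for @cid and submit it
 * through the bnx2x driver.  Returns 0 if the element was accepted,
 * -EBUSY otherwise.
 */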
1314static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1315				u32 type, union l5cm_specific_data *l5_data)
1316{
1317	struct cnic_local *cp = dev->cnic_priv;
1318	struct l5cm_spe kwqe;
1319	struct kwqe_16 *kwq[1];
1320	u16 type_16;
1321	int ret;
1322
1323	kwqe.hdr.conn_and_cmd_data =
1324		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1325			     BNX2X_HW_CID(cp, cid)));
1326
1327	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
1328	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1329		   SPE_HDR_FUNCTION_ID;
1330
1331	kwqe.hdr.type = cpu_to_le16(type_16);
1332	kwqe.hdr.reserved1 = 0;
1333	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1334	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1335
1336	kwq[0] = (struct kwqe_16 *) &kwqe;
1337
1338	spin_lock_bh(&cp->cnic_ulp_lock);
1339	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1340	spin_unlock_bh(&cp->cnic_ulp_lock);
1341
1342	if (ret == 1)
1343		return 0;
1344
1345	return -EBUSY;
1346}
1347
1348static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1349				   struct kcqe *cqes[], u32 num_cqes)
1350{
1351	struct cnic_local *cp = dev->cnic_priv;
1352	struct cnic_ulp_ops *ulp_ops;
1353
1354	rcu_read_lock();
1355	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1356	if (likely(ulp_ops)) {
1357		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
1358					  cqes, num_cqes);
1359	}
1360	rcu_read_unlock();
1361}
1362
1363static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1364{
1365	struct cnic_local *cp = dev->cnic_priv;
1366	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1367	int hq_bds, pages;
1368	u32 pfid = cp->pfid;
1369
1370	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1371	cp->num_ccells = req1->num_ccells_per_conn;
1372	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
1373			      cp->num_iscsi_tasks;
1374	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1375			BNX2X_ISCSI_R2TQE_SIZE;
1376	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1377	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1378	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1379	cp->num_cqs = req1->num_cqs;
1380
1381	if (!dev->max_iscsi_conn)
1382		return 0;
1383
1384	/* init Tstorm RAM */
1385	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1386		  req1->rq_num_wqes);
1387	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1388		  PAGE_SIZE);
1389	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1390		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1391	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1392		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1393		  req1->num_tasks_per_conn);
1394
1395	/* init Ustorm RAM */
1396	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1397		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
1398		  req1->rq_buffer_size);
1399	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1400		  PAGE_SIZE);
1401	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1402		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1403	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1404		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1405		  req1->num_tasks_per_conn);
1406	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1407		  req1->rq_num_wqes);
1408	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1409		  req1->cq_num_wqes);
1410	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1411		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1412
1413	/* init Xstorm RAM */
1414	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1415		  PAGE_SIZE);
1416	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1417		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1418	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1419		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1420		  req1->num_tasks_per_conn);
1421	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1422		  hq_bds);
1423	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1424		  req1->num_tasks_per_conn);
1425	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1426		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1427
1428	/* init Cstorm RAM */
1429	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1430		  PAGE_SIZE);
1431	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1432		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1433	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1434		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1435		  req1->num_tasks_per_conn);
1436	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1437		  req1->cq_num_wqes);
1438	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1439		  hq_bds);
1440
1441	return 0;
1442}
1443
1444static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1445{
1446	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1447	struct cnic_local *cp = dev->cnic_priv;
1448	u32 pfid = cp->pfid;
1449	struct iscsi_kcqe kcqe;
1450	struct kcqe *cqes[1];
1451
1452	memset(&kcqe, 0, sizeof(kcqe));
1453	if (!dev->max_iscsi_conn) {
1454		kcqe.completion_status =
1455			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1456		goto done;
1457	}
1458
1459	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1460		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1461	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1462		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1463		req2->error_bit_map[1]);
1464
1465	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1466		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1467	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1468		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1469	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1470		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1471		req2->error_bit_map[1]);
1472
1473	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1474		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1475
1476	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1477
1478done:
1479	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1480	cqes[0] = (struct kcqe *) &kcqe;
1481	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1482
1483	return 0;
1484}
1485
1486static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1487{
1488	struct cnic_local *cp = dev->cnic_priv;
1489	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1490
1491	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1492		struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1493
1494		cnic_free_dma(dev, &iscsi->hq_info);
1495		cnic_free_dma(dev, &iscsi->r2tq_info);
1496		cnic_free_dma(dev, &iscsi->task_array_info);
1497		cnic_free_id(&cp->cid_tbl, ctx->cid);
1498	} else {
1499		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
1500	}
1501
1502	ctx->cid = 0;
1503}
1504
1505static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1506{
1507	u32 cid;
1508	int ret, pages;
1509	struct cnic_local *cp = dev->cnic_priv;
1510	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1511	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1512
1513	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1514		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1515		if (cid == -1) {
1516			ret = -ENOMEM;
1517			goto error;
1518		}
1519		ctx->cid = cid;
1520		return 0;
1521	}
1522
1523	cid = cnic_alloc_new_id(&cp->cid_tbl);
1524	if (cid == -1) {
1525		ret = -ENOMEM;
1526		goto error;
1527	}
1528
1529	ctx->cid = cid;
1530	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
1531
1532	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1533	if (ret)
1534		goto error;
1535
1536	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
1537	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1538	if (ret)
1539		goto error;
1540
1541	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1542	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1543	if (ret)
1544		goto error;
1545
1546	return 0;
1547
1548error:
1549	cnic_free_bnx2x_conn_resc(dev, l5_cid);
1550	return ret;
1551}
1552
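/* Locate the context memory assigned to @cid within the allocated
 * blocks, applying the alignment offset used on 57710.  Optionally zero
 * the context; return its virtual address and fill in the DMA address.
 */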
1553static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1554				struct regpair *ctx_addr)
1555{
1556	struct cnic_local *cp = dev->cnic_priv;
1557	struct cnic_eth_dev *ethdev = cp->ethdev;
1558	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1559	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1560	unsigned long align_off = 0;
1561	dma_addr_t ctx_map;
1562	void *ctx;
1563
1564	if (cp->ctx_align) {
1565		unsigned long mask = cp->ctx_align - 1;
1566
1567		if (cp->ctx_arr[blk].mapping & mask)
1568			align_off = cp->ctx_align -
1569				    (cp->ctx_arr[blk].mapping & mask);
1570	}
1571	ctx_map = cp->ctx_arr[blk].mapping + align_off +
1572		(off * BNX2X_CONTEXT_MEM_SIZE);
1573	ctx = cp->ctx_arr[blk].ctx + align_off +
1574	      (off * BNX2X_CONTEXT_MEM_SIZE);
1575	if (init)
1576		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1577
1578	ctx_addr->lo = ctx_map & 0xffffffff;
1579	ctx_addr->hi = (u64) ctx_map >> 32;
1580	return ctx;
1581}
1582
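/* Fill the iSCSI connection context (Xstorm/Tstorm/Ustorm/Cstorm
 * sections) from the offload KWQEs: SQ/RQ/CQ/HQ/R2TQ page tables, task
 * array addresses and the default negotiated parameters.
 */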
1583static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1584				u32 num)
1585{
1586	struct cnic_local *cp = dev->cnic_priv;
1587	struct iscsi_kwqe_conn_offload1 *req1 =
1588			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
1589	struct iscsi_kwqe_conn_offload2 *req2 =
1590			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
1591	struct iscsi_kwqe_conn_offload3 *req3;
1592	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1593	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1594	u32 cid = ctx->cid;
1595	u32 hw_cid = BNX2X_HW_CID(cp, cid);
1596	struct iscsi_context *ictx;
1597	struct regpair context_addr;
1598	int i, j, n = 2, n_max;
1599	u8 port = CNIC_PORT(cp);
1600
1601	ctx->ctx_flags = 0;
1602	if (!req2->num_additional_wqes)
1603		return -EINVAL;
1604
1605	n_max = req2->num_additional_wqes + 2;
1606
1607	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1608	if (ictx == NULL)
1609		return -ENOMEM;
1610
1611	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1612
1613	ictx->xstorm_ag_context.hq_prod = 1;
1614
1615	ictx->xstorm_st_context.iscsi.first_burst_length =
1616		ISCSI_DEF_FIRST_BURST_LEN;
1617	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1618		ISCSI_DEF_MAX_RECV_SEG_LEN;
1619	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1620		req1->sq_page_table_addr_lo;
1621	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1622		req1->sq_page_table_addr_hi;
1623	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1624	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1625	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1626		iscsi->hq_info.pgtbl_map & 0xffffffff;
1627	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1628		(u64) iscsi->hq_info.pgtbl_map >> 32;
1629	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1630		iscsi->hq_info.pgtbl[0];
1631	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1632		iscsi->hq_info.pgtbl[1];
1633	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1634		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1635	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1636		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1637	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1638		iscsi->r2tq_info.pgtbl[0];
1639	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1640		iscsi->r2tq_info.pgtbl[1];
1641	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1642		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1643	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1644		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1645	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1646		BNX2X_ISCSI_PBL_NOT_CACHED;
1647	ictx->xstorm_st_context.iscsi.flags.flags |=
1648		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1649	ictx->xstorm_st_context.iscsi.flags.flags |=
1650		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
1651	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1652		ETH_P_8021Q;
1653	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
1654		cp->port_mode == CHIP_2_PORT_MODE) {
1655
1656		port = 0;
1657	}
1658	ictx->xstorm_st_context.common.flags =
1659		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
1660	ictx->xstorm_st_context.common.flags =
1661		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
1662
1663	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1664	/* TSTORM requires the base address of RQ DB & not PTE */
1665	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1666		req2->rq_page_table_addr_lo & PAGE_MASK;
1667	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1668		req2->rq_page_table_addr_hi;
1669	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1670	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1671	ictx->tstorm_st_context.tcp.flags2 |=
1672		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1673	ictx->tstorm_st_context.tcp.ooo_support_mode =
1674		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
1675
1676	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1677
1678	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1679		req2->rq_page_table_addr_lo;
1680	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
1681		req2->rq_page_table_addr_hi;
1682	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1683	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1684	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1685		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1686	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1687		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1688	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1689		iscsi->r2tq_info.pgtbl[0];
1690	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1691		iscsi->r2tq_info.pgtbl[1];
1692	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1693		req1->cq_page_table_addr_lo;
1694	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1695		req1->cq_page_table_addr_hi;
1696	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1697	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1698	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1699	ictx->ustorm_st_context.task_pbe_cache_index =
1700		BNX2X_ISCSI_PBL_NOT_CACHED;
1701	ictx->ustorm_st_context.task_pdu_cache_index =
1702		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1703
1704	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1705		if (j == 3) {
1706			if (n >= n_max)
1707				break;
1708			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1709			j = 0;
1710		}
1711		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1712		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1713			req3->qp_first_pte[j].hi;
1714		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1715			req3->qp_first_pte[j].lo;
1716	}
1717
1718	ictx->ustorm_st_context.task_pbl_base.lo =
1719		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1720	ictx->ustorm_st_context.task_pbl_base.hi =
1721		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1722	ictx->ustorm_st_context.tce_phy_addr.lo =
1723		iscsi->task_array_info.pgtbl[0];
1724	ictx->ustorm_st_context.tce_phy_addr.hi =
1725		iscsi->task_array_info.pgtbl[1];
1726	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1727	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1728	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1729	ictx->ustorm_st_context.negotiated_rx_and_flags |=
1730		ISCSI_DEF_MAX_BURST_LEN;
1731	ictx->ustorm_st_context.negotiated_rx |=
1732		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1733		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1734
1735	ictx->cstorm_st_context.hq_pbl_base.lo =
1736		iscsi->hq_info.pgtbl_map & 0xffffffff;
1737	ictx->cstorm_st_context.hq_pbl_base.hi =
1738		(u64) iscsi->hq_info.pgtbl_map >> 32;
1739	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1740	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1741	ictx->cstorm_st_context.task_pbl_base.lo =
1742		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1743	ictx->cstorm_st_context.task_pbl_base.hi =
1744		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1745	/* CSTORM and USTORM initialization is different, CSTORM requires
1746	 * CQ DB base & not PTE addr */
1747	ictx->cstorm_st_context.cq_db_base.lo =
1748		req1->cq_page_table_addr_lo & PAGE_MASK;
1749	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1750	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1751	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1752	for (i = 0; i < cp->num_cqs; i++) {
1753		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1754			ISCSI_INITIAL_SN;
1755		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1756			ISCSI_INITIAL_SN;
1757	}
1758
1759	ictx->xstorm_ag_context.cdu_reserved =
1760		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1761				       ISCSI_CONNECTION_TYPE);
1762	ictx->ustorm_ag_context.cdu_usage =
1763		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1764				       ISCSI_CONNECTION_TYPE);
1765	return 0;
1766
1767}
1768
1769static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1770				   u32 num, int *work)
1771{
1772	struct iscsi_kwqe_conn_offload1 *req1;
1773	struct iscsi_kwqe_conn_offload2 *req2;
1774	struct cnic_local *cp = dev->cnic_priv;
1775	struct cnic_context *ctx;
1776	struct iscsi_kcqe kcqe;
1777	struct kcqe *cqes[1];
1778	u32 l5_cid;
1779	int ret = 0;
1780
1781	if (num < 2) {
1782		*work = num;
1783		return -EINVAL;
1784	}
1785
1786	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1787	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1788	if ((num - 2) < req2->num_additional_wqes) {
1789		*work = num;
1790		return -EINVAL;
1791	}
1792	*work = 2 + req2->num_additional_wqes;
1793
1794	l5_cid = req1->iscsi_conn_id;
1795	if (l5_cid >= MAX_ISCSI_TBL_SZ)
1796		return -EINVAL;
1797
1798	memset(&kcqe, 0, sizeof(kcqe));
1799	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1800	kcqe.iscsi_conn_id = l5_cid;
1801	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1802
1803	ctx = &cp->ctx_tbl[l5_cid];
1804	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1805		kcqe.completion_status =
1806			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1807		goto done;
1808	}
1809
1810	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1811		atomic_dec(&cp->iscsi_conn);
1812		goto done;
1813	}
1814	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1815	if (ret) {
1816		atomic_dec(&cp->iscsi_conn);
1817		ret = 0;
1818		goto done;
1819	}
1820	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1821	if (ret < 0) {
1822		cnic_free_bnx2x_conn_resc(dev, l5_cid);
1823		atomic_dec(&cp->iscsi_conn);
1824		goto done;
1825	}
1826
1827	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1828	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
1829
1830done:
1831	cqes[0] = (struct kcqe *) &kcqe;
1832	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1833	return ret;
1834}
1835
1836
1837static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1838{
1839	struct cnic_local *cp = dev->cnic_priv;
1840	struct iscsi_kwqe_conn_update *req =
1841		(struct iscsi_kwqe_conn_update *) kwqe;
1842	void *data;
1843	union l5cm_specific_data l5_data;
1844	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1845	int ret;
1846
1847	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1848		return -EINVAL;
1849
1850	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1851	if (!data)
1852		return -ENOMEM;
1853
1854	memcpy(data, kwqe, sizeof(struct kwqe));
1855
1856	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1857			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1858	return ret;
1859}
1860
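/* Send a common CFC delete ramrod for the connection and wait for its
 * completion to be signalled through ctx->waitq.
 */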
1861static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1862{
1863	struct cnic_local *cp = dev->cnic_priv;
1864	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1865	union l5cm_specific_data l5_data;
1866	int ret;
1867	u32 hw_cid;
1868
1869	init_waitqueue_head(&ctx->waitq);
1870	ctx->wait_cond = 0;
1871	memset(&l5_data, 0, sizeof(l5_data));
1872	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
1873
1874	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1875				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
1876
1877	if (ret == 0) {
1878		wait_event(ctx->waitq, ctx->wait_cond);
1879		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
1880			return -EBUSY;
1881	}
1882
1883	return ret;
1884}
1885
1886static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1887{
1888	struct cnic_local *cp = dev->cnic_priv;
1889	struct iscsi_kwqe_conn_destroy *req =
1890		(struct iscsi_kwqe_conn_destroy *) kwqe;
1891	u32 l5_cid = req->reserved0;
1892	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1893	int ret = 0;
1894	struct iscsi_kcqe kcqe;
1895	struct kcqe *cqes[1];
1896
1897	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
1898		goto skip_cfc_delete;
1899
1900	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
1901		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
1902
1903		if (delta > (2 * HZ))
1904			delta = 0;
1905
1906		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
1907		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
1908		goto destroy_reply;
1909	}
1910
1911	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
1912
1913skip_cfc_delete:
1914	cnic_free_bnx2x_conn_resc(dev, l5_cid);
1915
1916	if (!ret) {
1917		atomic_dec(&cp->iscsi_conn);
1918		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
1919	}
1920
1921destroy_reply:
1922	memset(&kcqe, 0, sizeof(kcqe));
1923	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
1924	kcqe.iscsi_conn_id = l5_cid;
1925	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1926	kcqe.iscsi_conn_context_id = req->context_id;
1927
1928	cqes[0] = (struct kcqe *) &kcqe;
1929	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1930
1931	return ret;
1932}
1933
1934static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
1935				      struct l4_kwq_connect_req1 *kwqe1,
1936				      struct l4_kwq_connect_req3 *kwqe3,
1937				      struct l5cm_active_conn_buffer *conn_buf)
1938{
1939	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
1940	struct l5cm_xstorm_conn_buffer *xstorm_buf =
1941		&conn_buf->xstorm_conn_buffer;
1942	struct l5cm_tstorm_conn_buffer *tstorm_buf =
1943		&conn_buf->tstorm_conn_buffer;
1944	struct regpair context_addr;
1945	u32 cid = BNX2X_SW_CID(kwqe1->cid);
1946	struct in6_addr src_ip, dst_ip;
1947	int i;
1948	u32 *addrp;
1949
1950	addrp = (u32 *) &conn_addr->local_ip_addr;
1951	for (i = 0; i < 4; i++, addrp++)
1952		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
1953
1954	addrp = (u32 *) &conn_addr->remote_ip_addr;
1955	for (i = 0; i < 4; i++, addrp++)
1956		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
1957
1958	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
1959
1960	xstorm_buf->context_addr.hi = context_addr.hi;
1961	xstorm_buf->context_addr.lo = context_addr.lo;
1962	xstorm_buf->mss = 0xffff;
1963	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
1964	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
1965		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
1966	xstorm_buf->pseudo_header_checksum =
1967		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
1968
1969	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
1970		tstorm_buf->params |=
1971			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
1972	if (kwqe3->ka_timeout) {
1973		tstorm_buf->ka_enable = 1;
1974		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
1975		tstorm_buf->ka_interval = kwqe3->ka_interval;
1976		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
1977	}
1978	tstorm_buf->max_rt_time = 0xffffffff;
1979}
1980
1981static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
1982{
1983	struct cnic_local *cp = dev->cnic_priv;
1984	u32 pfid = cp->pfid;
1985	u8 *mac = dev->mac_addr;
1986
1987	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1988		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
1989	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1990		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
1991	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1992		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
1993	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1994		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
1995	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1996		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
1997	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1998		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
1999
2000	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2001		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
2002	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2003		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2004		 mac[4]);
2005	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2006		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
2007	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2008		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2009		 mac[2]);
2010	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2011		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
2012	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2013		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2014		 mac[0]);
2015}
2016
2017static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
2018{
2019	struct cnic_local *cp = dev->cnic_priv;
2020	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
2021	u16 tstorm_flags = 0;
2022
2023	if (tcp_ts) {
2024		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
2025		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
2026	}
2027
2028	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2029		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
2030
2031	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
2032		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
2033}
2034
2035static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2036			      u32 num, int *work)
2037{
2038	struct cnic_local *cp = dev->cnic_priv;
2039	struct l4_kwq_connect_req1 *kwqe1 =
2040		(struct l4_kwq_connect_req1 *) wqes[0];
2041	struct l4_kwq_connect_req3 *kwqe3;
2042	struct l5cm_active_conn_buffer *conn_buf;
2043	struct l5cm_conn_addr_params *conn_addr;
2044	union l5cm_specific_data l5_data;
2045	u32 l5_cid = kwqe1->pg_cid;
2046	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
2047	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2048	int ret;
2049
2050	if (num < 2) {
2051		*work = num;
2052		return -EINVAL;
2053	}
2054
2055	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
2056		*work = 3;
2057	else
2058		*work = 2;
2059
2060	if (num < *work) {
2061		*work = num;
2062		return -EINVAL;
2063	}
2064
2065	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
2066		netdev_err(dev->netdev, "conn_buf size too big\n");
2067		return -ENOMEM;
2068	}
2069	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2070	if (!conn_buf)
2071		return -ENOMEM;
2072
2073	memset(conn_buf, 0, sizeof(*conn_buf));
2074
2075	conn_addr = &conn_buf->conn_addr_buf;
2076	conn_addr->remote_addr_0 = csk->ha[0];
2077	conn_addr->remote_addr_1 = csk->ha[1];
2078	conn_addr->remote_addr_2 = csk->ha[2];
2079	conn_addr->remote_addr_3 = csk->ha[3];
2080	conn_addr->remote_addr_4 = csk->ha[4];
2081	conn_addr->remote_addr_5 = csk->ha[5];
2082
2083	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
2084		struct l4_kwq_connect_req2 *kwqe2 =
2085			(struct l4_kwq_connect_req2 *) wqes[1];
2086
2087		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
2088		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
2089		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
2090
2091		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
2092		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
2093		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
2094		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
2095	}
2096	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
2097
2098	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
2099	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
2100	conn_addr->local_tcp_port = kwqe1->src_port;
2101	conn_addr->remote_tcp_port = kwqe1->dst_port;
2102
2103	conn_addr->pmtu = kwqe3->pmtu;
2104	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2105
2106	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
2107		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
2108
2109	cnic_bnx2x_set_tcp_timestamp(dev,
2110		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
2111
2112	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2113			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2114	if (!ret)
2115		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2116
2117	return ret;
2118}
2119
2120static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2121{
2122	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2123	union l5cm_specific_data l5_data;
2124	int ret;
2125
2126	memset(&l5_data, 0, sizeof(l5_data));
2127	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2128			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2129	return ret;
2130}
2131
2132static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2133{
2134	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2135	union l5cm_specific_data l5_data;
2136	int ret;
2137
2138	memset(&l5_data, 0, sizeof(l5_data));
2139	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2140			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2141	return ret;
2142}
2143static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2144{
2145	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2146	struct l4_kcq kcqe;
2147	struct kcqe *cqes[1];
2148
2149	memset(&kcqe, 0, sizeof(kcqe));
2150	kcqe.pg_host_opaque = req->host_opaque;
2151	kcqe.pg_cid = req->host_opaque;
2152	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2153	cqes[0] = (struct kcqe *) &kcqe;
2154	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2155	return 0;
2156}
2157
2158static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2159{
2160	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2161	struct l4_kcq kcqe;
2162	struct kcqe *cqes[1];
2163
2164	memset(&kcqe, 0, sizeof(kcqe));
2165	kcqe.pg_host_opaque = req->pg_host_opaque;
2166	kcqe.pg_cid = req->pg_cid;
2167	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2168	cqes[0] = (struct kcqe *) &kcqe;
2169	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2170	return 0;
2171}
2172
2173static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2174{
2175	struct fcoe_kwqe_stat *req;
2176	struct fcoe_stat_ramrod_params *fcoe_stat;
2177	union l5cm_specific_data l5_data;
2178	struct cnic_local *cp = dev->cnic_priv;
2179	int ret;
2180	u32 cid;
2181
2182	req = (struct fcoe_kwqe_stat *) kwqe;
2183	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2184
2185	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2186	if (!fcoe_stat)
2187		return -ENOMEM;
2188
2189	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2190	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2191
2192	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
2193				  FCOE_CONNECTION_TYPE, &l5_data);
2194	return ret;
2195}
2196
2197static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2198				 u32 num, int *work)
2199{
2200	int ret;
2201	struct cnic_local *cp = dev->cnic_priv;
2202	u32 cid;
2203	struct fcoe_init_ramrod_params *fcoe_init;
2204	struct fcoe_kwqe_init1 *req1;
2205	struct fcoe_kwqe_init2 *req2;
2206	struct fcoe_kwqe_init3 *req3;
2207	union l5cm_specific_data l5_data;
2208
2209	if (num < 3) {
2210		*work = num;
2211		return -EINVAL;
2212	}
2213	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2214	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2215	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2216	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2217		*work = 1;
2218		return -EINVAL;
2219	}
2220	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2221		*work = 2;
2222		return -EINVAL;
2223	}
2224
2225	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2226		netdev_err(dev->netdev, "fcoe_init size too big\n");
2227		return -ENOMEM;
2228	}
2229	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2230	if (!fcoe_init)
2231		return -ENOMEM;
2232
2233	memset(fcoe_init, 0, sizeof(*fcoe_init));
2234	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2235	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2236	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
2237	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2238	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2239	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
2240
2241	fcoe_init->sb_num = cp->status_blk_num;
2242	fcoe_init->eq_prod = MAX_KCQ_IDX;
2243	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2244	cp->kcq2.sw_prod_idx = 0;
2245
2246	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2247	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2248				  FCOE_CONNECTION_TYPE, &l5_data);
2249	*work = 3;
2250	return ret;
2251}
2252
2253static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2254				 u32 num, int *work)
2255{
2256	int ret = 0;
2257	u32 cid = -1, l5_cid;
2258	struct cnic_local *cp = dev->cnic_priv;
2259	struct fcoe_kwqe_conn_offload1 *req1;
2260	struct fcoe_kwqe_conn_offload2 *req2;
2261	struct fcoe_kwqe_conn_offload3 *req3;
2262	struct fcoe_kwqe_conn_offload4 *req4;
2263	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2264	struct cnic_context *ctx;
2265	struct fcoe_context *fctx;
2266	struct regpair ctx_addr;
2267	union l5cm_specific_data l5_data;
2268	struct fcoe_kcqe kcqe;
2269	struct kcqe *cqes[1];
2270
2271	if (num < 4) {
2272		*work = num;
2273		return -EINVAL;
2274	}
2275	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2276	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2277	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2278	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2279
2280	*work = 4;
2281
2282	l5_cid = req1->fcoe_conn_id;
2283	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2284		goto err_reply;
2285
2286	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2287
2288	ctx = &cp->ctx_tbl[l5_cid];
2289	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2290		goto err_reply;
2291
2292	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2293	if (ret) {
2294		ret = 0;
2295		goto err_reply;
2296	}
2297	cid = ctx->cid;
2298
2299	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2300	if (fctx) {
2301		u32 hw_cid = BNX2X_HW_CID(cp, cid);
2302		u32 val;
2303
2304		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2305					     FCOE_CONNECTION_TYPE);
2306		fctx->xstorm_ag_context.cdu_reserved = val;
2307		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2308					     FCOE_CONNECTION_TYPE);
2309		fctx->ustorm_ag_context.cdu_usage = val;
2310	}
2311	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2312		netdev_err(dev->netdev, "fcoe_offload size too big\n");
2313		goto err_reply;
2314	}
2315	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2316	if (!fcoe_offload)
2317		goto err_reply;
2318
2319	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2320	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2321	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2322	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2323	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2324
2325	cid = BNX2X_HW_CID(cp, cid);
2326	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2327				  FCOE_CONNECTION_TYPE, &l5_data);
2328	if (!ret)
2329		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2330
2331	return ret;
2332
2333err_reply:
2334	if (cid != -1)
2335		cnic_free_bnx2x_conn_resc(dev, l5_cid);
2336
2337	memset(&kcqe, 0, sizeof(kcqe));
2338	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2339	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2340	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2341
2342	cqes[0] = (struct kcqe *) &kcqe;
2343	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2344	return ret;
2345}
2346
2347static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2348{
2349	struct fcoe_kwqe_conn_enable_disable *req;
2350	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2351	union l5cm_specific_data l5_data;
2352	int ret;
2353	u32 cid, l5_cid;
2354	struct cnic_local *cp = dev->cnic_priv;
2355
2356	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2357	cid = req->context_id;
2358	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2359
2360	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2361		netdev_err(dev->netdev, "fcoe_enable size too big\n");
2362		return -ENOMEM;
2363	}
2364	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2365	if (!fcoe_enable)
2366		return -ENOMEM;
2367
2368	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2369	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2370	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2371				  FCOE_CONNECTION_TYPE, &l5_data);
2372	return ret;
2373}
2374
2375static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2376{
2377	struct fcoe_kwqe_conn_enable_disable *req;
2378	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2379	union l5cm_specific_data l5_data;
2380	int ret;
2381	u32 cid, l5_cid;
2382	struct cnic_local *cp = dev->cnic_priv;
2383
2384	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2385	cid = req->context_id;
2386	l5_cid = req->conn_id;
2387	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2388		return -EINVAL;
2389
2390	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2391
2392	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2393		netdev_err(dev->netdev, "fcoe_disable size too big\n");
2394		return -ENOMEM;
2395	}
2396	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2397	if (!fcoe_disable)
2398		return -ENOMEM;
2399
2400	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2401	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2402	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2403				  FCOE_CONNECTION_TYPE, &l5_data);
2404	return ret;
2405}
2406
2407static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2408{
2409	struct fcoe_kwqe_conn_destroy *req;
2410	union l5cm_specific_data l5_data;
2411	int ret;
2412	u32 cid, l5_cid;
2413	struct cnic_local *cp = dev->cnic_priv;
2414	struct cnic_context *ctx;
2415	struct fcoe_kcqe kcqe;
2416	struct kcqe *cqes[1];
2417
2418	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2419	cid = req->context_id;
2420	l5_cid = req->conn_id;
2421	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2422		return -EINVAL;
2423
2424	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2425
2426	ctx = &cp->ctx_tbl[l5_cid];
2427
2428	init_waitqueue_head(&ctx->waitq);
2429	ctx->wait_cond = 0;
2430
2431	memset(&l5_data, 0, sizeof(l5_data));
2432	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2433				  FCOE_CONNECTION_TYPE, &l5_data);
2434	if (ret == 0) {
2435		wait_event(ctx->waitq, ctx->wait_cond);
2436		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2437		queue_delayed_work(cnic_wq, &cp->delete_task,
2438				   msecs_to_jiffies(2000));
2439	}
2440
2441	memset(&kcqe, 0, sizeof(kcqe));
2442	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2443	kcqe.fcoe_conn_id = req->conn_id;
2444	kcqe.fcoe_conn_context_id = cid;
2445
2446	cqes[0] = (struct kcqe *) &kcqe;
2447	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2448	return ret;
2449}
2450
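/* Wait for contexts from start_cid onward to finish pending deletes and
 * clear their offload state, warning about any CID left offloaded.
 */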
2451static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2452{
2453	struct cnic_local *cp = dev->cnic_priv;
2454	u32 i;
2455
2456	for (i = start_cid; i < cp->max_cid_space; i++) {
2457		struct cnic_context *ctx = &cp->ctx_tbl[i];
2458		int j;
2459
2460		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2461			msleep(10);
2462
2463		for (j = 0; j < 5; j++) {
2464			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2465				break;
2466			msleep(20);
2467		}
2468
2469		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2470			netdev_warn(dev->netdev, "CID %x not deleted\n",
2471				   ctx->cid);
2472	}
2473}
2474
2475static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2476{
2477	struct fcoe_kwqe_destroy *req;
2478	union l5cm_specific_data l5_data;
2479	struct cnic_local *cp = dev->cnic_priv;
2480	int ret;
2481	u32 cid;
2482
2483	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2484
2485	req = (struct fcoe_kwqe_destroy *) kwqe;
2486	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2487
2488	memset(&l5_data, 0, sizeof(l5_data));
2489	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2490				  FCOE_CONNECTION_TYPE, &l5_data);
2491	return ret;
2492}
2493
2494static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2495					 struct kwqe *wqes[], u32 num_wqes)
2496{
2497	int i, work, ret;
2498	u32 opcode;
2499	struct kwqe *kwqe;
2500
2501	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2502		return -EAGAIN;		/* bnx2x is down */
2503
2504	for (i = 0; i < num_wqes; ) {
2505		kwqe = wqes[i];
2506		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2507		work = 1;
2508
2509		switch (opcode) {
2510		case ISCSI_KWQE_OPCODE_INIT1:
2511			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2512			break;
2513		case ISCSI_KWQE_OPCODE_INIT2:
2514			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2515			break;
2516		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2517			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2518						     num_wqes - i, &work);
2519			break;
2520		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2521			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2522			break;
2523		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2524			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2525			break;
2526		case L4_KWQE_OPCODE_VALUE_CONNECT1:
2527			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2528						 &work);
2529			break;
2530		case L4_KWQE_OPCODE_VALUE_CLOSE:
2531			ret = cnic_bnx2x_close(dev, kwqe);
2532			break;
2533		case L4_KWQE_OPCODE_VALUE_RESET:
2534			ret = cnic_bnx2x_reset(dev, kwqe);
2535			break;
2536		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2537			ret = cnic_bnx2x_offload_pg(dev, kwqe);
2538			break;
2539		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2540			ret = cnic_bnx2x_update_pg(dev, kwqe);
2541			break;
2542		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2543			ret = 0;
2544			break;
2545		default:
2546			ret = 0;
2547			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2548				   opcode);
2549			break;
2550		}
2551		if (ret < 0)
2552			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2553				   opcode);
2554		i += work;
2555	}
2556	return 0;
2557}
2558
2559static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2560					struct kwqe *wqes[], u32 num_wqes)
2561{
2562	struct cnic_local *cp = dev->cnic_priv;
2563	int i, work, ret;
2564	u32 opcode;
2565	struct kwqe *kwqe;
2566
2567	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2568		return -EAGAIN;		/* bnx2x is down */
2569
2570	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
2571		return -EINVAL;
2572
2573	for (i = 0; i < num_wqes; ) {
2574		kwqe = wqes[i];
2575		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2576		work = 1;
2577
2578		switch (opcode) {
2579		case FCOE_KWQE_OPCODE_INIT1:
2580			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2581						    num_wqes - i, &work);
2582			break;
2583		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2584			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2585						    num_wqes - i, &work);
2586			break;
2587		case FCOE_KWQE_OPCODE_ENABLE_CONN:
2588			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2589			break;
2590		case FCOE_KWQE_OPCODE_DISABLE_CONN:
2591			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2592			break;
2593		case FCOE_KWQE_OPCODE_DESTROY_CONN:
2594			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2595			break;
2596		case FCOE_KWQE_OPCODE_DESTROY:
2597			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2598			break;
2599		case FCOE_KWQE_OPCODE_STAT:
2600			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2601			break;
2602		default:
2603			ret = 0;
2604			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2605				   opcode);
2606			break;
2607		}
2608		if (ret < 0)
2609			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2610				   opcode);
2611		i += work;
2612	}
2613	return 0;
2614}
2615
2616static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2617				   u32 num_wqes)
2618{
2619	int ret = -EINVAL;
2620	u32 layer_code;
2621
2622	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2623		return -EAGAIN;		/* bnx2x is down */
2624
2625	if (!num_wqes)
2626		return 0;
2627
2628	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2629	switch (layer_code) {
2630	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2631	case KWQE_FLAGS_LAYER_MASK_L4:
2632	case KWQE_FLAGS_LAYER_MASK_L2:
2633		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2634		break;
2635
2636	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2637		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2638		break;
2639	}
2640	return ret;
2641}
2642
2643static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2644{
2645	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2646		return KCQE_FLAGS_LAYER_MASK_L4;
2647
2648	return opflag & KCQE_FLAGS_LAYER_MASK;
2649}
2650
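/* Dispatch completed KCQEs to the owning ULP in batches of consecutive
 * entries of the same layer, returning SPQ credits for ramrod completions.
 */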
2651static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2652{
2653	struct cnic_local *cp = dev->cnic_priv;
2654	int i, j, comp = 0;
2655
2656	i = 0;
2657	j = 1;
2658	while (num_cqes) {
2659		struct cnic_ulp_ops *ulp_ops;
2660		int ulp_type;
2661		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2662		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2663
2664		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2665			comp++;
2666
2667		while (j < num_cqes) {
2668			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2669
2670			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2671				break;
2672
2673			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2674				comp++;
2675			j++;
2676		}
2677
2678		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2679			ulp_type = CNIC_ULP_RDMA;
2680		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2681			ulp_type = CNIC_ULP_ISCSI;
2682		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2683			ulp_type = CNIC_ULP_FCOE;
2684		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2685			ulp_type = CNIC_ULP_L4;
2686		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2687			goto end;
2688		else {
2689			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2690				   kcqe_op_flag);
2691			goto end;
2692		}
2693
2694		rcu_read_lock();
2695		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2696		if (likely(ulp_ops)) {
2697			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2698						  cp->completed_kcq + i, j);
2699		}
2700		rcu_read_unlock();
2701end:
2702		num_cqes -= j;
2703		i += j;
2704		j = 1;
2705	}
2706	if (unlikely(comp))
2707		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2708}
2709
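/* Gather new KCQEs from the hardware producer into cp->completed_kcq[]. */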
2710static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2711{
2712	struct cnic_local *cp = dev->cnic_priv;
2713	u16 i, ri, hw_prod, last;
2714	struct kcqe *kcqe;
2715	int kcqe_cnt = 0, last_cnt = 0;
2716
2717	i = ri = last = info->sw_prod_idx;
2718	ri &= MAX_KCQ_IDX;
2719	hw_prod = *info->hw_prod_idx_ptr;
2720	hw_prod = info->hw_idx(hw_prod);
2721
2722	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2723		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2724		cp->completed_kcq[kcqe_cnt++] = kcqe;
2725		i = info->next_idx(i);
2726		ri = i & MAX_KCQ_IDX;
2727		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2728			last_cnt = kcqe_cnt;
2729			last = i;
2730		}
2731	}
2732
2733	info->sw_prod_idx = last;
2734	return last_cnt;
2735}
2736
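/* Count client setup/halt ramrod completions on the bnx2x L2 RX ring. */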
2737static int cnic_l2_completion(struct cnic_local *cp)
2738{
2739	u16 hw_cons, sw_cons;
2740	struct cnic_uio_dev *udev = cp->udev;
2741	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2742					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
2743	u32 cmd;
2744	int comp = 0;
2745
2746	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2747		return 0;
2748
2749	hw_cons = *cp->rx_cons_ptr;
2750	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2751		hw_cons++;
2752
2753	sw_cons = cp->rx_cons;
2754	while (sw_cons != hw_cons) {
2755		u8 cqe_fp_flags;
2756
2757		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2758		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2759		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2760			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2761			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2762			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2763			    cmd == RAMROD_CMD_ID_ETH_HALT)
2764				comp++;
2765		}
2766		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2767	}
2768	return comp;
2769}
2770
2771static void cnic_chk_pkt_rings(struct cnic_local *cp)
2772{
2773	u16 rx_cons, tx_cons;
2774	int comp = 0;
2775
2776	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2777		return;
2778
2779	rx_cons = *cp->rx_cons_ptr;
2780	tx_cons = *cp->tx_cons_ptr;
2781	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2782		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2783			comp = cnic_l2_completion(cp);
2784
2785		cp->tx_cons = tx_cons;
2786		cp->rx_cons = rx_cons;
2787
2788		if (cp->udev)
2789			uio_event_notify(&cp->udev->cnic_uinfo);
2790	}
2791	if (comp)
2792		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2793}
2794
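/* Service KCQ1, check the L2 rings, and return the latest status index. */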
2795static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2796{
2797	struct cnic_local *cp = dev->cnic_priv;
2798	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2799	int kcqe_cnt;
2800
2801	/* status block index must be read before reading other fields */
2802	rmb();
2803	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2804
2805	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2806
2807		service_kcqes(dev, kcqe_cnt);
2808
2809		/* Tell compiler that status_blk fields can change. */
2810		barrier();
2811		status_idx = (u16) *cp->kcq1.status_idx_ptr;
2812		/* status block index must be read first */
2813		rmb();
2814		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2815	}
2816
2817	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2818
2819	cnic_chk_pkt_rings(cp);
2820
2821	return status_idx;
2822}
2823
2824static int cnic_service_bnx2(void *data, void *status_blk)
2825{
2826	struct cnic_dev *dev = data;
2827
2828	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2829		struct status_block *sblk = status_blk;
2830
2831		return sblk->status_idx;
2832	}
2833
2834	return cnic_service_bnx2_queues(dev);
2835}
2836
2837static void cnic_service_bnx2_msix(unsigned long data)
2838{
2839	struct cnic_dev *dev = (struct cnic_dev *) data;
2840	struct cnic_local *cp = dev->cnic_priv;
2841
2842	cp->last_status_idx = cnic_service_bnx2_queues(dev);
2843
2844	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2845		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2846}
2847
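/* Prefetch the status block and next KCQE, then schedule the IRQ tasklet. */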
2848static void cnic_doirq(struct cnic_dev *dev)
2849{
2850	struct cnic_local *cp = dev->cnic_priv;
2851
2852	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2853		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2854
2855		prefetch(cp->status_blk.gen);
2856		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2857
2858		tasklet_schedule(&cp->cnic_irq_task);
2859	}
2860}
2861
2862static irqreturn_t cnic_irq(int irq, void *dev_instance)
2863{
2864	struct cnic_dev *dev = dev_instance;
2865	struct cnic_local *cp = dev->cnic_priv;
2866
2867	if (cp->ack_int)
2868		cp->ack_int(dev);
2869
2870	cnic_doirq(dev);
2871
2872	return IRQ_HANDLED;
2873}
2874
2875static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
2876				      u16 index, u8 op, u8 update)
2877{
2878	struct cnic_local *cp = dev->cnic_priv;
2879	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
2880		       COMMAND_REG_INT_ACK);
2881	struct igu_ack_register igu_ack;
2882
2883	igu_ack.status_block_index = index;
2884	igu_ack.sb_id_and_flags =
2885			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
2886			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
2887			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
2888			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
2889
2890	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
2891}
2892
2893static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
2894			    u16 index, u8 op, u8 update)
2895{
2896	struct igu_regular cmd_data;
2897	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
2898
2899	cmd_data.sb_id_and_flags =
2900		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
2901		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
2902		(update << IGU_REGULAR_BUPDATE_SHIFT) |
2903		(op << IGU_REGULAR_ENABLE_INT_SHIFT);
2904
2905
2906	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
2907}
2908
2909static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
2910{
2911	struct cnic_local *cp = dev->cnic_priv;
2912
2913	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
2914			   IGU_INT_DISABLE, 0);
2915}
2916
2917static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
2918{
2919	struct cnic_local *cp = dev->cnic_priv;
2920
2921	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
2922			IGU_INT_DISABLE, 0);
2923}
2924
2925static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2926{
2927	u32 last_status = *info->status_idx_ptr;
2928	int kcqe_cnt;
2929
2930	/* status block index must be read before reading the KCQ */
2931	rmb();
2932	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
2933
2934		service_kcqes(dev, kcqe_cnt);
2935
2936		/* Tell compiler that sblk fields can change. */
2937		barrier();
2938
2939		last_status = *info->status_idx_ptr;
2940		/* status block index must be read before reading the KCQ */
2941		rmb();
2942	}
2943	return last_status;
2944}
2945
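/* Tasklet handler: service KCQ1 (and KCQ2 on E2 and newer chips) until the
 * status index is stable, then re-enable the IGU interrupt.
 */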
2946static void cnic_service_bnx2x_bh(unsigned long data)
2947{
2948	struct cnic_dev *dev = (struct cnic_dev *) data;
2949	struct cnic_local *cp = dev->cnic_priv;
2950	u32 status_idx, new_status_idx;
2951
2952	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2953		return;
2954
2955	while (1) {
2956		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2957
2958		CNIC_WR16(dev, cp->kcq1.io_addr,
2959			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2960
2961		if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
2962			cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2963					   status_idx, IGU_INT_ENABLE, 1);
2964			break;
2965		}
2966
2967		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
2968
2969		if (new_status_idx != status_idx)
2970			continue;
2971
2972		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
2973			  MAX_KCQ_IDX);
2974
2975		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2976				status_idx, IGU_INT_ENABLE, 1);
2977
2978		break;
2979	}
2980}
2981
2982static int cnic_service_bnx2x(void *data, void *status_blk)
2983{
2984	struct cnic_dev *dev = data;
2985	struct cnic_local *cp = dev->cnic_priv;
2986
2987	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2988		cnic_doirq(dev);
2989
2990	cnic_chk_pkt_rings(cp);
2991
2992	return 0;
2993}
2994
2995static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
2996{
2997	struct cnic_ulp_ops *ulp_ops;
2998
2999	if (if_type == CNIC_ULP_ISCSI)
3000		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3001
3002	mutex_lock(&cnic_lock);
3003	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3004					    lockdep_is_held(&cnic_lock));
3005	if (!ulp_ops) {
3006		mutex_unlock(&cnic_lock);
3007		return;
3008	}
3009	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3010	mutex_unlock(&cnic_lock);
3011
3012	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3013		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3014
3015	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3016}
3017
3018static void cnic_ulp_stop(struct cnic_dev *dev)
3019{
3020	struct cnic_local *cp = dev->cnic_priv;
3021	int if_type;
3022
3023	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3024		cnic_ulp_stop_one(cp, if_type);
3025}
3026
3027static void cnic_ulp_start(struct cnic_dev *dev)
3028{
3029	struct cnic_local *cp = dev->cnic_priv;
3030	int if_type;
3031
3032	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3033		struct cnic_ulp_ops *ulp_ops;
3034
3035		mutex_lock(&cnic_lock);
3036		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3037						    lockdep_is_held(&cnic_lock));
3038		if (!ulp_ops || !ulp_ops->cnic_start) {
3039			mutex_unlock(&cnic_lock);
3040			continue;
3041		}
3042		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3043		mutex_unlock(&cnic_lock);
3044
3045		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3046			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3047
3048		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3049	}
3050}
3051
3052static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3053{
3054	struct cnic_dev *dev = data;
3055
3056	switch (info->cmd) {
3057	case CNIC_CTL_STOP_CMD:
3058		cnic_hold(dev);
3059
3060		cnic_ulp_stop(dev);
3061		cnic_stop_hw(dev);
3062
3063		cnic_put(dev);
3064		break;
3065	case CNIC_CTL_START_CMD:
3066		cnic_hold(dev);
3067
3068		if (!cnic_start_hw(dev))
3069			cnic_ulp_start(dev);
3070
3071		cnic_put(dev);
3072		break;
3073	case CNIC_CTL_STOP_ISCSI_CMD: {
3074		struct cnic_local *cp = dev->cnic_priv;
3075		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3076		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3077		break;
3078	}
3079	case CNIC_CTL_COMPLETION_CMD: {
3080		struct cnic_ctl_completion *comp = &info->data.comp;
3081		u32 cid = BNX2X_SW_CID(comp->cid);
3082		u32 l5_cid;
3083		struct cnic_local *cp = dev->cnic_priv;
3084
3085		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3086			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3087
3088			if (unlikely(comp->error)) {
3089				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3090				netdev_err(dev->netdev,
3091					   "CID %x CFC delete comp error %x\n",
3092					   cid, comp->error);
3093			}
3094
3095			ctx->wait_cond = 1;
3096			wake_up(&ctx->waitq);
3097		}
3098		break;
3099	}
3100	default:
3101		return -EINVAL;
3102	}
3103	return 0;
3104}
3105
3106static void cnic_ulp_init(struct cnic_dev *dev)
3107{
3108	int i;
3109	struct cnic_local *cp = dev->cnic_priv;
3110
3111	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3112		struct cnic_ulp_ops *ulp_ops;
3113
3114		mutex_lock(&cnic_lock);
3115		ulp_ops = cnic_ulp_tbl_prot(i);
3116		if (!ulp_ops || !ulp_ops->cnic_init) {
3117			mutex_unlock(&cnic_lock);
3118			continue;
3119		}
3120		ulp_get(ulp_ops);
3121		mutex_unlock(&cnic_lock);
3122
3123		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3124			ulp_ops->cnic_init(dev);
3125
3126		ulp_put(ulp_ops);
3127	}
3128}
3129
3130static void cnic_ulp_exit(struct cnic_dev *dev)
3131{
3132	int i;
3133	struct cnic_local *cp = dev->cnic_priv;
3134
3135	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3136		struct cnic_ulp_ops *ulp_ops;
3137
3138		mutex_lock(&cnic_lock);
3139		ulp_ops = cnic_ulp_tbl_prot(i);
3140		if (!ulp_ops || !ulp_ops->cnic_exit) {
3141			mutex_unlock(&cnic_lock);
3142			continue;
3143		}
3144		ulp_get(ulp_ops);
3145		mutex_unlock(&cnic_lock);
3146
3147		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3148			ulp_ops->cnic_exit(dev);
3149
3150		ulp_put(ulp_ops);
3151	}
3152}
3153
3154static int cnic_cm_offload_pg(struct cnic_sock *csk)
3155{
3156	struct cnic_dev *dev = csk->dev;
3157	struct l4_kwq_offload_pg *l4kwqe;
3158	struct kwqe *wqes[1];
3159
3160	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3161	memset(l4kwqe, 0, sizeof(*l4kwqe));
3162	wqes[0] = (struct kwqe *) l4kwqe;
3163
3164	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3165	l4kwqe->flags =
3166		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3167	l4kwqe->l2hdr_nbytes = ETH_HLEN;
3168
3169	l4kwqe->da0 = csk->ha[0];
3170	l4kwqe->da1 = csk->ha[1];
3171	l4kwqe->da2 = csk->ha[2];
3172	l4kwqe->da3 = csk->ha[3];
3173	l4kwqe->da4 = csk->ha[4];
3174	l4kwqe->da5 = csk->ha[5];
3175
3176	l4kwqe->sa0 = dev->mac_addr[0];
3177	l4kwqe->sa1 = dev->mac_addr[1];
3178	l4kwqe->sa2 = dev->mac_addr[2];
3179	l4kwqe->sa3 = dev->mac_addr[3];
3180	l4kwqe->sa4 = dev->mac_addr[4];
3181	l4kwqe->sa5 = dev->mac_addr[5];
3182
3183	l4kwqe->etype = ETH_P_IP;
3184	l4kwqe->ipid_start = DEF_IPID_START;
3185	l4kwqe->host_opaque = csk->l5_cid;
3186
3187	if (csk->vlan_id) {
3188		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3189		l4kwqe->vlan_tag = csk->vlan_id;
3190		l4kwqe->l2hdr_nbytes += 4;
3191	}
3192
3193	return dev->submit_kwqes(dev, wqes, 1);
3194}
3195
3196static int cnic_cm_update_pg(struct cnic_sock *csk)
3197{
3198	struct cnic_dev *dev = csk->dev;
3199	struct l4_kwq_update_pg *l4kwqe;
3200	struct kwqe *wqes[1];
3201
3202	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3203	memset(l4kwqe, 0, sizeof(*l4kwqe));
3204	wqes[0] = (struct kwqe *) l4kwqe;
3205
3206	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3207	l4kwqe->flags =
3208		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3209	l4kwqe->pg_cid = csk->pg_cid;
3210
3211	l4kwqe->da0 = csk->ha[0];
3212	l4kwqe->da1 = csk->ha[1];
3213	l4kwqe->da2 = csk->ha[2];
3214	l4kwqe->da3 = csk->ha[3];
3215	l4kwqe->da4 = csk->ha[4];
3216	l4kwqe->da5 = csk->ha[5];
3217
3218	l4kwqe->pg_host_opaque = csk->l5_cid;
3219	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3220
3221	return dev->submit_kwqes(dev, wqes, 1);
3222}
3223
3224static int cnic_cm_upload_pg(struct cnic_sock *csk)
3225{
3226	struct cnic_dev *dev = csk->dev;
3227	struct l4_kwq_upload *l4kwqe;
3228	struct kwqe *wqes[1];
3229
3230	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3231	memset(l4kwqe, 0, sizeof(*l4kwqe));
3232	wqes[0] = (struct kwqe *) l4kwqe;
3233
3234	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3235	l4kwqe->flags =
3236		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3237	l4kwqe->cid = csk->pg_cid;
3238
3239	return dev->submit_kwqes(dev, wqes, 1);
3240}
3241
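/* Build and submit the L4 connect request KWQEs: CONNECT1/2/3 for IPv6,
 * CONNECT1/3 for IPv4.
 */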
3242static int cnic_cm_conn_req(struct cnic_sock *csk)
3243{
3244	struct cnic_dev *dev = csk->dev;
3245	struct l4_kwq_connect_req1 *l4kwqe1;
3246	struct l4_kwq_connect_req2 *l4kwqe2;
3247	struct l4_kwq_connect_req3 *l4kwqe3;
3248	struct kwqe *wqes[3];
3249	u8 tcp_flags = 0;
3250	int num_wqes = 2;
3251
3252	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3253	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3254	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3255	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3256	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3257	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3258
3259	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3260	l4kwqe3->flags =
3261		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3262	l4kwqe3->ka_timeout = csk->ka_timeout;
3263	l4kwqe3->ka_interval = csk->ka_interval;
3264	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3265	l4kwqe3->tos = csk->tos;
3266	l4kwqe3->ttl = csk->ttl;
3267	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3268	l4kwqe3->pmtu = csk->mtu;
3269	l4kwqe3->rcv_buf = csk->rcv_buf;
3270	l4kwqe3->snd_buf = csk->snd_buf;
3271	l4kwqe3->seed = csk->seed;
3272
3273	wqes[0] = (struct kwqe *) l4kwqe1;
3274	if (test_bit(SK_F_IPV6, &csk->flags)) {
3275		wqes[1] = (struct kwqe *) l4kwqe2;
3276		wqes[2] = (struct kwqe *) l4kwqe3;
3277		num_wqes = 3;
3278
3279		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3280		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3281		l4kwqe2->flags =
3282			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3283			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3284		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3285		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3286		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3287		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3288		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3289		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3290		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3291			       sizeof(struct tcphdr);
3292	} else {
3293		wqes[1] = (struct kwqe *) l4kwqe3;
3294		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3295			       sizeof(struct tcphdr);
3296	}
3297
3298	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3299	l4kwqe1->flags =
3300		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3301		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3302	l4kwqe1->cid = csk->cid;
3303	l4kwqe1->pg_cid = csk->pg_cid;
3304	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3305	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3306	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3307	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3308	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3309		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3310	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3311		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3312	if (csk->tcp_flags & SK_TCP_NAGLE)
3313		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3314	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3315		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3316	if (csk->tcp_flags & SK_TCP_SACK)
3317		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3318	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3319		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3320
3321	l4kwqe1->tcp_flags = tcp_flags;
3322
3323	return dev->submit_kwqes(dev, wqes, num_wqes);
3324}
3325
3326static int cnic_cm_close_req(struct cnic_sock *csk)
3327{
3328	struct cnic_dev *dev = csk->dev;
3329	struct l4_kwq_close_req *l4kwqe;
3330	struct kwqe *wqes[1];
3331
3332	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3333	memset(l4kwqe, 0, sizeof(*l4kwqe));
3334	wqes[0] = (struct kwqe *) l4kwqe;
3335
3336	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3337	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3338	l4kwqe->cid = csk->cid;
3339
3340	return dev->submit_kwqes(dev, wqes, 1);
3341}
3342
3343static int cnic_cm_abort_req(struct cnic_sock *csk)
3344{
3345	struct cnic_dev *dev = csk->dev;
3346	struct l4_kwq_reset_req *l4kwqe;
3347	struct kwqe *wqes[1];
3348
3349	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3350	memset(l4kwqe, 0, sizeof(*l4kwqe));
3351	wqes[0] = (struct kwqe *) l4kwqe;
3352
3353	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3354	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3355	l4kwqe->cid = csk->cid;
3356
3357	return dev->submit_kwqes(dev, wqes, 1);
3358}
3359
3360static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3361			  u32 l5_cid, struct cnic_sock **csk, void *context)
3362{
3363	struct cnic_local *cp = dev->cnic_priv;
3364	struct cnic_sock *csk1;
3365
3366	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3367		return -EINVAL;
3368
3369	if (cp->ctx_tbl) {
3370		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3371
3372		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3373			return -EAGAIN;
3374	}
3375
3376	csk1 = &cp->csk_tbl[l5_cid];
3377	if (atomic_read(&csk1->ref_count))
3378		return -EAGAIN;
3379
3380	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3381		return -EBUSY;
3382
3383	csk1->dev = dev;
3384	csk1->cid = cid;
3385	csk1->l5_cid = l5_cid;
3386	csk1->ulp_type = ulp_type;
3387	csk1->context = context;
3388
3389	csk1->ka_timeout = DEF_KA_TIMEOUT;
3390	csk1->ka_interval = DEF_KA_INTERVAL;
3391	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3392	csk1->tos = DEF_TOS;
3393	csk1->ttl = DEF_TTL;
3394	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3395	csk1->rcv_buf = DEF_RCV_BUF;
3396	csk1->snd_buf = DEF_SND_BUF;
3397	csk1->seed = DEF_SEED;
3398
3399	*csk = csk1;
3400	return 0;
3401}
3402
3403static void cnic_cm_cleanup(struct cnic_sock *csk)
3404{
3405	if (csk->src_port) {
3406		struct cnic_dev *dev = csk->dev;
3407		struct cnic_local *cp = dev->cnic_priv;
3408
3409		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3410		csk->src_port = 0;
3411	}
3412}
3413
3414static void cnic_close_conn(struct cnic_sock *csk)
3415{
3416	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3417		cnic_cm_upload_pg(csk);
3418		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3419	}
3420	cnic_cm_cleanup(csk);
3421}
3422
3423static int cnic_cm_destroy(struct cnic_sock *csk)
3424{
3425	if (!cnic_in_use(csk))
3426		return -EINVAL;
3427
3428	csk_hold(csk);
3429	clear_bit(SK_F_INUSE, &csk->flags);
3430	smp_mb__after_clear_bit();
3431	while (atomic_read(&csk->ref_count) != 1)
3432		msleep(1);
3433	cnic_cm_cleanup(csk);
3434
3435	csk->flags = 0;
3436	csk_put(csk);
3437	return 0;
3438}
3439
3440static inline u16 cnic_get_vlan(struct net_device *dev,
3441				struct net_device **vlan_dev)
3442{
3443	if (dev->priv_flags & IFF_802_1Q_VLAN) {
3444		*vlan_dev = vlan_dev_real_dev(dev);
3445		return vlan_dev_vlan_id(dev);
3446	}
3447	*vlan_dev = dev;
3448	return 0;
3449}
3450
3451static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3452			     struct dst_entry **dst)
3453{
3454#if defined(CONFIG_INET)
3455	struct rtable *rt;
3456
3457	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3458	if (!IS_ERR(rt)) {
3459		*dst = &rt->dst;
3460		return 0;
3461	}
3462	return PTR_ERR(rt);
3463#else
3464	return -ENETUNREACH;
3465#endif
3466}
3467
3468static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3469			     struct dst_entry **dst)
3470{
3471#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
3472	struct flowi6 fl6;
3473
3474	memset(&fl6, 0, sizeof(fl6));
3475	ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
3476	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3477		fl6.flowi6_oif = dst_addr->sin6_scope_id;
3478
3479	*dst = ip6_route_output(&init_net, NULL, &fl6);
3480	if (*dst)
3481		return 0;
3482#endif
3483
3484	return -ENETUNREACH;
3485}
3486
3487static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3488					   int ulp_type)
3489{
3490	struct cnic_dev *dev = NULL;
3491	struct dst_entry *dst;
3492	struct net_device *netdev = NULL;
3493	int err = -ENETUNREACH;
3494
3495	if (dst_addr->sin_family == AF_INET)
3496		err = cnic_get_v4_route(dst_addr, &dst);
3497	else if (dst_addr->sin_family == AF_INET6) {
3498		struct sockaddr_in6 *dst_addr6 =
3499			(struct sockaddr_in6 *) dst_addr;
3500
3501		err = cnic_get_v6_route(dst_addr6, &dst);
3502	} else
3503		return NULL;
3504
3505	if (err)
3506		return NULL;
3507
3508	if (!dst->dev)
3509		goto done;
3510
3511	cnic_get_vlan(dst->dev, &netdev);
3512
3513	dev = cnic_from_netdev(netdev);
3514
3515done:
3516	dst_release(dst);
3517	if (dev)
3518		cnic_put(dev);
3519	return dev;
3520}
3521
3522static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3523{
3524	struct cnic_dev *dev = csk->dev;
3525	struct cnic_local *cp = dev->cnic_priv;
3526
3527	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3528}
3529
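/* Resolve the route to the peer, pick up the VLAN ID and path MTU, and
 * reserve a local source port from csk_port_tbl.
 */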
3530static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3531{
3532	struct cnic_dev *dev = csk->dev;
3533	struct cnic_local *cp = dev->cnic_priv;
3534	int is_v6, rc = 0;
3535	struct dst_entry *dst = NULL;
3536	struct net_device *realdev;
3537	__be16 local_port;
3538	u32 port_id;
3539
3540	if (saddr->local.v6.sin6_family == AF_INET6 &&
3541	    saddr->remote.v6.sin6_family == AF_INET6)
3542		is_v6 = 1;
3543	else if (saddr->local.v4.sin_family == AF_INET &&
3544		 saddr->remote.v4.sin_family == AF_INET)
3545		is_v6 = 0;
3546	else
3547		return -EINVAL;
3548
3549	clear_bit(SK_F_IPV6, &csk->flags);
3550
3551	if (is_v6) {
3552		set_bit(SK_F_IPV6, &csk->flags);
3553		cnic_get_v6_route(&saddr->remote.v6, &dst);
3554
3555		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3556		       sizeof(struct in6_addr));
3557		csk->dst_port = saddr->remote.v6.sin6_port;
3558		local_port = saddr->local.v6.sin6_port;
3559
3560	} else {
3561		cnic_get_v4_route(&saddr->remote.v4, &dst);
3562
3563		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3564		csk->dst_port = saddr->remote.v4.sin_port;
3565		local_port = saddr->local.v4.sin_port;
3566	}
3567
3568	csk->vlan_id = 0;
3569	csk->mtu = dev->netdev->mtu;
3570	if (dst && dst->dev) {
3571		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3572		if (realdev == dev->netdev) {
3573			csk->vlan_id = vlan;
3574			csk->mtu = dst_mtu(dst);
3575		}
3576	}
3577
3578	port_id = be16_to_cpu(local_port);
3579	if (port_id >= CNIC_LOCAL_PORT_MIN &&
3580	    port_id < CNIC_LOCAL_PORT_MAX) {
3581		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3582			port_id = 0;
3583	} else
3584		port_id = 0;
3585
3586	if (!port_id) {
3587		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3588		if (port_id == -1) {
3589			rc = -ENOMEM;
3590			goto err_out;
3591		}
3592		local_port = cpu_to_be16(port_id);
3593	}
3594	csk->src_port = local_port;
3595
3596err_out:
3597	dst_release(dst);
3598	return rc;
3599}
3600
3601static void cnic_init_csk_state(struct cnic_sock *csk)
3602{
3603	csk->state = 0;
3604	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3605	clear_bit(SK_F_CLOSING, &csk->flags);
3606}
3607
3608static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3609{
3610	struct cnic_local *cp = csk->dev->cnic_priv;
3611	int err = 0;
3612
3613	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3614		return -EOPNOTSUPP;
3615
3616	if (!cnic_in_use(csk))
3617		return -EINVAL;
3618
3619	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3620		return -EINVAL;
3621
3622	cnic_init_csk_state(csk);
3623
3624	err = cnic_get_route(csk, saddr);
3625	if (err)
3626		goto err_out;
3627
3628	err = cnic_resolve_addr(csk, saddr);
3629	if (!err)
3630		return 0;
3631
3632err_out:
3633	clear_bit(SK_F_CONNECT_START, &csk->flags);
3634	return err;
3635}
3636
3637static int cnic_cm_abort(struct cnic_sock *csk)
3638{
3639	struct cnic_local *cp = csk->dev->cnic_priv;
3640	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3641
3642	if (!cnic_in_use(csk))
3643		return -EINVAL;
3644
3645	if (cnic_abort_prep(csk))
3646		return cnic_cm_abort_req(csk);
3647
3648	/* Getting here means that we haven't started connect, or
3649	 * connect was not successful.
3650	 */
3651
3652	cp->close_conn(csk, opcode);
3653	if (csk->state != opcode)
3654		return -EALREADY;
3655
3656	return 0;
3657}
3658
3659static int cnic_cm_close(struct cnic_sock *csk)
3660{
3661	if (!cnic_in_use(csk))
3662		return -EINVAL;
3663
3664	if (cnic_close_prep(csk)) {
3665		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3666		return cnic_cm_close_req(csk);
3667	} else {
3668		return -EALREADY;
3669	}
3670	return 0;
3671}
3672
3673static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3674			   u8 opcode)
3675{
3676	struct cnic_ulp_ops *ulp_ops;
3677	int ulp_type = csk->ulp_type;
3678
3679	rcu_read_lock();
3680	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3681	if (ulp_ops) {
3682		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3683			ulp_ops->cm_connect_complete(csk);
3684		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3685			ulp_ops->cm_close_complete(csk);
3686		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3687			ulp_ops->cm_remote_abort(csk);
3688		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3689			ulp_ops->cm_abort_complete(csk);
3690		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3691			ulp_ops->cm_remote_close(csk);
3692	}
3693	rcu_read_unlock();
3694}
3695
3696static int cnic_cm_set_pg(struct cnic_sock *csk)
3697{
3698	if (cnic_offld_prep(csk)) {
3699		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3700			cnic_cm_update_pg(csk);
3701		else
3702			cnic_cm_offload_pg(csk);
3703	}
3704	return 0;
3705}
3706
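/* Handle OFFLOAD_PG/UPDATE_PG completions; on success save the PG CID
 * and issue the TCP connect request.
 */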
3707static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3708{
3709	struct cnic_local *cp = dev->cnic_priv;
3710	u32 l5_cid = kcqe->pg_host_opaque;
3711	u8 opcode = kcqe->op_code;
3712	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3713
3714	csk_hold(csk);
3715	if (!cnic_in_use(csk))
3716		goto done;
3717
3718	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3719		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3720		goto done;
3721	}
3722	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3723	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3724		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3725		cnic_cm_upcall(cp, csk,
3726			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3727		goto done;
3728	}
3729
3730	csk->pg_cid = kcqe->pg_cid;
3731	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3732	cnic_cm_conn_req(csk);
3733
3734done:
3735	csk_put(csk);
3736}
3737
3738static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3739{
3740	struct cnic_local *cp = dev->cnic_priv;
3741	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3742	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3743	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3744
3745	ctx->timestamp = jiffies;
3746	ctx->wait_cond = 1;
3747	wake_up(&ctx->waitq);
3748}
3749
3750static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3751{
3752	struct cnic_local *cp = dev->cnic_priv;
3753	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3754	u8 opcode = l4kcqe->op_code;
3755	u32 l5_cid;
3756	struct cnic_sock *csk;
3757
3758	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3759		cnic_process_fcoe_term_conn(dev, kcqe);
3760		return;
3761	}
3762	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3763	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3764		cnic_cm_process_offld_pg(dev, l4kcqe);
3765		return;
3766	}
3767
3768	l5_cid = l4kcqe->conn_id;
3769	if (opcode & 0x80)
3770		l5_cid = l4kcqe->cid;
3771	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3772		return;
3773
3774	csk = &cp->csk_tbl[l5_cid];
3775	csk_hold(csk);
3776
3777	if (!cnic_in_use(csk)) {
3778		csk_put(csk);
3779		return;
3780	}
3781
3782	switch (opcode) {
3783	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3784		if (l4kcqe->status != 0) {
3785			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3786			cnic_cm_upcall(cp, csk,
3787				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3788		}
3789		break;
3790	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3791		if (l4kcqe->status == 0)
3792			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3793
3794		smp_mb__before_clear_bit();
3795		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3796		cnic_cm_upcall(cp, csk, opcode);
3797		break;
3798
3799	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3800	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3801	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3802	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3803	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3804		cp->close_conn(csk, opcode);
3805		break;
3806
3807	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3808		/* after we already sent CLOSE_REQ */
3809		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
3810		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
3811		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3812			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
3813		else
3814			cnic_cm_upcall(cp, csk, opcode);
3815		break;
3816	}
3817	csk_put(csk);
3818}
3819
3820static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
3821{
3822	struct cnic_dev *dev = data;
3823	int i;
3824
3825	for (i = 0; i < num; i++)
3826		cnic_cm_process_kcqe(dev, kcqe[i]);
3827}
3828
3829static struct cnic_ulp_ops cm_ulp_ops = {
3830	.indicate_kcqes		= cnic_cm_indicate_kcqe,
3831};
3832
3833static void cnic_cm_free_mem(struct cnic_dev *dev)
3834{
3835	struct cnic_local *cp = dev->cnic_priv;
3836
3837	kfree(cp->csk_tbl);
3838	cp->csk_tbl = NULL;
3839	cnic_free_id_tbl(&cp->csk_port_tbl);
3840}
3841
3842static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3843{
3844	struct cnic_local *cp = dev->cnic_priv;
3845	u32 port_id;
3846
3847	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
3848			      GFP_KERNEL);
3849	if (!cp->csk_tbl)
3850		return -ENOMEM;
3851
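	/* Start the local port allocator at a random offset in its range */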
3852	port_id = random32();
3853	port_id %= CNIC_LOCAL_PORT_RANGE;
3854	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
3855			     CNIC_LOCAL_PORT_MIN, port_id)) {
3856		cnic_cm_free_mem(dev);
3857		return -ENOMEM;
3858	}
3859	return 0;
3860}
3861
3862static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3863{
3864	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
3865		/* Unsolicited RESET_COMP or RESET_RECEIVED */
3866		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
3867		csk->state = opcode;
3868	}
3869
3870	/* 1. If event opcode matches the expected event in csk->state
3871	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
3872	 *    event
3873	 * 3. If the expected event is 0, meaning the connection was never
3874	 *    established, we accept the opcode from cm_abort.
3875	 */
3876	if (opcode == csk->state || csk->state == 0 ||
3877	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
3878	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
3879		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3880			if (csk->state == 0)
3881				csk->state = opcode;
3882			return 1;
3883		}
3884	}
3885	return 0;
3886}
3887
3888static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3889{
3890	struct cnic_dev *dev = csk->dev;
3891	struct cnic_local *cp = dev->cnic_priv;
3892
3893	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
3894		cnic_cm_upcall(cp, csk, opcode);
3895		return;
3896	}
3897
3898	clear_bit(SK_F_CONNECT_START, &csk->flags);
3899	cnic_close_conn(csk);
3900	csk->state = opcode;
3901	cnic_cm_upcall(cp, csk, opcode);
3902}
3903
3904static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
3905{
3906}
3907
3908static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
3909{
3910	u32 seed;
3911
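	/* Seed the chip with a random value (likely for TCP ISN generation) */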
3912	seed = random32();
3913	cnic_ctx_wr(dev, 45, 0, seed);
3914	return 0;
3915}
3916
3917static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3918{
3919	struct cnic_dev *dev = csk->dev;
3920	struct cnic_local *cp = dev->cnic_priv;
3921	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
3922	union l5cm_specific_data l5_data;
3923	u32 cmd = 0;
3924	int close_complete = 0;
3925
3926	switch (opcode) {
3927	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3928	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3929	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3930		if (cnic_ready_to_close(csk, opcode)) {
3931			if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3932				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3933			else
3934				close_complete = 1;
3935		}
3936		break;
3937	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3938		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3939		break;
3940	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3941		close_complete = 1;
3942		break;
3943	}
3944	if (cmd) {
3945		memset(&l5_data, 0, sizeof(l5_data));
3946
3947		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
3948				    &l5_data);
3949	} else if (close_complete) {
3950		ctx->timestamp = jiffies;
3951		cnic_close_conn(csk);
3952		cnic_cm_upcall(cp, csk, csk->state);
3953	}
3954}
3955
3956static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
3957{
3958	struct cnic_local *cp = dev->cnic_priv;
3959
3960	if (!cp->ctx_tbl)
3961		return;
3962
3963	if (!netif_running(dev->netdev))
3964		return;
3965
3966	cnic_bnx2x_delete_wait(dev, 0);
3967
3968	cancel_delayed_work(&cp->delete_task);
3969	flush_workqueue(cnic_wq);
3970
3971	if (atomic_read(&cp->iscsi_conn) != 0)
3972		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
3973			    atomic_read(&cp->iscsi_conn));
3974}
3975
3976static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
3977{
3978	struct cnic_local *cp = dev->cnic_priv;
3979	u32 pfid = cp->pfid;
3980	u32 port = CNIC_PORT(cp);
3981
3982	cnic_init_bnx2x_mac(dev);
3983	cnic_bnx2x_set_tcp_timestamp(dev, 1);
3984
3985	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
3986		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
3987
3988	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3989		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
3990	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3991		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
3992		DEF_MAX_DA_COUNT);
3993
3994	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3995		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
3996	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3997		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
3998	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3999		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4000	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4001		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4002
4003	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4004		DEF_MAX_CWND);
4005	return 0;
4006}
4007
4008static void cnic_delete_task(struct work_struct *work)
4009{
4010	struct cnic_local *cp;
4011	struct cnic_dev *dev;
4012	u32 i;
4013	int need_resched = 0;
4014
4015	cp = container_of(work, struct cnic_local, delete_task.work);
4016	dev = cp->dev;
4017
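	/* An iSCSI stop request is pending; stop the iSCSI ULP and
	 * acknowledge it back to the ethdev driver. */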
4018	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4019		struct drv_ctl_info info;
4020
4021		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4022
4023		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4024		cp->ethdev->drv_ctl(dev->netdev, &info);
4025	}
4026
4027	for (i = 0; i < cp->max_cid_space; i++) {
4028		struct cnic_context *ctx = &cp->ctx_tbl[i];
4029		int err;
4030
4031		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4032		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4033			continue;
4034
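		/* Destroy the context only if at least 2 seconds have passed
		 * since its last recorded activity; otherwise retry later. */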
4035		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4036			need_resched = 1;
4037			continue;
4038		}
4039
4040		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4041			continue;
4042
4043		err = cnic_bnx2x_destroy_ramrod(dev, i);
4044
4045		cnic_free_bnx2x_conn_resc(dev, i);
4046		if (!err) {
4047			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4048				atomic_dec(&cp->iscsi_conn);
4049
4050			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4051		}
4052	}
4053
4054	if (need_resched)
4055		queue_delayed_work(cnic_wq, &cp->delete_task,
4056				   msecs_to_jiffies(10));
4057
4058}
4059
4060static int cnic_cm_open(struct cnic_dev *dev)
4061{
4062	struct cnic_local *cp = dev->cnic_priv;
4063	int err;
4064
4065	err = cnic_cm_alloc_mem(dev);
4066	if (err)
4067		return err;
4068
4069	err = cp->start_cm(dev);
4070
4071	if (err)
4072		goto err_out;
4073
4074	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4075
4076	dev->cm_create = cnic_cm_create;
4077	dev->cm_destroy = cnic_cm_destroy;
4078	dev->cm_connect = cnic_cm_connect;
4079	dev->cm_abort = cnic_cm_abort;
4080	dev->cm_close = cnic_cm_close;
4081	dev->cm_select_dev = cnic_cm_select_dev;
4082
4083	cp->ulp_handle[CNIC_ULP_L4] = dev;
4084	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4085	return 0;
4086
4087err_out:
4088	cnic_cm_free_mem(dev);
4089	return err;
4090}
4091
4092static int cnic_cm_shutdown(struct cnic_dev *dev)
4093{
4094	struct cnic_local *cp = dev->cnic_priv;
4095	int i;
4096
4097	cp->stop_cm(dev);
4098
4099	if (!cp->csk_tbl)
4100		return 0;
4101
4102	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4103		struct cnic_sock *csk = &cp->csk_tbl[i];
4104
4105		clear_bit(SK_F_INUSE, &csk->flags);
4106		cnic_cm_cleanup(csk);
4107	}
4108	cnic_cm_free_mem(dev);
4109
4110	return 0;
4111}
4112
4113static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4114{
4115	u32 cid_addr;
4116	int i;
4117
4118	cid_addr = GET_CID_ADDR(cid);
4119
4120	for (i = 0; i < CTX_SIZE; i += 4)
4121		cnic_ctx_wr(dev, cid_addr, i, 0);
4122}
4123
4124static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4125{
4126	struct cnic_local *cp = dev->cnic_priv;
4127	int ret = 0, i;
4128	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4129
4130	if (CHIP_NUM(cp) != CHIP_NUM_5709)
4131		return 0;
4132
4133	for (i = 0; i < cp->ctx_blks; i++) {
4134		int j;
4135		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4136		u32 val;
4137
4138		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
4139
4140		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4141			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4142		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4143			(u64) cp->ctx_arr[i].mapping >> 32);
4144		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4145			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
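		/* Poll (up to ~50 us) for the chip to clear the write request */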
4146		for (j = 0; j < 10; j++) {
4147
4148			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4149			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4150				break;
4151			udelay(5);
4152		}
4153		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4154			ret = -EBUSY;
4155			break;
4156		}
4157	}
4158	return ret;
4159}
4160
4161static void cnic_free_irq(struct cnic_dev *dev)
4162{
4163	struct cnic_local *cp = dev->cnic_priv;
4164	struct cnic_eth_dev *ethdev = cp->ethdev;
4165
4166	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4167		cp->disable_int_sync(dev);
4168		tasklet_kill(&cp->cnic_irq_task);
4169		free_irq(ethdev->irq_arr[0].vector, dev);
4170	}
4171}
4172
4173static int cnic_request_irq(struct cnic_dev *dev)
4174{
4175	struct cnic_local *cp = dev->cnic_priv;
4176	struct cnic_eth_dev *ethdev = cp->ethdev;
4177	int err;
4178
4179	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4180	if (err)
4181		tasklet_disable(&cp->cnic_irq_task);
4182
4183	return err;
4184}
4185
4186static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4187{
4188	struct cnic_local *cp = dev->cnic_priv;
4189	struct cnic_eth_dev *ethdev = cp->ethdev;
4190
4191	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4192		int err, i = 0;
4193		int sblk_num = cp->status_blk_num;
4194		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4195			   BNX2_HC_SB_CONFIG_1;
4196
4197		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4198
4199		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4200		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4201		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4202
4203		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4204		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4205			     (unsigned long) dev);
4206		err = cnic_request_irq(dev);
4207		if (err)
4208			return err;
4209
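		/* Coalesce until the completion producer index returns to 0 */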
4210		while (cp->status_blk.bnx2->status_completion_producer_index &&
4211		       i < 10) {
4212			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4213				1 << (11 + sblk_num));
4214			udelay(10);
4215			i++;
4216			barrier();
4217		}
4218		if (cp->status_blk.bnx2->status_completion_producer_index) {
4219			cnic_free_irq(dev);
4220			goto failed;
4221		}
4222
4223	} else {
4224		struct status_block *sblk = cp->status_blk.gen;
4225		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4226		int i = 0;
4227
4228		while (sblk->status_completion_producer_index && i < 10) {
4229			CNIC_WR(dev, BNX2_HC_COMMAND,
4230				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4231			udelay(10);
4232			i++;
4233			barrier();
4234		}
4235		if (sblk->status_completion_producer_index)
4236			goto failed;
4237
4238	}
4239	return 0;
4240
4241failed:
4242	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4243	return -EBUSY;
4244}
4245
4246static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4247{
4248	struct cnic_local *cp = dev->cnic_priv;
4249	struct cnic_eth_dev *ethdev = cp->ethdev;
4250
4251	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4252		return;
4253
4254	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4255		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4256}
4257
4258static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4259{
4260	struct cnic_local *cp = dev->cnic_priv;
4261	struct cnic_eth_dev *ethdev = cp->ethdev;
4262
4263	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4264		return;
4265
4266	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4267		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4268	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4269	synchronize_irq(ethdev->irq_arr[0].vector);
4270}
4271
4272static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4273{
4274	struct cnic_local *cp = dev->cnic_priv;
4275	struct cnic_eth_dev *ethdev = cp->ethdev;
4276	struct cnic_uio_dev *udev = cp->udev;
4277	u32 cid_addr, tx_cid, sb_id;
4278	u32 val, offset0, offset1, offset2, offset3;
4279	int i;
4280	struct tx_bd *txbd;
4281	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4282	struct status_block *s_blk = cp->status_blk.gen;
4283
4284	sb_id = cp->status_blk_num;
4285	tx_cid = 20;
4286	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4287	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4288		struct status_block_msix *sblk = cp->status_blk.bnx2;
4289
4290		tx_cid = TX_TSS_CID + sb_id - 1;
4291		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4292			(TX_TSS_CID << 7));
4293		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4294	}
4295	cp->tx_cons = *cp->tx_cons_ptr;
4296
4297	cid_addr = GET_CID_ADDR(tx_cid);
4298	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
4299		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4300
4301		for (i = 0; i < PHY_CTX_SIZE; i += 4)
4302			cnic_ctx_wr(dev, cid_addr2, i, 0);
4303
4304		offset0 = BNX2_L2CTX_TYPE_XI;
4305		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4306		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4307		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4308	} else {
4309		cnic_init_context(dev, tx_cid);
4310		cnic_init_context(dev, tx_cid + 1);
4311
4312		offset0 = BNX2_L2CTX_TYPE;
4313		offset1 = BNX2_L2CTX_CMD_TYPE;
4314		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4315		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4316	}
4317	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4318	cnic_ctx_wr(dev, cid_addr, offset0, val);
4319
4320	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4321	cnic_ctx_wr(dev, cid_addr, offset1, val);
4322
4323	txbd = udev->l2_ring;
4324
4325	buf_map = udev->l2_buf_map;
4326	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
4327		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4328		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4329	}
4330	val = (u64) ring_map >> 32;
4331	cnic_ctx_wr(dev, cid_addr, offset2, val);
4332	txbd->tx_bd_haddr_hi = val;
4333
4334	val = (u64) ring_map & 0xffffffff;
4335	cnic_ctx_wr(dev, cid_addr, offset3, val);
4336	txbd->tx_bd_haddr_lo = val;
4337}
4338
4339static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4340{
4341	struct cnic_local *cp = dev->cnic_priv;
4342	struct cnic_eth_dev *ethdev = cp->ethdev;
4343	struct cnic_uio_dev *udev = cp->udev;
4344	u32 cid_addr, sb_id, val, coal_reg, coal_val;
4345	int i;
4346	struct rx_bd *rxbd;
4347	struct status_block *s_blk = cp->status_blk.gen;
4348	dma_addr_t ring_map = udev->l2_ring_map;
4349
4350	sb_id = cp->status_blk_num;
4351	cnic_init_context(dev, 2);
4352	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4353	coal_reg = BNX2_HC_COMMAND;
4354	coal_val = CNIC_RD(dev, coal_reg);
4355	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4356		struct status_block_msix *sblk = cp->status_blk.bnx2;
4357
4358		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4359		coal_reg = BNX2_HC_COALESCE_NOW;
4360		coal_val = 1 << (11 + sb_id);
4361	}
4362	i = 0;
4363	while (*cp->rx_cons_ptr == 0 && i < 10) {
4364		CNIC_WR(dev, coal_reg, coal_val);
4365		udelay(10);
4366		i++;
4367		barrier();
4368	}
4369	cp->rx_cons = *cp->rx_cons_ptr;
4370
4371	cid_addr = GET_CID_ADDR(2);
4372	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4373	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4374	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4375
4376	if (sb_id == 0)
4377		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4378	else
4379		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4380	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4381
4382	rxbd = udev->l2_ring + BCM_PAGE_SIZE;
4383	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
4384		dma_addr_t buf_map;
4385		int n = (i % cp->l2_rx_ring_size) + 1;
4386
4387		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4388		rxbd->rx_bd_len = cp->l2_single_buf_size;
4389		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4390		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4391		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4392	}
4393	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4394	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4395	rxbd->rx_bd_haddr_hi = val;
4396
4397	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4398	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4399	rxbd->rx_bd_haddr_lo = val;
4400
4401	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4402	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4403}
4404
4405static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4406{
4407	struct kwqe *wqes[1], l2kwqe;
4408
4409	memset(&l2kwqe, 0, sizeof(l2kwqe));
4410	wqes[0] = &l2kwqe;
4411	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4412			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
4413			       KWQE_OPCODE_SHIFT) | 2;
4414	dev->submit_kwqes(dev, wqes, 1);
4415}
4416
4417static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4418{
4419	struct cnic_local *cp = dev->cnic_priv;
4420	u32 val;
4421
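	/* Find this function's shared memory region and read the iSCSI MAC */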
4422	val = cp->func << 2;
4423
4424	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4425
4426	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4427			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4428	dev->mac_addr[0] = (u8) (val >> 8);
4429	dev->mac_addr[1] = (u8) val;
4430
4431	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4432
4433	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4434			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4435	dev->mac_addr[2] = (u8) (val >> 24);
4436	dev->mac_addr[3] = (u8) (val >> 16);
4437	dev->mac_addr[4] = (u8) (val >> 8);
4438	dev->mac_addr[5] = (u8) val;
4439
4440	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4441
4442	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4443	if (CHIP_NUM(cp) != CHIP_NUM_5709)
4444		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4445
4446	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4447	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4448	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4449}
4450
4451static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4452{
4453	struct cnic_local *cp = dev->cnic_priv;
4454	struct cnic_eth_dev *ethdev = cp->ethdev;
4455	struct status_block *sblk = cp->status_blk.gen;
4456	u32 val, kcq_cid_addr, kwq_cid_addr;
4457	int err;
4458
4459	cnic_set_bnx2_mac(dev);
4460
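	/* Kernel bypass block size: host page size, capped at 4K */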
4461	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4462	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4463	if (BCM_PAGE_BITS > 12)
4464		val |= (12 - 8)  << 4;
4465	else
4466		val |= (BCM_PAGE_BITS - 8)  << 4;
4467
4468	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4469
4470	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4471	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4472	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4473
4474	err = cnic_setup_5709_context(dev, 1);
4475	if (err)
4476		return err;
4477
4478	cnic_init_context(dev, KWQ_CID);
4479	cnic_init_context(dev, KCQ_CID);
4480
4481	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4482	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4483
4484	cp->max_kwq_idx = MAX_KWQ_IDX;
4485	cp->kwq_prod_idx = 0;
4486	cp->kwq_con_idx = 0;
4487	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4488
4489	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
4490		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4491	else
4492		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4493
4494	/* Initialize the kernel work queue context. */
4495	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4496	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4497	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4498
4499	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4500	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4501
4502	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4503	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4504
4505	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4506	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4507
4508	val = (u32) cp->kwq_info.pgtbl_map;
4509	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4510
4511	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4512	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4513
4514	cp->kcq1.sw_prod_idx = 0;
4515	cp->kcq1.hw_prod_idx_ptr =
4516		(u16 *) &sblk->status_completion_producer_index;
4517
4518	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
4519
4520	/* Initialize the kernel complete queue context. */
4521	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4522	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4523	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4524
4525	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4526	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4527
4528	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4529	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4530
4531	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4532	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4533
4534	val = (u32) cp->kcq1.dma.pgtbl_map;
4535	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4536
4537	cp->int_num = 0;
4538	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4539		struct status_block_msix *msblk = cp->status_blk.bnx2;
4540		u32 sb_id = cp->status_blk_num;
4541		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4542
4543		cp->kcq1.hw_prod_idx_ptr =
4544			(u16 *) &msblk->status_completion_producer_index;
4545		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
4546		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
4547		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4548		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4549		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4550	}
4551
4552	/* Enable Command Scheduler notification when we write to the
4553	 * host producer index of the kernel contexts. */
4554	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4555
4556	/* Enable Command Scheduler notification when we write to either
4557	 * the Send Queue or Receive Queue producer indexes of the kernel
4558	 * bypass contexts. */
4559	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4560	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4561
4562	/* Notify COM when the driver posts an application buffer. */
4563	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4564
4565	/* Set the CP and COM doorbells.  These two processors poll the
4566	 * doorbell for a non-zero value before running.  This must be done
4567	 * after setting up the kernel queue contexts. */
4568	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4569	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4570
4571	cnic_init_bnx2_tx_ring(dev);
4572	cnic_init_bnx2_rx_ring(dev);
4573
4574	err = cnic_init_bnx2_irq(dev);
4575	if (err) {
4576		netdev_err(dev->netdev, "cnic_init_irq failed\n");
4577		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4578		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4579		return err;
4580	}
4581
4582	return 0;
4583}
4584
4585static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4586{
4587	struct cnic_local *cp = dev->cnic_priv;
4588	struct cnic_eth_dev *ethdev = cp->ethdev;
4589	u32 start_offset = ethdev->ctx_tbl_offset;
4590	int i;
4591
4592	for (i = 0; i < cp->ctx_blks; i++) {
4593		struct cnic_ctx *ctx = &cp->ctx_arr[i];
4594		dma_addr_t map = ctx->mapping;
4595
4596		if (cp->ctx_align) {
4597			unsigned long mask = cp->ctx_align - 1;
4598
4599			map = (map + mask) & ~mask;
4600		}
4601
4602		cnic_ctx_tbl_wr(dev, start_offset + i, map);
4603	}
4604}
4605
4606static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4607{
4608	struct cnic_local *cp = dev->cnic_priv;
4609	struct cnic_eth_dev *ethdev = cp->ethdev;
4610	int err = 0;
4611
4612	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4613		     (unsigned long) dev);
4614	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4615		err = cnic_request_irq(dev);
4616
4617	return err;
4618}
4619
4620static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4621						u16 sb_id, u8 sb_index,
4622						u8 disable)
4623{
4624
4625	u32 addr = BAR_CSTRORM_INTMEM +
4626			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4627			offsetof(struct hc_status_block_data_e1x, index_data) +
4628			sizeof(struct hc_index_data)*sb_index +
4629			offsetof(struct hc_index_data, flags);
4630	u16 flags = CNIC_RD16(dev, addr);
4631	/* clear and set */
4632	flags &= ~HC_INDEX_DATA_HC_ENABLED;
4633	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4634		  HC_INDEX_DATA_HC_ENABLED);
4635	CNIC_WR16(dev, addr, flags);
4636}
4637
4638static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4639{
4640	struct cnic_local *cp = dev->cnic_priv;
4641	u8 sb_id = cp->status_blk_num;
4642
4643	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4644			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4645			offsetof(struct hc_status_block_data_e1x, index_data) +
4646			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4647			offsetof(struct hc_index_data, timeout), 64 / 4);
4648	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4649}
4650
4651static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4652{
4653}
4654
4655static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4656				    struct client_init_ramrod_data *data)
4657{
4658	struct cnic_local *cp = dev->cnic_priv;
4659	struct cnic_uio_dev *udev = cp->udev;
4660	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4661	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4662	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4663	int i;
4664	u32 cli = cp->ethdev->iscsi_l2_client_id;
4665	u32 val;
4666
4667	memset(txbd, 0, BCM_PAGE_SIZE);
4668
4669	buf_map = udev->l2_buf_map;
4670	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4671		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4672		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4673
4674		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4675		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4676		reg_bd->addr_hi = start_bd->addr_hi;
4677		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4678		start_bd->nbytes = cpu_to_le16(0x10);
4679		start_bd->nbd = cpu_to_le16(3);
4680		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4681		start_bd->general_data = (UNICAST_ADDRESS <<
4682			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
4683		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4684
4685	}
4686
4687	val = (u64) ring_map >> 32;
4688	txbd->next_bd.addr_hi = cpu_to_le32(val);
4689
4690	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4691
4692	val = (u64) ring_map & 0xffffffff;
4693	txbd->next_bd.addr_lo = cpu_to_le32(val);
4694
4695	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4696
4697	/* Other ramrod params */
4698	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4699	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4700
4701	/* reset xstorm per client statistics */
4702	if (cli < MAX_STAT_COUNTER_ID) {
4703		data->general.statistics_zero_flg = 1;
4704		data->general.statistics_en_flg = 1;
4705		data->general.statistics_counter_id = cli;
4706	}
4707
4708	cp->tx_cons_ptr =
4709		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
4710}
4711
4712static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4713				    struct client_init_ramrod_data *data)
4714{
4715	struct cnic_local *cp = dev->cnic_priv;
4716	struct cnic_uio_dev *udev = cp->udev;
4717	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4718				BCM_PAGE_SIZE);
4719	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4720				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
4721	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4722	int i;
4723	u32 cli = cp->ethdev->iscsi_l2_client_id;
4724	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4725	u32 val;
4726	dma_addr_t ring_map = udev->l2_ring_map;
4727
4728	/* General data */
4729	data->general.client_id = cli;
4730	data->general.activate_flg = 1;
4731	data->general.sp_client_id = cli;
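	/* mtu excludes the 14-byte Ethernet header */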
4732	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4733	data->general.func_id = cp->pfid;
4734
4735	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
4736		dma_addr_t buf_map;
4737		int n = (i % cp->l2_rx_ring_size) + 1;
4738
4739		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4740		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4741		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4742	}
4743
4744	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4745	rxbd->addr_hi = cpu_to_le32(val);
4746	data->rx.bd_page_base.hi = cpu_to_le32(val);
4747
4748	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4749	rxbd->addr_lo = cpu_to_le32(val);
4750	data->rx.bd_page_base.lo = cpu_to_le32(val);
4751
4752	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
4753	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
4754	rxcqe->addr_hi = cpu_to_le32(val);
4755	data->rx.cqe_page_base.hi = cpu_to_le32(val);
4756
4757	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4758	rxcqe->addr_lo = cpu_to_le32(val);
4759	data->rx.cqe_page_base.lo = cpu_to_le32(val);
4760
4761	/* Other ramrod params */
4762	data->rx.client_qzone_id = cl_qzone_id;
4763	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
4764	data->rx.status_block_id = BNX2X_DEF_SB_ID;
4765
4766	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
4767
4768	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
4769	data->rx.outer_vlan_removal_enable_flg = 1;
4770	data->rx.silent_vlan_removal_flg = 1;
4771	data->rx.silent_vlan_value = 0;
4772	data->rx.silent_vlan_mask = 0xffff;
4773
4774	cp->rx_cons_ptr =
4775		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
4776	cp->rx_cons = *cp->rx_cons_ptr;
4777}
4778
4779static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
4780{
4781	struct cnic_local *cp = dev->cnic_priv;
4782	u32 pfid = cp->pfid;
4783
4784	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4785			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
4786	cp->kcq1.sw_prod_idx = 0;
4787
4788	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4789		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4790
4791		cp->kcq1.hw_prod_idx_ptr =
4792			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4793		cp->kcq1.status_idx_ptr =
4794			&sb->sb.running_index[SM_RX_ID];
4795	} else {
4796		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
4797
4798		cp->kcq1.hw_prod_idx_ptr =
4799			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4800		cp->kcq1.status_idx_ptr =
4801			&sb->sb.running_index[SM_RX_ID];
4802	}
4803
4804	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4805		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4806
4807		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
4808					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
4809		cp->kcq2.sw_prod_idx = 0;
4810		cp->kcq2.hw_prod_idx_ptr =
4811			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
4812		cp->kcq2.status_idx_ptr =
4813			&sb->sb.running_index[SM_RX_ID];
4814	}
4815}
4816
4817static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4818{
4819	struct cnic_local *cp = dev->cnic_priv;
4820	struct cnic_eth_dev *ethdev = cp->ethdev;
4821	int func = CNIC_FUNC(cp), ret;
4822	u32 pfid;
4823
4824	cp->port_mode = CHIP_PORT_MODE_NONE;
4825
4826	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4827		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
4828
4829		if (!(val & 1))
4830			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
4831		else
4832			val = (val >> 1) & 1;
4833
4834		if (val) {
4835			cp->port_mode = CHIP_4_PORT_MODE;
4836			cp->pfid = func >> 1;
4837		} else {
4838			cp->port_mode = CHIP_2_PORT_MODE;
4839			cp->pfid = func & 0x6;
4840		}
4841	} else {
4842		cp->pfid = func;
4843	}
4844	pfid = cp->pfid;
4845
4846	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
4847			       cp->iscsi_start_cid, 0);
4848
4849	if (ret)
4850		return -ENOMEM;
4851
4852	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4853		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
4854					BNX2X_FCOE_NUM_CONNECTIONS,
4855					cp->fcoe_start_cid, 0);
4856
4857		if (ret)
4858			return -ENOMEM;
4859	}
4860
4861	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
4862
4863	cnic_init_bnx2x_kcq(dev);
4864
4865	/* Only 1 EQ */
4866	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
4867	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4868		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
4869	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4870		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
4871		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
4872	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4873		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
4874		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
4875	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4876		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
4877		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
4878	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4879		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
4880		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
4881	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4882		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
4883	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
4884		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
4885	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4886		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
4887		HC_INDEX_ISCSI_EQ_CONS);
4888
4889	CNIC_WR(dev, BAR_USTRORM_INTMEM +
4890		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
4891		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
4892	CNIC_WR(dev, BAR_USTRORM_INTMEM +
4893		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
4894		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
4895
4896	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4897		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
4898
4899	cnic_setup_bnx2x_context(dev);
4900
4901	ret = cnic_init_bnx2x_irq(dev);
4902	if (ret)
4903		return ret;
4904
4905	return 0;
4906}
4907
4908static void cnic_init_rings(struct cnic_dev *dev)
4909{
4910	struct cnic_local *cp = dev->cnic_priv;
4911	struct cnic_uio_dev *udev = cp->udev;
4912
4913	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
4914		return;
4915
4916	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4917		cnic_init_bnx2_tx_ring(dev);
4918		cnic_init_bnx2_rx_ring(dev);
4919		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4920	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4921		u32 cli = cp->ethdev->iscsi_l2_client_id;
4922		u32 cid = cp->ethdev->iscsi_l2_cid;
4923		u32 cl_qzone_id;
4924		struct client_init_ramrod_data *data;
4925		union l5cm_specific_data l5_data;
4926		struct ustorm_eth_rx_producers rx_prods = {0};
4927		u32 off, i, *cid_ptr;
4928
4929		rx_prods.bd_prod = 0;
4930		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
4931		barrier();
4932
4933		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4934
4935		off = BAR_USTRORM_INTMEM +
4936			(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
4937			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
4938			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
4939
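		/* Publish the initial RX BD and CQE producers to USTORM */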
4940		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
4941			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
4942
4943		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4944
4945		data = udev->l2_buf;
4946		cid_ptr = udev->l2_buf + 12;
4947
4948		memset(data, 0, sizeof(*data));
4949
4950		cnic_init_bnx2x_tx_ring(dev, data);
4951		cnic_init_bnx2x_rx_ring(dev, data);
4952
4953		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
4954		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
4955
4956		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4957
4958		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
4959			cid, ETH_CONNECTION_TYPE, &l5_data);
4960
4961		i = 0;
4962		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4963		       ++i < 10)
4964			msleep(1);
4965
4966		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4967			netdev_err(dev->netdev,
4968				"iSCSI CLIENT_SETUP did not complete\n");
4969		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
4970		cnic_ring_ctl(dev, cid, cli, 1);
4971		*cid_ptr = cid;
4972	}
4973}
4974
4975static void cnic_shutdown_rings(struct cnic_dev *dev)
4976{
4977	struct cnic_local *cp = dev->cnic_priv;
4978	struct cnic_uio_dev *udev = cp->udev;
4979	void *rx_ring;
4980
4981	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
4982		return;
4983
4984	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4985		cnic_shutdown_bnx2_rx_ring(dev);
4986	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4987		u32 cli = cp->ethdev->iscsi_l2_client_id;
4988		u32 cid = cp->ethdev->iscsi_l2_cid;
4989		union l5cm_specific_data l5_data;
4990		int i;
4991
4992		cnic_ring_ctl(dev, cid, cli, 0);
4993
4994		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4995
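		/* The HALT ramrod carries the client id in its data field */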
4996		l5_data.phy_address.lo = cli;
4997		l5_data.phy_address.hi = 0;
4998		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
4999			cid, ETH_CONNECTION_TYPE, &l5_data);
5000		i = 0;
5001		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5002		       ++i < 10)
5003			msleep(1);
5004
5005		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5006			netdev_err(dev->netdev,
5007				"iSCSI CLIENT_HALT did not complete\n");
5008		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5009
5010		memset(&l5_data, 0, sizeof(l5_data));
5011		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5012			cid, NONE_CONNECTION_TYPE, &l5_data);
5013		msleep(10);
5014	}
5015	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5016	rx_ring = udev->l2_ring + BCM_PAGE_SIZE;
5017	memset(rx_ring, 0, BCM_PAGE_SIZE);
5018}
5019
5020static int cnic_register_netdev(struct cnic_dev *dev)
5021{
5022	struct cnic_local *cp = dev->cnic_priv;
5023	struct cnic_eth_dev *ethdev = cp->ethdev;
5024	int err;
5025
5026	if (!ethdev)
5027		return -ENODEV;
5028
5029	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5030		return 0;
5031
5032	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5033	if (err)
5034		netdev_err(dev->netdev, "register_cnic failed\n");
5035
5036	return err;
5037}
5038
5039static void cnic_unregister_netdev(struct cnic_dev *dev)
5040{
5041	struct cnic_local *cp = dev->cnic_priv;
5042	struct cnic_eth_dev *ethdev = cp->ethdev;
5043
5044	if (!ethdev)
5045		return;
5046
5047	ethdev->drv_unregister_cnic(dev->netdev);
5048}
5049
5050static int cnic_start_hw(struct cnic_dev *dev)
5051{
5052	struct cnic_local *cp = dev->cnic_priv;
5053	struct cnic_eth_dev *ethdev = cp->ethdev;
5054	int err;
5055
5056	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5057		return -EALREADY;
5058
5059	dev->regview = ethdev->io_base;
5060	pci_dev_get(dev->pcidev);
5061	cp->func = PCI_FUNC(dev->pcidev->devfn);
5062	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5063	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5064
5065	err = cp->alloc_resc(dev);
5066	if (err) {
5067		netdev_err(dev->netdev, "allocate resource failure\n");
5068		goto err1;
5069	}
5070
5071	err = cp->start_hw(dev);
5072	if (err)
5073		goto err1;
5074
5075	err = cnic_cm_open(dev);
5076	if (err)
5077		goto err1;
5078
5079	set_bit(CNIC_F_CNIC_UP, &dev->flags);
5080
5081	cp->enable_int(dev);
5082
5083	return 0;
5084
5085err1:
5086	cp->free_resc(dev);
5087	pci_dev_put(dev->pcidev);
5088	return err;
5089}
5090
5091static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5092{
5093	cnic_disable_bnx2_int_sync(dev);
5094
5095	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5096	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5097
5098	cnic_init_context(dev, KWQ_CID);
5099	cnic_init_context(dev, KCQ_CID);
5100
5101	cnic_setup_5709_context(dev, 0);
5102	cnic_free_irq(dev);
5103
5104	cnic_free_resc(dev);
5105}
5106
5107
5108static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5109{
5110	struct cnic_local *cp = dev->cnic_priv;
5111
5112	cnic_free_irq(dev);
5113	*cp->kcq1.hw_prod_idx_ptr = 0;
5114	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5115		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
5116	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5117	cnic_free_resc(dev);
5118}
5119
5120static void cnic_stop_hw(struct cnic_dev *dev)
5121{
5122	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5123		struct cnic_local *cp = dev->cnic_priv;
5124		int i = 0;
5125
5126		/* Need to wait for the ring shutdown event to complete
5127		 * before clearing the CNIC_UP flag.
5128		 */
5129		while (cp->udev->uio_dev != -1 && i < 15) {
5130			msleep(100);
5131			i++;
5132		}
5133		cnic_shutdown_rings(dev);
5134		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5135		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
5136		synchronize_rcu();
5137		cnic_cm_shutdown(dev);
5138		cp->stop_hw(dev);
5139		pci_dev_put(dev->pcidev);
5140	}
5141}
5142
5143static void cnic_free_dev(struct cnic_dev *dev)
5144{
5145	int i = 0;
5146
5147	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5148		msleep(100);
5149		i++;
5150	}
5151	if (atomic_read(&dev->ref_count) != 0)
5152		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5153
5154	netdev_info(dev->netdev, "Removed CNIC device\n");
5155	dev_put(dev->netdev);
5156	kfree(dev);
5157}
5158
5159static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5160				       struct pci_dev *pdev)
5161{
5162	struct cnic_dev *cdev;
5163	struct cnic_local *cp;
5164	int alloc_size;
5165
5166	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5167
5168	cdev = kzalloc(alloc_size, GFP_KERNEL);
5169	if (cdev == NULL) {
5170		netdev_err(dev, "allocate dev struct failure\n");
5171		return NULL;
5172	}
5173
5174	cdev->netdev = dev;
5175	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5176	cdev->register_device = cnic_register_device;
5177	cdev->unregister_device = cnic_unregister_device;
5178	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5179
5180	cp = cdev->cnic_priv;
5181	cp->dev = cdev;
5182	cp->l2_single_buf_size = 0x400;
5183	cp->l2_rx_ring_size = 3;
5184
5185	spin_lock_init(&cp->cnic_ulp_lock);
5186
5187	netdev_info(dev, "Added CNIC device\n");
5188
5189	return cdev;
5190}
5191
5192static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5193{
5194	struct pci_dev *pdev;
5195	struct cnic_dev *cdev;
5196	struct cnic_local *cp;
5197	struct cnic_eth_dev *ethdev = NULL;
5198	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5199
5200	probe = symbol_get(bnx2_cnic_probe);
5201	if (probe) {
5202		ethdev = (*probe)(dev);
5203		symbol_put(bnx2_cnic_probe);
5204	}
5205	if (!ethdev)
5206		return NULL;
5207
5208	pdev = ethdev->pdev;
5209	if (!pdev)
5210		return NULL;
5211
5212	dev_hold(dev);
5213	pci_dev_get(pdev);
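	/* Early 5709/5709S revisions (below 0x10) are not supported */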
5214	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5215	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5216	    (pdev->revision < 0x10)) {
5217		pci_dev_put(pdev);
5218		goto cnic_err;
5219	}
5220	pci_dev_put(pdev);
5221
5222	cdev = cnic_alloc_dev(dev, pdev);
5223	if (cdev == NULL)
5224		goto cnic_err;
5225
5226	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5227	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5228
5229	cp = cdev->cnic_priv;
5230	cp->ethdev = ethdev;
5231	cdev->pcidev = pdev;
5232	cp->chip_id = ethdev->chip_id;
5233
5234	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5235
5236	cp->cnic_ops = &cnic_bnx2_ops;
5237	cp->start_hw = cnic_start_bnx2_hw;
5238	cp->stop_hw = cnic_stop_bnx2_hw;
5239	cp->setup_pgtbl = cnic_setup_page_tbl;
5240	cp->alloc_resc = cnic_alloc_bnx2_resc;
5241	cp->free_resc = cnic_free_resc;
5242	cp->start_cm = cnic_cm_init_bnx2_hw;
5243	cp->stop_cm = cnic_cm_stop_bnx2_hw;
5244	cp->enable_int = cnic_enable_bnx2_int;
5245	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5246	cp->close_conn = cnic_close_bnx2_conn;
5247	return cdev;
5248
5249cnic_err:
5250	dev_put(dev);
5251	return NULL;
5252}
5253
5254static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5255{
5256	struct pci_dev *pdev;
5257	struct cnic_dev *cdev;
5258	struct cnic_local *cp;
5259	struct cnic_eth_dev *ethdev = NULL;
5260	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5261
5262	probe = symbol_get(bnx2x_cnic_probe);
5263	if (probe) {
5264		ethdev = (*probe)(dev);
5265		symbol_put(bnx2x_cnic_probe);
5266	}
5267	if (!ethdev)
5268		return NULL;
5269
5270	pdev = ethdev->pdev;
5271	if (!pdev)
5272		return NULL;
5273
5274	dev_hold(dev);
5275	cdev = cnic_alloc_dev(dev, pdev);
5276	if (cdev == NULL) {
5277		dev_put(dev);
5278		return NULL;
5279	}
5280
5281	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5282	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5283
5284	cp = cdev->cnic_priv;
5285	cp->ethdev = ethdev;
5286	cdev->pcidev = pdev;
5287	cp->chip_id = ethdev->chip_id;
5288
5289	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5290		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5291	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
5292	    !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
5293		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5294
5295	memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
5296
5297	cp->cnic_ops = &cnic_bnx2x_ops;
5298	cp->start_hw = cnic_start_bnx2x_hw;
5299	cp->stop_hw = cnic_stop_bnx2x_hw;
5300	cp->setup_pgtbl = cnic_setup_page_tbl_le;
5301	cp->alloc_resc = cnic_alloc_bnx2x_resc;
5302	cp->free_resc = cnic_free_resc;
5303	cp->start_cm = cnic_cm_init_bnx2x_hw;
5304	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5305	cp->enable_int = cnic_enable_bnx2x_int;
5306	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5307	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
5308		cp->ack_int = cnic_ack_bnx2x_e2_msix;
5309	else
5310		cp->ack_int = cnic_ack_bnx2x_msix;
5311	cp->close_conn = cnic_close_bnx2x_conn;
5312	return cdev;
5313}
5314
5315static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5316{
5317	struct ethtool_drvinfo drvinfo;
5318	struct cnic_dev *cdev = NULL;
5319
5320	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5321		memset(&drvinfo, 0, sizeof(drvinfo));
5322		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5323
5324		if (!strcmp(drvinfo.driver, "bnx2"))
5325			cdev = init_bnx2_cnic(dev);
5326		if (!strcmp(drvinfo.driver, "bnx2x"))
5327			cdev = init_bnx2x_cnic(dev);
5328		if (cdev) {
5329			write_lock(&cnic_dev_lock);
5330			list_add(&cdev->list, &cnic_dev_list);
5331			write_unlock(&cnic_dev_lock);
5332		}
5333	}
5334	return cdev;
5335}
5336
5337static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5338			      u16 vlan_id)
5339{
5340	int if_type;
5341
5342	rcu_read_lock();
5343	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5344		struct cnic_ulp_ops *ulp_ops;
5345		void *ctx;
5346
5347		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
5348		if (!ulp_ops || !ulp_ops->indicate_netevent)
5349			continue;
5350
5351		ctx = cp->ulp_handle[if_type];
5352
5353		ulp_ops->indicate_netevent(ctx, event, vlan_id);
5354	}
5355	rcu_read_unlock();
5356}
5357
5358/**
5359 * cnic_netdev_event - netdev event handler
5360 */
5361static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5362							 void *ptr)
5363{
5364	struct net_device *netdev = ptr;
5365	struct cnic_dev *dev;
5366	int new_dev = 0;
5367
5368	dev = cnic_from_netdev(netdev);
5369
5370	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
5371		/* Check for the hot-plug device */
5372		dev = is_cnic_dev(netdev);
5373		if (dev) {
5374			new_dev = 1;
5375			cnic_hold(dev);
5376		}
5377	}
5378	if (dev) {
5379		struct cnic_local *cp = dev->cnic_priv;
5380
5381		if (new_dev)
5382			cnic_ulp_init(dev);
5383		else if (event == NETDEV_UNREGISTER)
5384			cnic_ulp_exit(dev);
5385
5386		if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
5387			if (cnic_register_netdev(dev) != 0) {
5388				cnic_put(dev);
5389				goto done;
5390			}
5391			if (!cnic_start_hw(dev))
5392				cnic_ulp_start(dev);
5393		}
5394
5395		cnic_rcv_netevent(cp, event, 0);
5396
5397		if (event == NETDEV_GOING_DOWN) {
5398			cnic_ulp_stop(dev);
5399			cnic_stop_hw(dev);
5400			cnic_unregister_netdev(dev);
5401		} else if (event == NETDEV_UNREGISTER) {
5402			write_lock(&cnic_dev_lock);
5403			list_del_init(&dev->list);
5404			write_unlock(&cnic_dev_lock);
5405
5406			cnic_put(dev);
5407			cnic_free_dev(dev);
5408			goto done;
5409		}
5410		cnic_put(dev);
5411	} else {
5412		struct net_device *realdev;
5413		u16 vid;
5414
5415		vid = cnic_get_vlan(netdev, &realdev);
5416		if (realdev) {
5417			dev = cnic_from_netdev(realdev);
5418			if (dev) {
5419				vid |= VLAN_TAG_PRESENT;
5420				cnic_rcv_netevent(dev->cnic_priv, event, vid);
5421				cnic_put(dev);
5422			}
5423		}
5424	}
5425done:
5426	return NOTIFY_DONE;
5427}
5428
5429static struct notifier_block cnic_netdev_notifier = {
5430	.notifier_call = cnic_netdev_event
5431};
5432
5433static void cnic_release(void)
5434{
5435	struct cnic_dev *dev;
5436	struct cnic_uio_dev *udev;
5437
5438	while (!list_empty(&cnic_dev_list)) {
5439		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
5440		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5441			cnic_ulp_stop(dev);
5442			cnic_stop_hw(dev);
5443		}
5444
5445		cnic_ulp_exit(dev);
5446		cnic_unregister_netdev(dev);
5447		list_del_init(&dev->list);
5448		cnic_free_dev(dev);
5449	}
5450	while (!list_empty(&cnic_udev_list)) {
5451		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5452				  list);
5453		cnic_free_uio(udev);
5454	}
5455}
5456
5457static int __init cnic_init(void)
5458{
5459	int rc = 0;
5460
5461	pr_info("%s", version);
5462
5463	rc = register_netdevice_notifier(&cnic_netdev_notifier);
5464	if (rc) {
5465		cnic_release();
5466		return rc;
5467	}
5468
5469	cnic_wq = create_singlethread_workqueue("cnic_wq");
5470	if (!cnic_wq) {
5471		cnic_release();
5472		unregister_netdevice_notifier(&cnic_netdev_notifier);
5473		return -ENOMEM;
5474	}
5475
5476	return 0;
5477}
5478
5479static void __exit cnic_exit(void)
5480{
5481	unregister_netdevice_notifier(&cnic_netdev_notifier);
5482	cnic_release();
5483	destroy_workqueue(cnic_wq);
5484}
5485
5486module_init(cnic_init);
5487module_exit(cnic_exit);