   1/* cnic.c: QLogic CNIC core network driver.
   2 *
   3 * Copyright (c) 2006-2014 Broadcom Corporation
   4 * Copyright (c) 2014-2015 QLogic Corporation
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 *
  10 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
  11 * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
  12 * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
  13 */
  14
  15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  16
  17#include <linux/module.h>
  18
  19#include <linux/kernel.h>
  20#include <linux/errno.h>
  21#include <linux/list.h>
  22#include <linux/slab.h>
  23#include <linux/pci.h>
  24#include <linux/init.h>
  25#include <linux/netdevice.h>
  26#include <linux/uio_driver.h>
  27#include <linux/in.h>
  28#include <linux/dma-mapping.h>
  29#include <linux/delay.h>
  30#include <linux/ethtool.h>
  31#include <linux/if_vlan.h>
  32#include <linux/prefetch.h>
  33#include <linux/random.h>
  34#if IS_ENABLED(CONFIG_VLAN_8021Q)
  35#define BCM_VLAN 1
  36#endif
  37#include <net/ip.h>
  38#include <net/tcp.h>
  39#include <net/route.h>
  40#include <net/ipv6.h>
  41#include <net/ip6_route.h>
  42#include <net/ip6_checksum.h>
  43#include <scsi/iscsi_if.h>
  44
  45#define BCM_CNIC	1
  46#include "cnic_if.h"
  47#include "bnx2.h"
  48#include "bnx2x/bnx2x.h"
  49#include "bnx2x/bnx2x_reg.h"
  50#include "bnx2x/bnx2x_fw_defs.h"
  51#include "bnx2x/bnx2x_hsi.h"
  52#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
  53#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
  54#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
  55#include "cnic.h"
  56#include "cnic_defs.h"
  57
  58#define CNIC_MODULE_NAME	"cnic"
  59
  60static char version[] =
  61	"QLogic " CNIC_MODULE_NAME " Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
  62
  63MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
  64	      "Chen (zongxi@broadcom.com)");
  65MODULE_DESCRIPTION("QLogic cnic Driver");
  66MODULE_LICENSE("GPL");
  67MODULE_VERSION(CNIC_MODULE_VERSION);
  68
  69/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
  70static LIST_HEAD(cnic_dev_list);
  71static LIST_HEAD(cnic_udev_list);
  72static DEFINE_RWLOCK(cnic_dev_lock);
  73static DEFINE_MUTEX(cnic_lock);
  74
  75static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
  76
  77/* helper function, assuming cnic_lock is held */
  78static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
  79{
  80	return rcu_dereference_protected(cnic_ulp_tbl[type],
  81					 lockdep_is_held(&cnic_lock));
  82}
  83
  84static int cnic_service_bnx2(void *, void *);
  85static int cnic_service_bnx2x(void *, void *);
  86static int cnic_ctl(void *, struct cnic_ctl_info *);
  87
  88static struct cnic_ops cnic_bnx2_ops = {
  89	.cnic_owner	= THIS_MODULE,
  90	.cnic_handler	= cnic_service_bnx2,
  91	.cnic_ctl	= cnic_ctl,
  92};
  93
  94static struct cnic_ops cnic_bnx2x_ops = {
  95	.cnic_owner	= THIS_MODULE,
  96	.cnic_handler	= cnic_service_bnx2x,
  97	.cnic_ctl	= cnic_ctl,
  98};
  99
 100static struct workqueue_struct *cnic_wq;
 101
 102static void cnic_shutdown_rings(struct cnic_dev *);
 103static void cnic_init_rings(struct cnic_dev *);
 104static int cnic_cm_set_pg(struct cnic_sock *);
 105
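    /* The L2 rings below are exported to a single privileged user-space
     * consumer (typically the iscsiuio daemon).  udev->uio_dev records the
     * active minor so a second open fails with -EBUSY, and the rings are
     * recycled under rtnl_lock so the reset cannot race with netdev events.
     */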
 106static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
 107{
 108	struct cnic_uio_dev *udev = uinfo->priv;
 109	struct cnic_dev *dev;
 110
 111	if (!capable(CAP_NET_ADMIN))
 112		return -EPERM;
 113
 114	if (udev->uio_dev != -1)
 115		return -EBUSY;
 116
 117	rtnl_lock();
 118	dev = udev->dev;
 119
 120	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
 121		rtnl_unlock();
 122		return -ENODEV;
 123	}
 124
 125	udev->uio_dev = iminor(inode);
 126
 127	cnic_shutdown_rings(dev);
 128	cnic_init_rings(dev);
 129	rtnl_unlock();
 130
 131	return 0;
 132}
 133
 134static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
 135{
 136	struct cnic_uio_dev *udev = uinfo->priv;
 137
 138	udev->uio_dev = -1;
 139	return 0;
 140}
 141
 142static inline void cnic_hold(struct cnic_dev *dev)
 143{
 144	atomic_inc(&dev->ref_count);
 145}
 146
 147static inline void cnic_put(struct cnic_dev *dev)
 148{
 149	atomic_dec(&dev->ref_count);
 150}
 151
 152static inline void csk_hold(struct cnic_sock *csk)
 153{
 154	atomic_inc(&csk->ref_count);
 155}
 156
 157static inline void csk_put(struct cnic_sock *csk)
 158{
 159	atomic_dec(&csk->ref_count);
 160}
 161
 162static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
 163{
 164	struct cnic_dev *cdev;
 165
 166	read_lock(&cnic_dev_lock);
 167	list_for_each_entry(cdev, &cnic_dev_list, list) {
 168		if (netdev == cdev->netdev) {
 169			cnic_hold(cdev);
 170			read_unlock(&cnic_dev_lock);
 171			return cdev;
 172		}
 173	}
 174	read_unlock(&cnic_dev_lock);
 175	return NULL;
 176}
 177
 178static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
 179{
 180	atomic_inc(&ulp_ops->ref_count);
 181}
 182
 183static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
 184{
 185	atomic_dec(&ulp_ops->ref_count);
 186}
 187
 188static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 189{
 190	struct cnic_local *cp = dev->cnic_priv;
 191	struct cnic_eth_dev *ethdev = cp->ethdev;
 192	struct drv_ctl_info info;
 193	struct drv_ctl_io *io = &info.data.io;
 194
 195	memset(&info, 0, sizeof(struct drv_ctl_info));
 196	info.cmd = DRV_CTL_CTX_WR_CMD;
 197	io->cid_addr = cid_addr;
 198	io->offset = off;
 199	io->data = val;
 200	ethdev->drv_ctl(dev->netdev, &info);
 201}
 202
 203static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
 204{
 205	struct cnic_local *cp = dev->cnic_priv;
 206	struct cnic_eth_dev *ethdev = cp->ethdev;
 207	struct drv_ctl_info info;
 208	struct drv_ctl_io *io = &info.data.io;
 209
 210	memset(&info, 0, sizeof(struct drv_ctl_info));
 211	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
 212	io->offset = off;
 213	io->dma_addr = addr;
 214	ethdev->drv_ctl(dev->netdev, &info);
 215}
 216
 217static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
 218{
 219	struct cnic_local *cp = dev->cnic_priv;
 220	struct cnic_eth_dev *ethdev = cp->ethdev;
 221	struct drv_ctl_info info;
 222	struct drv_ctl_l2_ring *ring = &info.data.ring;
 223
 224	memset(&info, 0, sizeof(struct drv_ctl_info));
 225	if (start)
 226		info.cmd = DRV_CTL_START_L2_CMD;
 227	else
 228		info.cmd = DRV_CTL_STOP_L2_CMD;
 229
 230	ring->cid = cid;
 231	ring->client_id = cl_id;
 232	ethdev->drv_ctl(dev->netdev, &info);
 233}
 234
 235static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
 236{
 237	struct cnic_local *cp = dev->cnic_priv;
 238	struct cnic_eth_dev *ethdev = cp->ethdev;
 239	struct drv_ctl_info info;
 240	struct drv_ctl_io *io = &info.data.io;
 241
 242	memset(&info, 0, sizeof(struct drv_ctl_info));
 243	info.cmd = DRV_CTL_IO_WR_CMD;
 244	io->offset = off;
 245	io->data = val;
 246	ethdev->drv_ctl(dev->netdev, &info);
 247}
 248
 249static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
 250{
 251	struct cnic_local *cp = dev->cnic_priv;
 252	struct cnic_eth_dev *ethdev = cp->ethdev;
 253	struct drv_ctl_info info;
 254	struct drv_ctl_io *io = &info.data.io;
 255
 256	memset(&info, 0, sizeof(struct drv_ctl_info));
 257	info.cmd = DRV_CTL_IO_RD_CMD;
 258	io->offset = off;
 259	ethdev->drv_ctl(dev->netdev, &info);
 260	return io->data;
 261}
 262
 263static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
 264{
 265	struct cnic_local *cp = dev->cnic_priv;
 266	struct cnic_eth_dev *ethdev = cp->ethdev;
 267	struct drv_ctl_info info;
 268	struct fcoe_capabilities *fcoe_cap =
 269		&info.data.register_data.fcoe_features;
 270
 271	memset(&info, 0, sizeof(struct drv_ctl_info));
 272	if (reg) {
 273		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
 274		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
 275			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
 276	} else {
 277		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
 278	}
 279
 280	info.data.ulp_type = ulp_type;
 281	info.drv_state = state;
 282	ethdev->drv_ctl(dev->netdev, &info);
 283}
 284
 285static int cnic_in_use(struct cnic_sock *csk)
 286{
 287	return test_bit(SK_F_INUSE, &csk->flags);
 288}
 289
 290static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
 291{
 292	struct cnic_local *cp = dev->cnic_priv;
 293	struct cnic_eth_dev *ethdev = cp->ethdev;
 294	struct drv_ctl_info info;
 295
 296	memset(&info, 0, sizeof(struct drv_ctl_info));
 297	info.cmd = cmd;
 298	info.data.credit.credit_count = count;
 299	ethdev->drv_ctl(dev->netdev, &info);
 300}
 301
 302static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
 303{
 304	u32 i;
 305
 306	if (!cp->ctx_tbl)
 307		return -EINVAL;
 308
 309	for (i = 0; i < cp->max_cid_space; i++) {
 310		if (cp->ctx_tbl[i].cid == cid) {
 311			*l5_cid = i;
 312			return 0;
 313		}
 314	}
 315	return -EINVAL;
 316}
 317
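    /* Send an iSCSI netlink event to user space.  With a csk this builds an
     * ISCSI_KEVENT_PATH_REQ carrying the destination IP, VLAN and path MTU
     * so user space can resolve the next-hop MAC; PATH_REQ is retried a few
     * times in case no listener has attached yet.  Without a csk it signals
     * ISCSI_KEVENT_IF_DOWN. */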
 318static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 319			   struct cnic_sock *csk)
 320{
 321	struct iscsi_path path_req;
 322	char *buf = NULL;
 323	u16 len = 0;
 324	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
 325	struct cnic_ulp_ops *ulp_ops;
 326	struct cnic_uio_dev *udev = cp->udev;
 327	int rc = 0, retry = 0;
 328
 329	if (!udev || udev->uio_dev == -1)
 330		return -ENODEV;
 331
 332	if (csk) {
 333		len = sizeof(path_req);
 334		buf = (char *) &path_req;
 335		memset(&path_req, 0, len);
 336
 337		msg_type = ISCSI_KEVENT_PATH_REQ;
 338		path_req.handle = (u64) csk->l5_cid;
 339		if (test_bit(SK_F_IPV6, &csk->flags)) {
 340			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
 341			       sizeof(struct in6_addr));
 342			path_req.ip_addr_len = 16;
 343		} else {
 344			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
 345			       sizeof(struct in_addr));
 346			path_req.ip_addr_len = 4;
 347		}
 348		path_req.vlan_id = csk->vlan_id;
 349		path_req.pmtu = csk->mtu;
 350	}
 351
 352	while (retry < 3) {
 353		rc = 0;
 354		rcu_read_lock();
 355		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
 356		if (ulp_ops)
 357			rc = ulp_ops->iscsi_nl_send_msg(
 358				cp->ulp_handle[CNIC_ULP_ISCSI],
 359				msg_type, buf, len);
 360		rcu_read_unlock();
 361		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
 362			break;
 363
 364		msleep(100);
 365		retry++;
 366	}
 367	return rc;
 368}
 369
 370static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
 371
 372static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 373				  char *buf, u16 len)
 374{
 375	int rc = -EINVAL;
 376
 377	switch (msg_type) {
 378	case ISCSI_UEVENT_PATH_UPDATE: {
 379		struct cnic_local *cp;
 380		u32 l5_cid;
 381		struct cnic_sock *csk;
 382		struct iscsi_path *path_resp;
 383
 384		if (len < sizeof(*path_resp))
 385			break;
 386
 387		path_resp = (struct iscsi_path *) buf;
 388		cp = dev->cnic_priv;
 389		l5_cid = (u32) path_resp->handle;
 390		if (l5_cid >= MAX_CM_SK_TBL_SZ)
 391			break;
 392
 393		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
 394			rc = -ENODEV;
 395			break;
 396		}
 397		csk = &cp->csk_tbl[l5_cid];
 398		csk_hold(csk);
 399		if (cnic_in_use(csk) &&
 400		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
 401
 402			csk->vlan_id = path_resp->vlan_id;
 403
 404			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
 405			if (test_bit(SK_F_IPV6, &csk->flags))
 406				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
 407				       sizeof(struct in6_addr));
 408			else
 409				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
 410				       sizeof(struct in_addr));
 411
 412			if (is_valid_ether_addr(csk->ha)) {
 413				cnic_cm_set_pg(csk);
 414			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
 415				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 416
 417				cnic_cm_upcall(cp, csk,
 418					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
 419				clear_bit(SK_F_CONNECT_START, &csk->flags);
 420			}
 421		}
 422		csk_put(csk);
 423		rc = 0;
 424	}
 425	}
 426
 427	return rc;
 428}
 429
 430static int cnic_offld_prep(struct cnic_sock *csk)
 431{
 432	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 433		return 0;
 434
 435	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
 436		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
 437		return 0;
 438	}
 439
 440	return 1;
 441}
 442
 443static int cnic_close_prep(struct cnic_sock *csk)
 444{
 445	clear_bit(SK_F_CONNECT_START, &csk->flags);
 446	smp_mb__after_atomic();
 447
 448	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 449		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 450			msleep(1);
 451
 452		return 1;
 453	}
 454	return 0;
 455}
 456
 457static int cnic_abort_prep(struct cnic_sock *csk)
 458{
 459	clear_bit(SK_F_CONNECT_START, &csk->flags);
 460	smp_mb__after_atomic();
 461
 462	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 463		msleep(1);
 464
 465	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 466		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
 467		return 1;
 468	}
 469
 470	return 0;
 471}
 472
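    /* Registration entry point for ULP drivers (bnx2i, bnx2fc): publish the
     * ops in cnic_ulp_tbl under cnic_lock, then walk the device list under
     * rtnl_lock and call cnic_init() once for each device that has not yet
     * seen this ULP type. */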
 473int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 474{
 475	struct cnic_dev *dev;
 476
 477	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 478		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 479		return -EINVAL;
 480	}
 481	mutex_lock(&cnic_lock);
 482	if (cnic_ulp_tbl_prot(ulp_type)) {
 483		pr_err("%s: Type %d has already been registered\n",
 484		       __func__, ulp_type);
 485		mutex_unlock(&cnic_lock);
 486		return -EBUSY;
 487	}
 488
 489	read_lock(&cnic_dev_lock);
 490	list_for_each_entry(dev, &cnic_dev_list, list) {
 491		struct cnic_local *cp = dev->cnic_priv;
 492
 493		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
 494	}
 495	read_unlock(&cnic_dev_lock);
 496
 497	atomic_set(&ulp_ops->ref_count, 0);
 498	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
 499	mutex_unlock(&cnic_lock);
 500
 501	/* Prevent race conditions with netdev_event */
 502	rtnl_lock();
 503	list_for_each_entry(dev, &cnic_dev_list, list) {
 504		struct cnic_local *cp = dev->cnic_priv;
 505
 506		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
 507			ulp_ops->cnic_init(dev);
 508	}
 509	rtnl_unlock();
 510
 511	return 0;
 512}
 513
 514int cnic_unregister_driver(int ulp_type)
 515{
 516	struct cnic_dev *dev;
 517	struct cnic_ulp_ops *ulp_ops;
 518	int i = 0;
 519
 520	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 521		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 522		return -EINVAL;
 523	}
 524	mutex_lock(&cnic_lock);
 525	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 526	if (!ulp_ops) {
 527		pr_err("%s: Type %d has not been registered\n",
 528		       __func__, ulp_type);
 529		goto out_unlock;
 530	}
 531	read_lock(&cnic_dev_lock);
 532	list_for_each_entry(dev, &cnic_dev_list, list) {
 533		struct cnic_local *cp = dev->cnic_priv;
 534
 535		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
 536			pr_err("%s: Type %d still has devices registered\n",
 537			       __func__, ulp_type);
 538			read_unlock(&cnic_dev_lock);
 539			goto out_unlock;
 540		}
 541	}
 542	read_unlock(&cnic_dev_lock);
 543
 544	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
 545
 546	mutex_unlock(&cnic_lock);
 547	synchronize_rcu();
 548	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
 549		msleep(100);
 550		i++;
 551	}
 552
 553	if (atomic_read(&ulp_ops->ref_count) != 0)
 554		pr_warn("%s: Failed waiting for ref count to go to zero\n",
 555			__func__);
 556	return 0;
 557
 558out_unlock:
 559	mutex_unlock(&cnic_lock);
 560	return -EINVAL;
 561}
 562
 563static int cnic_start_hw(struct cnic_dev *);
 564static void cnic_stop_hw(struct cnic_dev *);
 565
 566static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 567				void *ulp_ctx)
 568{
 569	struct cnic_local *cp = dev->cnic_priv;
 570	struct cnic_ulp_ops *ulp_ops;
 571
 572	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 573		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 574		return -EINVAL;
 575	}
 576	mutex_lock(&cnic_lock);
 577	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
 578		pr_err("%s: Driver with type %d has not been registered\n",
 579		       __func__, ulp_type);
 580		mutex_unlock(&cnic_lock);
 581		return -EAGAIN;
 582	}
 583	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
 584		pr_err("%s: Type %d has already been registered to this device\n",
 585		       __func__, ulp_type);
 586		mutex_unlock(&cnic_lock);
 587		return -EBUSY;
 588	}
 589
 590	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
 591	cp->ulp_handle[ulp_type] = ulp_ctx;
 592	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 593	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
 594	cnic_hold(dev);
 595
 596	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
 597		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
 598			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
 599
 600	mutex_unlock(&cnic_lock);
 601
 602	cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
 603
 604	return 0;
 605
 606}
 607EXPORT_SYMBOL(cnic_register_driver);
 608
 609static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 610{
 611	struct cnic_local *cp = dev->cnic_priv;
 612	int i = 0;
 613
 614	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 615		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 616		return -EINVAL;
 617	}
 618
 619	if (ulp_type == CNIC_ULP_ISCSI)
 620		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 621
 622	mutex_lock(&cnic_lock);
 623	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
 624		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
 625		cnic_put(dev);
 626	} else {
 627		pr_err("%s: device not registered to this ulp type %d\n",
 628		       __func__, ulp_type);
 629		mutex_unlock(&cnic_lock);
 630		return -EINVAL;
 631	}
 632	mutex_unlock(&cnic_lock);
 633
 634	if (ulp_type == CNIC_ULP_FCOE)
 635		dev->fcoe_cap = NULL;
 636
 637	synchronize_rcu();
 638
 639	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
 640	       i < 20) {
 641		msleep(100);
 642		i++;
 643	}
 644	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
 645		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 646
 647	if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
 648		cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
 649	else
 650		cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
 651
 652	return 0;
 653}
 654EXPORT_SYMBOL(cnic_unregister_driver);
 655
 656static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
 657			    u32 next)
 658{
 659	id_tbl->start = start_id;
 660	id_tbl->max = size;
 661	id_tbl->next = next;
 662	spin_lock_init(&id_tbl->lock);
 663	id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
 664	if (!id_tbl->table)
 665		return -ENOMEM;
 666
 667	return 0;
 668}
 669
 670static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
 671{
 672	bitmap_free(id_tbl->table);
 673	id_tbl->table = NULL;
 674}
 675
 676static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
 677{
 678	int ret = -1;
 679
 680	id -= id_tbl->start;
 681	if (id >= id_tbl->max)
 682		return ret;
 683
 684	spin_lock(&id_tbl->lock);
 685	if (!test_bit(id, id_tbl->table)) {
 686		set_bit(id, id_tbl->table);
 687		ret = 0;
 688	}
 689	spin_unlock(&id_tbl->lock);
 690	return ret;
 691}
 692
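    /* Circular id allocator: scan the bitmap from ->next to the end, then
     * wrap and scan [0, ->next).  Note that the wrap-around of ->next below
     * assumes ->max is a power of two ((id + 1) & (max - 1)). */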
 693/* Returns -1 if not successful */
 694static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
 695{
 696	u32 id;
 697
 698	spin_lock(&id_tbl->lock);
 699	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
 700	if (id >= id_tbl->max) {
 701		id = -1;
 702		if (id_tbl->next != 0) {
 703			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
 704			if (id >= id_tbl->next)
 705				id = -1;
 706		}
 707	}
 708
 709	if (id < id_tbl->max) {
 710		set_bit(id, id_tbl->table);
 711		id_tbl->next = (id + 1) & (id_tbl->max - 1);
 712		id += id_tbl->start;
 713	}
 714
 715	spin_unlock(&id_tbl->lock);
 716
 717	return id;
 718}
 719
 720static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
 721{
 722	if (id == -1)
 723		return;
 724
 725	id -= id_tbl->start;
 726	if (id >= id_tbl->max)
 727		return;
 728
 729	clear_bit(id, id_tbl->table);
 730}
 731
 732static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 733{
 734	int i;
 735
 736	if (!dma->pg_arr)
 737		return;
 738
 739	for (i = 0; i < dma->num_pages; i++) {
 740		if (dma->pg_arr[i]) {
 741			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
 742					  dma->pg_arr[i], dma->pg_map_arr[i]);
 743			dma->pg_arr[i] = NULL;
 744		}
 745	}
 746	if (dma->pgtbl) {
 747		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 748				  dma->pgtbl, dma->pgtbl_map);
 749		dma->pgtbl = NULL;
 750	}
 751	kfree(dma->pg_arr);
 752	dma->pg_arr = NULL;
 753	dma->num_pages = 0;
 754}
 755
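    /* The chip's page table stores each 64-bit DMA address as two 32-bit
     * words.  This variant writes the high word first;
     * cnic_setup_page_tbl_le() below writes the low word first for blocks
     * that expect little-endian entry order. */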
 756static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 757{
 758	int i;
 759	__le32 *page_table = (__le32 *) dma->pgtbl;
 760
 761	for (i = 0; i < dma->num_pages; i++) {
 762		/* Each entry needs to be in big endian format. */
 763		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 764		page_table++;
 765		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 766		page_table++;
 767	}
 768}
 769
 770static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
 771{
 772	int i;
 773	__le32 *page_table = (__le32 *) dma->pgtbl;
 774
 775	for (i = 0; i < dma->num_pages; i++) {
 776		/* Each entry needs to be in little endian format. */
 777		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 778		page_table++;
 779		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 780		page_table++;
 781	}
 782}
 783
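    /* A single allocation backs both arrays: pg_arr holds the page virtual
     * addresses and pg_map_arr, pointing just past it, holds the matching
     * DMA addresses.  With use_pg_tbl set, a hardware page table of 8-byte
     * entries (one per page) is also allocated and filled via setup_pgtbl.
     */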
 784static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 785			  int pages, int use_pg_tbl)
 786{
 787	int i, size;
 788	struct cnic_local *cp = dev->cnic_priv;
 789
 790	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
 791	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
 792	if (dma->pg_arr == NULL)
 793		return -ENOMEM;
 794
 795	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
 796	dma->num_pages = pages;
 797
 798	for (i = 0; i < pages; i++) {
 799		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
 800						    CNIC_PAGE_SIZE,
 801						    &dma->pg_map_arr[i],
 802						    GFP_ATOMIC);
 803		if (dma->pg_arr[i] == NULL)
 804			goto error;
 805	}
 806	if (!use_pg_tbl)
 807		return 0;
 808
 809	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
 810			  ~(CNIC_PAGE_SIZE - 1);
 811	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 812					&dma->pgtbl_map, GFP_ATOMIC);
 813	if (dma->pgtbl == NULL)
 814		goto error;
 815
 816	cp->setup_pgtbl(dev, dma);
 817
 818	return 0;
 819
 820error:
 821	cnic_free_dma(dev, dma);
 822	return -ENOMEM;
 823}
 824
 825static void cnic_free_context(struct cnic_dev *dev)
 826{
 827	struct cnic_local *cp = dev->cnic_priv;
 828	int i;
 829
 830	for (i = 0; i < cp->ctx_blks; i++) {
 831		if (cp->ctx_arr[i].ctx) {
 832			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
 833					  cp->ctx_arr[i].ctx,
 834					  cp->ctx_arr[i].mapping);
 835			cp->ctx_arr[i].ctx = NULL;
 836		}
 837	}
 838}
 839
 840static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
 841{
 842	if (udev->l2_buf) {
 843		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
 844				  udev->l2_buf, udev->l2_buf_map);
 845		udev->l2_buf = NULL;
 846	}
 847
 848	if (udev->l2_ring) {
 849		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
 850				  udev->l2_ring, udev->l2_ring_map);
 851		udev->l2_ring = NULL;
 852	}
 853
 854}
 855
 856static void __cnic_free_uio(struct cnic_uio_dev *udev)
 857{
 858	uio_unregister_device(&udev->cnic_uinfo);
 859
 860	__cnic_free_uio_rings(udev);
 861
 862	pci_dev_put(udev->pdev);
 863	kfree(udev);
 864}
 865
 866static void cnic_free_uio(struct cnic_uio_dev *udev)
 867{
 868	if (!udev)
 869		return;
 870
 871	write_lock(&cnic_dev_lock);
 872	list_del_init(&udev->list);
 873	write_unlock(&cnic_dev_lock);
 874	__cnic_free_uio(udev);
 875}
 876
 877static void cnic_free_resc(struct cnic_dev *dev)
 878{
 879	struct cnic_local *cp = dev->cnic_priv;
 880	struct cnic_uio_dev *udev = cp->udev;
 881
 882	if (udev) {
 883		udev->dev = NULL;
 884		cp->udev = NULL;
 885		if (udev->uio_dev == -1)
 886			__cnic_free_uio_rings(udev);
 887	}
 888
 889	cnic_free_context(dev);
 890	kfree(cp->ctx_arr);
 891	cp->ctx_arr = NULL;
 892	cp->ctx_blks = 0;
 893
 894	cnic_free_dma(dev, &cp->gbl_buf_info);
 895	cnic_free_dma(dev, &cp->kwq_info);
 896	cnic_free_dma(dev, &cp->kwq_16_data_info);
 897	cnic_free_dma(dev, &cp->kcq2.dma);
 898	cnic_free_dma(dev, &cp->kcq1.dma);
 899	kfree(cp->iscsi_tbl);
 900	cp->iscsi_tbl = NULL;
 901	kfree(cp->ctx_tbl);
 902	cp->ctx_tbl = NULL;
 903
 904	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
 905	cnic_free_id_tbl(&cp->cid_tbl);
 906}
 907
 908static int cnic_alloc_context(struct cnic_dev *dev)
 909{
 910	struct cnic_local *cp = dev->cnic_priv;
 911
 912	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
 913		int i, k, arr_size;
 914
 915		cp->ctx_blk_size = CNIC_PAGE_SIZE;
 916		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
 917		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
 918			   sizeof(struct cnic_ctx);
 919		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
 920		if (cp->ctx_arr == NULL)
 921			return -ENOMEM;
 922
 923		k = 0;
 924		for (i = 0; i < 2; i++) {
 925			u32 j, reg, off, lo, hi;
 926
 927			if (i == 0)
 928				off = BNX2_PG_CTX_MAP;
 929			else
 930				off = BNX2_ISCSI_CTX_MAP;
 931
 932			reg = cnic_reg_rd_ind(dev, off);
 933			lo = reg >> 16;
 934			hi = reg & 0xffff;
 935			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
 936				cp->ctx_arr[k].cid = j;
 937		}
 938
 939		cp->ctx_blks = k;
 940		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
 941			cp->ctx_blks = 0;
 942			return -ENOMEM;
 943		}
 944
 945		for (i = 0; i < cp->ctx_blks; i++) {
 946			cp->ctx_arr[i].ctx =
 947				dma_alloc_coherent(&dev->pcidev->dev,
 948						   CNIC_PAGE_SIZE,
 949						   &cp->ctx_arr[i].mapping,
 950						   GFP_KERNEL);
 951			if (cp->ctx_arr[i].ctx == NULL)
 952				return -ENOMEM;
 953		}
 954	}
 955	return 0;
 956}
 957
 958static u16 cnic_bnx2_next_idx(u16 idx)
 959{
 960	return idx + 1;
 961}
 962
 963static u16 cnic_bnx2_hw_idx(u16 idx)
 964{
 965	return idx;
 966}
 967
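    /* On bnx2x each KCQ page reserves its last entry for a pointer to the
     * next page (struct bnx2x_bd_chain_next, see cnic_alloc_kcq), so any
     * index that lands on MAX_KCQE_CNT within a page is skipped. */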
 968static u16 cnic_bnx2x_next_idx(u16 idx)
 969{
 970	idx++;
 971	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
 972		idx++;
 973
 974	return idx;
 975}
 976
 977static u16 cnic_bnx2x_hw_idx(u16 idx)
 978{
 979	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
 980		idx++;
 981	return idx;
 982}
 983
 984static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
 985			  bool use_pg_tbl)
 986{
 987	int err, i, use_page_tbl = 0;
 988	struct kcqe **kcq;
 989
 990	if (use_pg_tbl)
 991		use_page_tbl = 1;
 992
 993	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
 994	if (err)
 995		return err;
 996
 997	kcq = (struct kcqe **) info->dma.pg_arr;
 998	info->kcq = kcq;
 999
1000	info->next_idx = cnic_bnx2_next_idx;
1001	info->hw_idx = cnic_bnx2_hw_idx;
1002	if (use_pg_tbl)
1003		return 0;
1004
1005	info->next_idx = cnic_bnx2x_next_idx;
1006	info->hw_idx = cnic_bnx2x_hw_idx;
1007
1008	for (i = 0; i < KCQ_PAGE_CNT; i++) {
1009		struct bnx2x_bd_chain_next *next =
1010			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
1011		int j = i + 1;
1012
1013		if (j >= KCQ_PAGE_CNT)
1014			j = 0;
1015		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
1016		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
1017	}
1018	return 0;
1019}
1020
1021static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
1022{
1023	struct cnic_local *cp = udev->dev->cnic_priv;
1024
1025	if (udev->l2_ring)
1026		return 0;
1027
1028	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
1029	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1030					   &udev->l2_ring_map, GFP_KERNEL);
1031	if (!udev->l2_ring)
1032		return -ENOMEM;
1033
1034	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
1035	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
1036	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1037					  &udev->l2_buf_map, GFP_KERNEL);
1038	if (!udev->l2_buf) {
1039		__cnic_free_uio_rings(udev);
1040		return -ENOMEM;
1041	}
1042
1043	return 0;
1044
1045}
1046
1047static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1048{
1049	struct cnic_local *cp = dev->cnic_priv;
1050	struct cnic_uio_dev *udev;
1051
1052	list_for_each_entry(udev, &cnic_udev_list, list) {
1053		if (udev->pdev == dev->pcidev) {
1054			udev->dev = dev;
1055			if (__cnic_alloc_uio_rings(udev, pages)) {
1056				udev->dev = NULL;
1057				return -ENOMEM;
1058			}
1059			cp->udev = udev;
1060			return 0;
1061		}
1062	}
1063
1064	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1065	if (!udev)
1066		return -ENOMEM;
1067
1068	udev->uio_dev = -1;
1069
1070	udev->dev = dev;
1071	udev->pdev = dev->pcidev;
1072
1073	if (__cnic_alloc_uio_rings(udev, pages))
1074		goto err_udev;
1075
1076	list_add(&udev->list, &cnic_udev_list);
1077
1078	pci_dev_get(udev->pdev);
1079
1080	cp->udev = udev;
1081
1082	return 0;
1083
1084 err_udev:
1085	kfree(udev);
1086	return -ENOMEM;
1087}
1088
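    /* Four UIO mappings are exported to user space: the BAR0 registers
     * (physical) plus the status block, L2 ring and L2 buffer (logical),
     * which lets the user-space L2 path run without extra copies. */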
1089static int cnic_init_uio(struct cnic_dev *dev)
1090{
1091	struct cnic_local *cp = dev->cnic_priv;
1092	struct cnic_uio_dev *udev = cp->udev;
1093	struct uio_info *uinfo;
1094	int ret = 0;
1095
1096	if (!udev)
1097		return -ENOMEM;
1098
1099	uinfo = &udev->cnic_uinfo;
1100
1101	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
1102	uinfo->mem[0].internal_addr = dev->regview;
1103	uinfo->mem[0].memtype = UIO_MEM_PHYS;
1104
1105	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1106		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
1107						     TX_MAX_TSS_RINGS + 1);
1108		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
1109					CNIC_PAGE_MASK;
1110		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
1111			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
1112		else
1113			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
1114
1115		uinfo->name = "bnx2_cnic";
1116	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1117		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
1118
1119		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
1120			CNIC_PAGE_MASK;
1121		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
1122
1123		uinfo->name = "bnx2x_cnic";
1124	}
1125
1126	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
1127
1128	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
1129	uinfo->mem[2].size = udev->l2_ring_size;
1130	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
1131
1132	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
1133	uinfo->mem[3].size = udev->l2_buf_size;
1134	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
1135
1136	uinfo->version = CNIC_MODULE_VERSION;
1137	uinfo->irq = UIO_IRQ_CUSTOM;
1138
1139	uinfo->open = cnic_uio_open;
1140	uinfo->release = cnic_uio_close;
1141
1142	if (udev->uio_dev == -1) {
1143		if (!uinfo->priv) {
1144			uinfo->priv = udev;
1145
1146			ret = uio_register_device(&udev->pdev->dev, uinfo);
1147		}
1148	} else {
1149		cnic_init_rings(dev);
1150	}
1151
1152	return ret;
1153}
1154
1155static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
1156{
1157	struct cnic_local *cp = dev->cnic_priv;
1158	int ret;
1159
1160	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
1161	if (ret)
1162		goto error;
1163	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
1164
1165	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
1166	if (ret)
1167		goto error;
1168
1169	ret = cnic_alloc_context(dev);
1170	if (ret)
1171		goto error;
1172
1173	ret = cnic_alloc_uio_rings(dev, 2);
1174	if (ret)
1175		goto error;
1176
1177	ret = cnic_init_uio(dev);
1178	if (ret)
1179		goto error;
1180
1181	return 0;
1182
1183error:
1184	cnic_free_resc(dev);
1185	return ret;
1186}
1187
1188static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1189{
1190	struct cnic_local *cp = dev->cnic_priv;
1191	struct bnx2x *bp = netdev_priv(dev->netdev);
1192	int ctx_blk_size = cp->ethdev->ctx_blk_size;
1193	int total_mem, blks, i;
1194
1195	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
1196	blks = total_mem / ctx_blk_size;
1197	if (total_mem % ctx_blk_size)
1198		blks++;
1199
1200	if (blks > cp->ethdev->ctx_tbl_len)
1201		return -ENOMEM;
1202
1203	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
1204	if (cp->ctx_arr == NULL)
1205		return -ENOMEM;
1206
1207	cp->ctx_blks = blks;
1208	cp->ctx_blk_size = ctx_blk_size;
1209	if (!CHIP_IS_E1(bp))
1210		cp->ctx_align = 0;
1211	else
1212		cp->ctx_align = ctx_blk_size;
1213
1214	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
1215
1216	for (i = 0; i < blks; i++) {
1217		cp->ctx_arr[i].ctx =
1218			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
1219					   &cp->ctx_arr[i].mapping,
1220					   GFP_KERNEL);
1221		if (cp->ctx_arr[i].ctx == NULL)
1222			return -ENOMEM;
1223
1224		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
1225			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
1226				cnic_free_context(dev);
1227				cp->ctx_blk_size += cp->ctx_align;
1228				i = -1;
1229				continue;
1230			}
1231		}
1232	}
1233	return 0;
1234}
1235
1236static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1237{
1238	struct cnic_local *cp = dev->cnic_priv;
1239	struct bnx2x *bp = netdev_priv(dev->netdev);
1240	struct cnic_eth_dev *ethdev = cp->ethdev;
1241	u32 start_cid = ethdev->starting_cid;
1242	int i, j, n, ret, pages;
1243	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1244
1245	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1246	cp->iscsi_start_cid = start_cid;
1247	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
1248
1249	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
1250		cp->max_cid_space += dev->max_fcoe_conn;
1251		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
1252		if (!cp->fcoe_init_cid)
1253			cp->fcoe_init_cid = 0x10;
1254	}
1255
1256	cp->iscsi_tbl = kcalloc(MAX_ISCSI_TBL_SZ, sizeof(struct cnic_iscsi),
1257				GFP_KERNEL);
1258	if (!cp->iscsi_tbl)
1259		goto error;
1260
1261	cp->ctx_tbl = kcalloc(cp->max_cid_space, sizeof(struct cnic_context),
1262			      GFP_KERNEL);
1263	if (!cp->ctx_tbl)
1264		goto error;
1265
1266	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1267		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
1268		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1269	}
1270
1271	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
1272		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
1273
1274	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1275		CNIC_PAGE_SIZE;
1276
1277	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1278	if (ret)
1279		goto error;
1280
1281	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1282	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
1283		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1284
1285		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
1286		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
1287						   off;
1288
1289		if ((i % n) == (n - 1))
1290			j++;
1291	}
1292
1293	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
1294	if (ret)
1295		goto error;
1296
1297	if (CNIC_SUPPORTS_FCOE(bp)) {
1298		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1299		if (ret)
1300			goto error;
1301	}
1302
1303	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
1304	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1305	if (ret)
1306		goto error;
1307
1308	ret = cnic_alloc_bnx2x_context(dev);
1309	if (ret)
1310		goto error;
1311
1312	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
1313		return 0;
1314
1315	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1316
1317	cp->l2_rx_ring_size = 15;
1318
1319	ret = cnic_alloc_uio_rings(dev, 4);
1320	if (ret)
1321		goto error;
1322
1323	ret = cnic_init_uio(dev);
1324	if (ret)
1325		goto error;
1326
1327	return 0;
1328
1329error:
1330	cnic_free_resc(dev);
1331	return -ENOMEM;
1332}
1333
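    /* Free KWQ slots = max index minus the in-flight delta (prod - cons)
     * masked to the ring size; e.g. with max_kwq_idx = 127, prod = 130 and
     * cons = 125, 127 - 5 = 122 entries are still usable. */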
1334static inline u32 cnic_kwq_avail(struct cnic_local *cp)
1335{
1336	return cp->max_kwq_idx -
1337		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
1338}
1339
1340static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1341				  u32 num_wqes)
1342{
1343	struct cnic_local *cp = dev->cnic_priv;
1344	struct kwqe *prod_qe;
1345	u16 prod, sw_prod, i;
1346
1347	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1348		return -EAGAIN;		/* bnx2 is down */
1349
1350	spin_lock_bh(&cp->cnic_ulp_lock);
1351	if (num_wqes > cnic_kwq_avail(cp) &&
1352	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
1353		spin_unlock_bh(&cp->cnic_ulp_lock);
1354		return -EAGAIN;
1355	}
1356
1357	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
1358
1359	prod = cp->kwq_prod_idx;
1360	sw_prod = prod & MAX_KWQ_IDX;
1361	for (i = 0; i < num_wqes; i++) {
1362		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
1363		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
1364		prod++;
1365		sw_prod = prod & MAX_KWQ_IDX;
1366	}
1367	cp->kwq_prod_idx = prod;
1368
1369	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
1370
1371	spin_unlock_bh(&cp->cnic_ulp_lock);
1372	return 0;
1373}
1374
1375static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
1376				   union l5cm_specific_data *l5_data)
1377{
1378	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1379	dma_addr_t map;
1380
1381	map = ctx->kwqe_data_mapping;
1382	l5_data->phy_address.lo = (u64) map & 0xffffffff;
1383	l5_data->phy_address.hi = (u64) map >> 32;
1384	return ctx->kwqe_data;
1385}
1386
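    /* Build one 16-byte slow-path element (SPE): the header encodes the
     * command and HW CID, the type field carries connection type and PF id,
     * and the payload travels by DMA address in l5_data.
     * drv_submit_kwqes_16() returns the number of elements it consumed, so
     * a return of 1 maps to success. */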
1387static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1388				u32 type, union l5cm_specific_data *l5_data)
1389{
1390	struct cnic_local *cp = dev->cnic_priv;
1391	struct bnx2x *bp = netdev_priv(dev->netdev);
1392	struct l5cm_spe kwqe;
1393	struct kwqe_16 *kwq[1];
1394	u16 type_16;
1395	int ret;
1396
1397	kwqe.hdr.conn_and_cmd_data =
1398		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1399			     BNX2X_HW_CID(bp, cid)));
1400
1401	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
1402	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1403		   SPE_HDR_FUNCTION_ID;
1404
1405	kwqe.hdr.type = cpu_to_le16(type_16);
1406	kwqe.hdr.reserved1 = 0;
1407	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1408	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1409
1410	kwq[0] = (struct kwqe_16 *) &kwqe;
1411
1412	spin_lock_bh(&cp->cnic_ulp_lock);
1413	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1414	spin_unlock_bh(&cp->cnic_ulp_lock);
1415
1416	if (ret == 1)
1417		return 0;
1418
1419	return ret;
1420}
1421
1422static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1423				   struct kcqe *cqes[], u32 num_cqes)
1424{
1425	struct cnic_local *cp = dev->cnic_priv;
1426	struct cnic_ulp_ops *ulp_ops;
1427
1428	rcu_read_lock();
1429	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1430	if (likely(ulp_ops)) {
1431		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
1432					  cqes, num_cqes);
1433	}
1434	rcu_read_unlock();
1435}
1436
1437static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
1438				       int en_tcp_dack)
1439{
1440	struct bnx2x *bp = netdev_priv(dev->netdev);
1441	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
1442	u16 tstorm_flags = 0;
1443
1444	if (time_stamps) {
1445		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1446		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1447	}
1448	if (en_tcp_dack)
1449		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
1450
1451	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1452		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
1453
1454	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1455		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
1456}
1457
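    /* INIT1 KWQE: size the per-connection task array, R2T queue and HQ from
     * the requested limits, then program those limits into the TSTORM,
     * USTORM, XSTORM and CSTORM internal RAM of this PF. */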
1458static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1459{
1460	struct cnic_local *cp = dev->cnic_priv;
1461	struct bnx2x *bp = netdev_priv(dev->netdev);
1462	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1463	int hq_bds, pages;
1464	u32 pfid = bp->pfid;
1465
1466	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1467	cp->num_ccells = req1->num_ccells_per_conn;
1468	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
1469			      cp->num_iscsi_tasks;
1470	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1471			BNX2X_ISCSI_R2TQE_SIZE;
1472	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1473	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1474	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1475	cp->num_cqs = req1->num_cqs;
1476
1477	if (!dev->max_iscsi_conn)
1478		return 0;
1479
1480	/* init Tstorm RAM */
1481	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1482		  req1->rq_num_wqes);
1483	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1484		  CNIC_PAGE_SIZE);
1485	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1486		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1487	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1488		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1489		  req1->num_tasks_per_conn);
1490
1491	/* init Ustorm RAM */
1492	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1493		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
1494		  req1->rq_buffer_size);
1495	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1496		  CNIC_PAGE_SIZE);
1497	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1498		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1499	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1500		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1501		  req1->num_tasks_per_conn);
1502	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1503		  req1->rq_num_wqes);
1504	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1505		  req1->cq_num_wqes);
1506	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1507		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1508
1509	/* init Xstorm RAM */
1510	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1511		  CNIC_PAGE_SIZE);
1512	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1513		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1514	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1515		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1516		  req1->num_tasks_per_conn);
1517	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1518		  hq_bds);
1519	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1520		  req1->num_tasks_per_conn);
1521	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1522		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1523
1524	/* init Cstorm RAM */
1525	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1526		  CNIC_PAGE_SIZE);
1527	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1528		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1529	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1530		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1531		  req1->num_tasks_per_conn);
1532	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1533		  req1->cq_num_wqes);
1534	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1535		  hq_bds);
1536
1537	cnic_bnx2x_set_tcp_options(dev,
1538			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
1539			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
1540
1541	return 0;
1542}
1543
1544static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1545{
1546	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1547	struct bnx2x *bp = netdev_priv(dev->netdev);
1548	u32 pfid = bp->pfid;
1549	struct iscsi_kcqe kcqe;
1550	struct kcqe *cqes[1];
1551
1552	memset(&kcqe, 0, sizeof(kcqe));
1553	if (!dev->max_iscsi_conn) {
1554		kcqe.completion_status =
1555			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1556		goto done;
1557	}
1558
1559	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1560		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1561	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1562		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1563		req2->error_bit_map[1]);
1564
1565	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1566		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1567	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1568		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1569	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1570		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1571		req2->error_bit_map[1]);
1572
1573	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1574		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1575
1576	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1577
1578done:
1579	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1580	cqes[0] = (struct kcqe *) &kcqe;
1581	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1582
1583	return 0;
1584}
1585
1586static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1587{
1588	struct cnic_local *cp = dev->cnic_priv;
1589	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1590
1591	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1592		struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1593
1594		cnic_free_dma(dev, &iscsi->hq_info);
1595		cnic_free_dma(dev, &iscsi->r2tq_info);
1596		cnic_free_dma(dev, &iscsi->task_array_info);
1597		cnic_free_id(&cp->cid_tbl, ctx->cid);
1598	} else {
1599		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
1600	}
1601
1602	ctx->cid = 0;
1603}
1604
1605static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1606{
1607	u32 cid;
1608	int ret, pages;
1609	struct cnic_local *cp = dev->cnic_priv;
1610	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1611	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1612
1613	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1614		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1615		if (cid == -1) {
1616			ret = -ENOMEM;
1617			goto error;
1618		}
1619		ctx->cid = cid;
1620		return 0;
1621	}
1622
1623	cid = cnic_alloc_new_id(&cp->cid_tbl);
1624	if (cid == -1) {
1625		ret = -ENOMEM;
1626		goto error;
1627	}
1628
1629	ctx->cid = cid;
1630	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
1631
1632	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1633	if (ret)
1634		goto error;
1635
1636	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
1637	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1638	if (ret)
1639		goto error;
1640
1641	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1642	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1643	if (ret)
1644		goto error;
1645
1646	return 0;
1647
1648error:
1649	cnic_free_bnx2x_conn_resc(dev, l5_cid);
1650	return ret;
1651}
1652
1653static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1654				struct regpair *ctx_addr)
1655{
1656	struct cnic_local *cp = dev->cnic_priv;
1657	struct cnic_eth_dev *ethdev = cp->ethdev;
1658	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1659	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1660	unsigned long align_off = 0;
1661	dma_addr_t ctx_map;
1662	void *ctx;
1663
1664	if (cp->ctx_align) {
1665		unsigned long mask = cp->ctx_align - 1;
1666
1667		if (cp->ctx_arr[blk].mapping & mask)
1668			align_off = cp->ctx_align -
1669				    (cp->ctx_arr[blk].mapping & mask);
1670	}
1671	ctx_map = cp->ctx_arr[blk].mapping + align_off +
1672		(off * BNX2X_CONTEXT_MEM_SIZE);
1673	ctx = cp->ctx_arr[blk].ctx + align_off +
1674	      (off * BNX2X_CONTEXT_MEM_SIZE);
1675	if (init)
1676		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1677
1678	ctx_addr->lo = ctx_map & 0xffffffff;
1679	ctx_addr->hi = (u64) ctx_map >> 32;
1680	return ctx;
1681}
1682
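    /* Fill the iSCSI connection context from the offload KWQEs: wqes[0] and
     * wqes[1] are the offload1/offload2 requests, and any further entries
     * are offload3 WQEs whose qp_first_pte[] slots seed the remaining CQ
     * page-table entries consumed in the loop below. */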
1683static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1684				u32 num)
1685{
1686	struct cnic_local *cp = dev->cnic_priv;
1687	struct bnx2x *bp = netdev_priv(dev->netdev);
1688	struct iscsi_kwqe_conn_offload1 *req1 =
1689			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
1690	struct iscsi_kwqe_conn_offload2 *req2 =
1691			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
1692	struct iscsi_kwqe_conn_offload3 *req3;
1693	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1694	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1695	u32 cid = ctx->cid;
1696	u32 hw_cid = BNX2X_HW_CID(bp, cid);
1697	struct iscsi_context *ictx;
1698	struct regpair context_addr;
1699	int i, j, n = 2, n_max;
1700	u8 port = BP_PORT(bp);
1701
1702	ctx->ctx_flags = 0;
1703	if (!req2->num_additional_wqes)
1704		return -EINVAL;
1705
1706	n_max = req2->num_additional_wqes + 2;
1707
1708	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1709	if (ictx == NULL)
1710		return -ENOMEM;
1711
1712	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1713
1714	ictx->xstorm_ag_context.hq_prod = 1;
1715
1716	ictx->xstorm_st_context.iscsi.first_burst_length =
1717		ISCSI_DEF_FIRST_BURST_LEN;
1718	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1719		ISCSI_DEF_MAX_RECV_SEG_LEN;
1720	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1721		req1->sq_page_table_addr_lo;
1722	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1723		req1->sq_page_table_addr_hi;
1724	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1725	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1726	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1727		iscsi->hq_info.pgtbl_map & 0xffffffff;
1728	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1729		(u64) iscsi->hq_info.pgtbl_map >> 32;
1730	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1731		iscsi->hq_info.pgtbl[0];
1732	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1733		iscsi->hq_info.pgtbl[1];
1734	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1735		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1736	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1737		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1738	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1739		iscsi->r2tq_info.pgtbl[0];
1740	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1741		iscsi->r2tq_info.pgtbl[1];
1742	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1743		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1744	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1745		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1746	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1747		BNX2X_ISCSI_PBL_NOT_CACHED;
1748	ictx->xstorm_st_context.iscsi.flags.flags |=
1749		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1750	ictx->xstorm_st_context.iscsi.flags.flags |=
1751		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
1752	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1753		ETH_P_8021Q;
1754	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
1755	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
1756
1757		port = 0;
1758	}
1759	ictx->xstorm_st_context.common.flags =
1760		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
1761	ictx->xstorm_st_context.common.flags =
1762		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
1763
1764	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1765	/* TSTORM requires the base address of RQ DB & not PTE */
1766	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1767		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
1768	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1769		req2->rq_page_table_addr_hi;
1770	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1771	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1772	ictx->tstorm_st_context.tcp.flags2 |=
1773		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1774	ictx->tstorm_st_context.tcp.ooo_support_mode =
1775		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
1776
1777	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1778
1779	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1780		req2->rq_page_table_addr_lo;
1781	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
1782		req2->rq_page_table_addr_hi;
1783	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1784	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1785	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1786		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1787	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1788		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1789	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1790		iscsi->r2tq_info.pgtbl[0];
1791	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1792		iscsi->r2tq_info.pgtbl[1];
1793	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1794		req1->cq_page_table_addr_lo;
1795	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1796		req1->cq_page_table_addr_hi;
1797	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1798	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1799	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1800	ictx->ustorm_st_context.task_pbe_cache_index =
1801		BNX2X_ISCSI_PBL_NOT_CACHED;
1802	ictx->ustorm_st_context.task_pdu_cache_index =
1803		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1804
1805	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1806		if (j == 3) {
1807			if (n >= n_max)
1808				break;
1809			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1810			j = 0;
1811		}
1812		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1813		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1814			req3->qp_first_pte[j].hi;
1815		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1816			req3->qp_first_pte[j].lo;
1817	}
1818
1819	ictx->ustorm_st_context.task_pbl_base.lo =
1820		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1821	ictx->ustorm_st_context.task_pbl_base.hi =
1822		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1823	ictx->ustorm_st_context.tce_phy_addr.lo =
1824		iscsi->task_array_info.pgtbl[0];
1825	ictx->ustorm_st_context.tce_phy_addr.hi =
1826		iscsi->task_array_info.pgtbl[1];
1827	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1828	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1829	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1830	ictx->ustorm_st_context.negotiated_rx_and_flags |=
1831		ISCSI_DEF_MAX_BURST_LEN;
1832	ictx->ustorm_st_context.negotiated_rx |=
1833		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1834		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1835
1836	ictx->cstorm_st_context.hq_pbl_base.lo =
1837		iscsi->hq_info.pgtbl_map & 0xffffffff;
1838	ictx->cstorm_st_context.hq_pbl_base.hi =
1839		(u64) iscsi->hq_info.pgtbl_map >> 32;
1840	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1841	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1842	ictx->cstorm_st_context.task_pbl_base.lo =
1843		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1844	ictx->cstorm_st_context.task_pbl_base.hi =
1845		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1846	/* CSTORM and USTORM initialization is different, CSTORM requires
1847	 * CQ DB base & not PTE addr */
1848	ictx->cstorm_st_context.cq_db_base.lo =
1849		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
1850	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1851	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1852	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1853	for (i = 0; i < cp->num_cqs; i++) {
1854		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1855			ISCSI_INITIAL_SN;
1856		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1857			ISCSI_INITIAL_SN;
1858	}
1859
1860	ictx->xstorm_ag_context.cdu_reserved =
1861		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1862				       ISCSI_CONNECTION_TYPE);
1863	ictx->ustorm_ag_context.cdu_usage =
1864		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1865				       ISCSI_CONNECTION_TYPE);
1866	return 0;
1867
1868}
1869
1870static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1871				   u32 num, int *work)
1872{
1873	struct iscsi_kwqe_conn_offload1 *req1;
1874	struct iscsi_kwqe_conn_offload2 *req2;
1875	struct cnic_local *cp = dev->cnic_priv;
1876	struct bnx2x *bp = netdev_priv(dev->netdev);
1877	struct cnic_context *ctx;
1878	struct iscsi_kcqe kcqe;
1879	struct kcqe *cqes[1];
1880	u32 l5_cid;
1881	int ret = 0;
1882
1883	if (num < 2) {
1884		*work = num;
1885		return -EINVAL;
1886	}
1887
1888	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1889	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1890	if ((num - 2) < req2->num_additional_wqes) {
1891		*work = num;
1892		return -EINVAL;
1893	}
1894	*work = 2 + req2->num_additional_wqes;
1895
1896	l5_cid = req1->iscsi_conn_id;
1897	if (l5_cid >= MAX_ISCSI_TBL_SZ)
1898		return -EINVAL;
1899
1900	memset(&kcqe, 0, sizeof(kcqe));
1901	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1902	kcqe.iscsi_conn_id = l5_cid;
1903	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1904
1905	ctx = &cp->ctx_tbl[l5_cid];
1906	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1907		kcqe.completion_status =
1908			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1909		goto done;
1910	}
1911
1912	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1913		atomic_dec(&cp->iscsi_conn);
1914		goto done;
1915	}
1916	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1917	if (ret) {
1918		atomic_dec(&cp->iscsi_conn);
1919		goto done;
1920	}
1921	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1922	if (ret < 0) {
1923		cnic_free_bnx2x_conn_resc(dev, l5_cid);
1924		atomic_dec(&cp->iscsi_conn);
1925		goto done;
1926	}
1927
1928	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1929	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
1930
1931done:
1932	cqes[0] = (struct kcqe *) &kcqe;
1933	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1934	return 0;
1935}
1936
1937
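/* Forward an iSCSI CONN_UPDATE KWQE to the chip: the KWQE is copied into
 * the per-connection kwqe-16 data area and submitted as an UPDATE_CONN
 * ramrod on the hardware context ID.
 */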
1938static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1939{
1940	struct cnic_local *cp = dev->cnic_priv;
1941	struct iscsi_kwqe_conn_update *req =
1942		(struct iscsi_kwqe_conn_update *) kwqe;
1943	void *data;
1944	union l5cm_specific_data l5_data;
1945	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1946	int ret;
1947
1948	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1949		return -EINVAL;
1950
1951	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1952	if (!data)
1953		return -ENOMEM;
1954
1955	memcpy(data, kwqe, sizeof(struct kwqe));
1956
1957	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1958			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1959	return ret;
1960}
1961
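/* Issue a common CFC_DEL ramrod for the connection and wait (bounded by
 * CNIC_RAMROD_TMO) for the completion handler in cnic_ctl() to set
 * ctx->wait_cond.  Returns -EBUSY if the completion flagged a CID error.
 */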
1962static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1963{
1964	struct cnic_local *cp = dev->cnic_priv;
1965	struct bnx2x *bp = netdev_priv(dev->netdev);
1966	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1967	union l5cm_specific_data l5_data;
1968	int ret;
1969	u32 hw_cid;
1970
1971	init_waitqueue_head(&ctx->waitq);
1972	ctx->wait_cond = 0;
1973	memset(&l5_data, 0, sizeof(l5_data));
1974	hw_cid = BNX2X_HW_CID(bp, ctx->cid);
1975
1976	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1977				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
1978
1979	if (ret == 0) {
1980		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
1981		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
1982			return -EBUSY;
1983	}
1984
1985	return 0;
1986}
1987
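/* Destroy an offloaded iSCSI connection.  If the context was offloaded
 * less than 2 * HZ ago, the CFC delete is deferred to the delete_task
 * worker instead of being issued inline; either way a DESTROY_CONN KCQE
 * (always SUCCESS) is returned to the ULP.
 */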
1988static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1989{
1990	struct cnic_local *cp = dev->cnic_priv;
1991	struct iscsi_kwqe_conn_destroy *req =
1992		(struct iscsi_kwqe_conn_destroy *) kwqe;
1993	u32 l5_cid = req->reserved0;
1994	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1995	int ret = 0;
1996	struct iscsi_kcqe kcqe;
1997	struct kcqe *cqes[1];
1998
1999	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2000		goto skip_cfc_delete;
2001
2002	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
2003		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
2004
2005		if (delta > (2 * HZ))
2006			delta = 0;
2007
2008		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2009		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
2010		goto destroy_reply;
2011	}
2012
2013	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
2014
2015skip_cfc_delete:
2016	cnic_free_bnx2x_conn_resc(dev, l5_cid);
2017
2018	if (!ret) {
2019		atomic_dec(&cp->iscsi_conn);
2020		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2021	}
2022
2023destroy_reply:
2024	memset(&kcqe, 0, sizeof(kcqe));
2025	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
2026	kcqe.iscsi_conn_id = l5_cid;
2027	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
2028	kcqe.iscsi_conn_context_id = req->context_id;
2029
2030	cqes[0] = (struct kcqe *) &kcqe;
2031	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
2032
2033	return 0;
2034}
2035
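/* Fill the xstorm/tstorm parts of the active-connection buffer for a TCP
 * connect: context address, MSS/receive buffer, optional Nagle flag and
 * keepalive parameters, plus a pseudo-header checksum precomputed with
 * csum_ipv6_magic() over the byte-swapped source/destination addresses.
 */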
2036static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2037				      struct l4_kwq_connect_req1 *kwqe1,
2038				      struct l4_kwq_connect_req3 *kwqe3,
2039				      struct l5cm_active_conn_buffer *conn_buf)
2040{
2041	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
2042	struct l5cm_xstorm_conn_buffer *xstorm_buf =
2043		&conn_buf->xstorm_conn_buffer;
2044	struct l5cm_tstorm_conn_buffer *tstorm_buf =
2045		&conn_buf->tstorm_conn_buffer;
2046	struct regpair context_addr;
2047	u32 cid = BNX2X_SW_CID(kwqe1->cid);
2048	struct in6_addr src_ip, dst_ip;
2049	int i;
2050	u32 *addrp;
2051
2052	addrp = (u32 *) &conn_addr->local_ip_addr;
2053	for (i = 0; i < 4; i++, addrp++)
2054		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2055
2056	addrp = (u32 *) &conn_addr->remote_ip_addr;
2057	for (i = 0; i < 4; i++, addrp++)
2058		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2059
2060	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
2061
2062	xstorm_buf->context_addr.hi = context_addr.hi;
2063	xstorm_buf->context_addr.lo = context_addr.lo;
2064	xstorm_buf->mss = 0xffff;
2065	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
2066	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
2067		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
2068	xstorm_buf->pseudo_header_checksum =
2069		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
2070
2071	if (kwqe3->ka_timeout) {
2072		tstorm_buf->ka_enable = 1;
2073		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
2074		tstorm_buf->ka_interval = kwqe3->ka_interval;
2075		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
2076	}
2077	tstorm_buf->max_rt_time = 0xffffffff;
2078}
2079
2080static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2081{
2082	struct bnx2x *bp = netdev_priv(dev->netdev);
2083	u32 pfid = bp->pfid;
2084	u8 *mac = dev->mac_addr;
2085
2086	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2087		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
2088	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2089		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
2090	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2091		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
2092	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2093		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
2094	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2095		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
2096	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2097		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
2098
2099	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2100		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
2101	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2102		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2103		 mac[4]);
2104	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2105		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
2106	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2107		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2108		 mac[2]);
2109	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2110		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
2111	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2112		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2113		 mac[0]);
2114}
2115
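/* Build and submit an L4 TCP_CONNECT ramrod.  A connect request arrives
 * as two KWQEs (three for IPv6: req1, req2 with the upper address words,
 * req3 with TCP options); the addresses and ports are gathered into a
 * struct l5cm_active_conn_buffer in the kwqe-16 data area before the
 * ramrod is posted.
 */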
2116static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2117			      u32 num, int *work)
2118{
2119	struct cnic_local *cp = dev->cnic_priv;
2120	struct bnx2x *bp = netdev_priv(dev->netdev);
2121	struct l4_kwq_connect_req1 *kwqe1 =
2122		(struct l4_kwq_connect_req1 *) wqes[0];
2123	struct l4_kwq_connect_req3 *kwqe3;
2124	struct l5cm_active_conn_buffer *conn_buf;
2125	struct l5cm_conn_addr_params *conn_addr;
2126	union l5cm_specific_data l5_data;
2127	u32 l5_cid = kwqe1->pg_cid;
2128	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
2129	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2130	int ret;
2131
2132	if (num < 2) {
2133		*work = num;
2134		return -EINVAL;
2135	}
2136
2137	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
2138		*work = 3;
2139	else
2140		*work = 2;
2141
2142	if (num < *work) {
2143		*work = num;
2144		return -EINVAL;
2145	}
2146
2147	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
2148		netdev_err(dev->netdev, "conn_buf size too big\n");
2149		return -ENOMEM;
2150	}
2151	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2152	if (!conn_buf)
2153		return -ENOMEM;
2154
2155	memset(conn_buf, 0, sizeof(*conn_buf));
2156
2157	conn_addr = &conn_buf->conn_addr_buf;
2158	conn_addr->remote_addr_0 = csk->ha[0];
2159	conn_addr->remote_addr_1 = csk->ha[1];
2160	conn_addr->remote_addr_2 = csk->ha[2];
2161	conn_addr->remote_addr_3 = csk->ha[3];
2162	conn_addr->remote_addr_4 = csk->ha[4];
2163	conn_addr->remote_addr_5 = csk->ha[5];
2164
2165	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
2166		struct l4_kwq_connect_req2 *kwqe2 =
2167			(struct l4_kwq_connect_req2 *) wqes[1];
2168
2169		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
2170		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
2171		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
2172
2173		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
2174		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
2175		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
2176		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
2177	}
2178	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
2179
2180	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
2181	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
2182	conn_addr->local_tcp_port = kwqe1->src_port;
2183	conn_addr->remote_tcp_port = kwqe1->dst_port;
2184
2185	conn_addr->pmtu = kwqe3->pmtu;
2186	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2187
2188	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
2189		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
2190
2191	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2192			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2193	if (!ret)
2194		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2195
2196	return ret;
2197}
2198
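/* L4 graceful close and abortive reset are thin wrappers: each posts the
 * corresponding L5CM ramrod (CLOSE or ABORT) on the connection CID with
 * an empty data payload.
 */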
2199static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2200{
2201	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2202	union l5cm_specific_data l5_data;
2203	int ret;
2204
2205	memset(&l5_data, 0, sizeof(l5_data));
2206	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2207			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2208	return ret;
2209}
2210
2211static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2212{
2213	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2214	union l5cm_specific_data l5_data;
2215	int ret;
2216
2217	memset(&l5_data, 0, sizeof(l5_data));
2218	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2219			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2220	return ret;
2221}
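
/* OFFLOAD_PG and UPDATE_PG need no firmware work on bnx2x; each simply
 * echoes a completion KCQE back to the L4 ULP so the connection state
 * machine can advance.
 */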
2222static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2223{
2224	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2225	struct l4_kcq kcqe;
2226	struct kcqe *cqes[1];
2227
2228	memset(&kcqe, 0, sizeof(kcqe));
2229	kcqe.pg_host_opaque = req->host_opaque;
2230	kcqe.pg_cid = req->host_opaque;
2231	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2232	cqes[0] = (struct kcqe *) &kcqe;
2233	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2234	return 0;
2235}
2236
2237static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2238{
2239	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2240	struct l4_kcq kcqe;
2241	struct kcqe *cqes[1];
2242
2243	memset(&kcqe, 0, sizeof(kcqe));
2244	kcqe.pg_host_opaque = req->pg_host_opaque;
2245	kcqe.pg_cid = req->pg_cid;
2246	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2247	cqes[0] = (struct kcqe *) &kcqe;
2248	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2249	return 0;
2250}
2251
2252static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2253{
2254	struct fcoe_kwqe_stat *req;
2255	struct fcoe_stat_ramrod_params *fcoe_stat;
2256	union l5cm_specific_data l5_data;
2257	struct cnic_local *cp = dev->cnic_priv;
2258	struct bnx2x *bp = netdev_priv(dev->netdev);
2259	int ret;
2260	u32 cid;
2261
2262	req = (struct fcoe_kwqe_stat *) kwqe;
2263	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2264
2265	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2266	if (!fcoe_stat)
2267		return -ENOMEM;
2268
2269	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2270	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2271
2272	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
2273				  FCOE_CONNECTION_TYPE, &l5_data);
2274	return ret;
2275}
2276
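/* FCoE INIT consumes three linked KWQEs (init1/init2/init3), validates
 * their opcodes, copies them into a struct fcoe_init_ramrod_params along
 * with the KCQ2 event-queue page table, and fires the INIT_FUNC ramrod.
 */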
2277static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2278				 u32 num, int *work)
2279{
2280	int ret;
2281	struct cnic_local *cp = dev->cnic_priv;
2282	struct bnx2x *bp = netdev_priv(dev->netdev);
2283	u32 cid;
2284	struct fcoe_init_ramrod_params *fcoe_init;
2285	struct fcoe_kwqe_init1 *req1;
2286	struct fcoe_kwqe_init2 *req2;
2287	struct fcoe_kwqe_init3 *req3;
2288	union l5cm_specific_data l5_data;
2289
2290	if (num < 3) {
2291		*work = num;
2292		return -EINVAL;
2293	}
2294	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2295	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2296	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2297	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2298		*work = 1;
2299		return -EINVAL;
2300	}
2301	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2302		*work = 2;
2303		return -EINVAL;
2304	}
2305
2306	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2307		netdev_err(dev->netdev, "fcoe_init size too big\n");
2308		return -ENOMEM;
2309	}
2310	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2311	if (!fcoe_init)
2312		return -ENOMEM;
2313
2314	memset(fcoe_init, 0, sizeof(*fcoe_init));
2315	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2316	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2317	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
2318	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2319	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2320	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
2321
2322	fcoe_init->sb_num = cp->status_blk_num;
2323	fcoe_init->eq_prod = MAX_KCQ_IDX;
2324	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2325	cp->kcq2.sw_prod_idx = 0;
2326
2327	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2328	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2329				  FCOE_CONNECTION_TYPE, &l5_data);
2330	*work = 3;
2331	return ret;
2332}
2333
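/* FCoE OFFLOAD_CONN consumes four KWQEs.  Resources are allocated for
 * the L5 CID, the CDU-reserved fields of the context are stamped, and an
 * OFFLOAD_CONN ramrod is posted; any failure is reported to the ULP as a
 * CTX_ALLOC_FAILURE KCQE via the err_reply path.
 */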
2334static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2335				 u32 num, int *work)
2336{
2337	int ret = 0;
2338	u32 cid = -1, l5_cid;
2339	struct cnic_local *cp = dev->cnic_priv;
2340	struct bnx2x *bp = netdev_priv(dev->netdev);
2341	struct fcoe_kwqe_conn_offload1 *req1;
2342	struct fcoe_kwqe_conn_offload2 *req2;
2343	struct fcoe_kwqe_conn_offload3 *req3;
2344	struct fcoe_kwqe_conn_offload4 *req4;
2345	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2346	struct cnic_context *ctx;
2347	struct fcoe_context *fctx;
2348	struct regpair ctx_addr;
2349	union l5cm_specific_data l5_data;
2350	struct fcoe_kcqe kcqe;
2351	struct kcqe *cqes[1];
2352
2353	if (num < 4) {
2354		*work = num;
2355		return -EINVAL;
2356	}
2357	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2358	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2359	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2360	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2361
2362	*work = 4;
2363
2364	l5_cid = req1->fcoe_conn_id;
2365	if (l5_cid >= dev->max_fcoe_conn)
2366		goto err_reply;
2367
2368	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2369
2370	ctx = &cp->ctx_tbl[l5_cid];
2371	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2372		goto err_reply;
2373
2374	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2375	if (ret) {
2376		ret = 0;
2377		goto err_reply;
2378	}
2379	cid = ctx->cid;
2380
2381	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2382	if (fctx) {
2383		u32 hw_cid = BNX2X_HW_CID(bp, cid);
2384		u32 val;
2385
2386		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2387					     FCOE_CONNECTION_TYPE);
2388		fctx->xstorm_ag_context.cdu_reserved = val;
2389		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2390					     FCOE_CONNECTION_TYPE);
2391		fctx->ustorm_ag_context.cdu_usage = val;
2392	}
2393	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2394		netdev_err(dev->netdev, "fcoe_offload size too big\n");
2395		goto err_reply;
2396	}
2397	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2398	if (!fcoe_offload)
2399		goto err_reply;
2400
2401	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2402	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2403	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2404	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2405	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2406
2407	cid = BNX2X_HW_CID(bp, cid);
2408	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2409				  FCOE_CONNECTION_TYPE, &l5_data);
2410	if (!ret)
2411		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2412
2413	return ret;
2414
2415err_reply:
2416	if (cid != -1)
2417		cnic_free_bnx2x_conn_resc(dev, l5_cid);
2418
2419	memset(&kcqe, 0, sizeof(kcqe));
2420	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2421	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2422	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2423
2424	cqes[0] = (struct kcqe *) &kcqe;
2425	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2426	return ret;
2427}
2428
2429static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2430{
2431	struct fcoe_kwqe_conn_enable_disable *req;
2432	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2433	union l5cm_specific_data l5_data;
2434	int ret;
2435	u32 cid, l5_cid;
2436	struct cnic_local *cp = dev->cnic_priv;
2437
2438	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2439	cid = req->context_id;
2440	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2441
2442	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2443		netdev_err(dev->netdev, "fcoe_enable size too big\n");
2444		return -ENOMEM;
2445	}
2446	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2447	if (!fcoe_enable)
2448		return -ENOMEM;
2449
2450	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2451	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2452	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2453				  FCOE_CONNECTION_TYPE, &l5_data);
2454	return ret;
2455}
2456
2457static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2458{
2459	struct fcoe_kwqe_conn_enable_disable *req;
2460	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2461	union l5cm_specific_data l5_data;
2462	int ret;
2463	u32 cid, l5_cid;
2464	struct cnic_local *cp = dev->cnic_priv;
2465
2466	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2467	cid = req->context_id;
2468	l5_cid = req->conn_id;
2469	if (l5_cid >= dev->max_fcoe_conn)
2470		return -EINVAL;
2471
2472	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2473
2474	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2475		netdev_err(dev->netdev, "fcoe_disable size too big\n");
2476		return -ENOMEM;
2477	}
2478	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2479	if (!fcoe_disable)
2480		return -ENOMEM;
2481
2482	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2483	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2484	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2485				  FCOE_CONNECTION_TYPE, &l5_data);
2486	return ret;
2487}
2488
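/* FCoE connection teardown: a TERMINATE_CONN ramrod is issued and waited
 * on (bounded by CNIC_RAMROD_TMO); the final CFC delete is then deferred
 * to delete_task roughly two seconds later, and a DESTROY_CONN KCQE with
 * the observed status is returned to the ULP.
 */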
2489static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2490{
2491	struct fcoe_kwqe_conn_destroy *req;
2492	union l5cm_specific_data l5_data;
2493	int ret;
2494	u32 cid, l5_cid;
2495	struct cnic_local *cp = dev->cnic_priv;
2496	struct cnic_context *ctx;
2497	struct fcoe_kcqe kcqe;
2498	struct kcqe *cqes[1];
2499
2500	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2501	cid = req->context_id;
2502	l5_cid = req->conn_id;
2503	if (l5_cid >= dev->max_fcoe_conn)
2504		return -EINVAL;
2505
2506	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2507
2508	ctx = &cp->ctx_tbl[l5_cid];
2509
2510	init_waitqueue_head(&ctx->waitq);
2511	ctx->wait_cond = 0;
2512
2513	memset(&kcqe, 0, sizeof(kcqe));
2514	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2515	memset(&l5_data, 0, sizeof(l5_data));
2516	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2517				  FCOE_CONNECTION_TYPE, &l5_data);
2518	if (ret == 0) {
2519		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2520		if (ctx->wait_cond)
2521			kcqe.completion_status = 0;
2522	}
2523
2524	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2525	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2526
2527	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2528	kcqe.fcoe_conn_id = req->conn_id;
2529	kcqe.fcoe_conn_context_id = cid;
2530
2531	cqes[0] = (struct kcqe *) &kcqe;
2532	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2533	return ret;
2534}
2535
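/* Poll until all contexts at or above start_cid have finished their
 * deferred deletes: wait out CTX_FL_DELETE_WAIT, then give each context
 * up to ~100 ms (5 x 20 ms) to drop CTX_FL_OFFLD_START before warning.
 */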
2536static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2537{
2538	struct cnic_local *cp = dev->cnic_priv;
2539	u32 i;
2540
2541	for (i = start_cid; i < cp->max_cid_space; i++) {
2542		struct cnic_context *ctx = &cp->ctx_tbl[i];
2543		int j;
2544
2545		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2546			msleep(10);
2547
2548		for (j = 0; j < 5; j++) {
2549			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2550				break;
2551			msleep(20);
2552		}
2553
2554		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2555			netdev_warn(dev->netdev, "CID %x not deleted\n",
2556				   ctx->cid);
2557	}
2558}
2559
2560static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2561{
2562	union l5cm_specific_data l5_data;
2563	struct cnic_local *cp = dev->cnic_priv;
2564	struct bnx2x *bp = netdev_priv(dev->netdev);
2565	int ret;
2566	u32 cid;
2567
2568	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2569
2570	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2571
2572	memset(&l5_data, 0, sizeof(l5_data));
2573	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2574				  FCOE_CONNECTION_TYPE, &l5_data);
2575	return ret;
2576}
2577
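/* Synthesize an error KCQE for a KWQE that could not be submitted (for
 * example during parity-error recovery).  The layer bits of the KWQE
 * select the ULP (FCoE, iSCSI or L4) and the matching completion opcode,
 * and the completion status is set to the layer's PARITY_ERROR code.
 */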
2578static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2579{
2580	struct cnic_local *cp = dev->cnic_priv;
2581	struct kcqe kcqe;
2582	struct kcqe *cqes[1];
2583	u32 cid;
2584	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2585	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
2586	u32 kcqe_op;
2587	int ulp_type;
2588
2589	cid = kwqe->kwqe_info0;
2590	memset(&kcqe, 0, sizeof(kcqe));
2591
2592	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2593		u32 l5_cid = 0;
2594
2595		ulp_type = CNIC_ULP_FCOE;
2596		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2597			struct fcoe_kwqe_conn_enable_disable *req;
2598
2599			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2600			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2601			cid = req->context_id;
2602			l5_cid = req->conn_id;
2603		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2604			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2605		} else {
2606			return;
2607		}
2608		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2609		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
2610		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2611		kcqe.kcqe_info2 = cid;
2612		kcqe.kcqe_info0 = l5_cid;
2613
2614	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
2615		ulp_type = CNIC_ULP_ISCSI;
2616		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2617			cid = kwqe->kwqe_info1;
2618
2619		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2620		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
2621		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
2622		kcqe.kcqe_info2 = cid;
2623		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2624
2625	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2626		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
2627
2628		ulp_type = CNIC_ULP_L4;
2629		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2630			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2631		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2632			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2633		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2634			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2635		else
2636			return;
2637
2638		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2639				    KCQE_FLAGS_LAYER_MASK_L4;
2640		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2641		l4kcqe->cid = cid;
2642		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2643	} else {
2644		return;
2645	}
2646
2647	cqes[0] = &kcqe;
2648	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2649}
2650
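/* Dispatch loop for iSCSI/L4 KWQEs.  Each handler reports via 'work' how
 * many WQEs it consumed (multi-WQE requests such as OFFLOAD_CONN1 take
 * more than one), so the loop advances i by 'work' rather than by 1.  A
 * -EIO/-EAGAIN failure triggers cnic_bnx2x_kwqe_err() to unblock the ULP.
 */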
2651static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2652					 struct kwqe *wqes[], u32 num_wqes)
2653{
2654	int i, work, ret;
2655	u32 opcode;
2656	struct kwqe *kwqe;
2657
2658	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2659		return -EAGAIN;		/* bnx2x is down */
2660
2661	for (i = 0; i < num_wqes; ) {
2662		kwqe = wqes[i];
2663		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2664		work = 1;
2665
2666		switch (opcode) {
2667		case ISCSI_KWQE_OPCODE_INIT1:
2668			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2669			break;
2670		case ISCSI_KWQE_OPCODE_INIT2:
2671			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2672			break;
2673		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2674			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2675						     num_wqes - i, &work);
2676			break;
2677		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2678			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2679			break;
2680		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2681			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2682			break;
2683		case L4_KWQE_OPCODE_VALUE_CONNECT1:
2684			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2685						 &work);
2686			break;
2687		case L4_KWQE_OPCODE_VALUE_CLOSE:
2688			ret = cnic_bnx2x_close(dev, kwqe);
2689			break;
2690		case L4_KWQE_OPCODE_VALUE_RESET:
2691			ret = cnic_bnx2x_reset(dev, kwqe);
2692			break;
2693		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2694			ret = cnic_bnx2x_offload_pg(dev, kwqe);
2695			break;
2696		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2697			ret = cnic_bnx2x_update_pg(dev, kwqe);
2698			break;
2699		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2700			ret = 0;
2701			break;
2702		default:
2703			ret = 0;
2704			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2705				   opcode);
2706			break;
2707		}
2708		if (ret < 0) {
2709			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2710				   opcode);
2711
2712			/* Possibly bnx2x parity error, send completion
2713			 * to ulp drivers with error code to speed up
2714			 * cleanup and reset recovery.
2715			 */
2716			if (ret == -EIO || ret == -EAGAIN)
2717				cnic_bnx2x_kwqe_err(dev, kwqe);
2718		}
2719		i += work;
2720	}
2721	return 0;
2722}
2723
2724static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2725					struct kwqe *wqes[], u32 num_wqes)
2726{
2727	struct bnx2x *bp = netdev_priv(dev->netdev);
2728	int i, work, ret;
2729	u32 opcode;
2730	struct kwqe *kwqe;
2731
2732	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2733		return -EAGAIN;		/* bnx2x is down */
2734
2735	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
2736		return -EINVAL;
2737
2738	for (i = 0; i < num_wqes; ) {
2739		kwqe = wqes[i];
2740		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2741		work = 1;
2742
2743		switch (opcode) {
2744		case FCOE_KWQE_OPCODE_INIT1:
2745			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2746						    num_wqes - i, &work);
2747			break;
2748		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2749			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2750						    num_wqes - i, &work);
2751			break;
2752		case FCOE_KWQE_OPCODE_ENABLE_CONN:
2753			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2754			break;
2755		case FCOE_KWQE_OPCODE_DISABLE_CONN:
2756			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2757			break;
2758		case FCOE_KWQE_OPCODE_DESTROY_CONN:
2759			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2760			break;
2761		case FCOE_KWQE_OPCODE_DESTROY:
2762			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2763			break;
2764		case FCOE_KWQE_OPCODE_STAT:
2765			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2766			break;
2767		default:
2768			ret = 0;
2769			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2770				   opcode);
2771			break;
2772		}
2773		if (ret < 0) {
2774			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2775				   opcode);
2776
2777			/* Possibly bnx2x parity error, send completion
2778			 * to ulp drivers with error code to speed up
2779			 * cleanup and reset recovery.
2780			 */
2781			if (ret == -EIO || ret == -EAGAIN)
2782				cnic_bnx2x_kwqe_err(dev, kwqe);
2783		}
2784		i += work;
2785	}
2786	return 0;
2787}
2788
2789static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2790				   u32 num_wqes)
2791{
2792	int ret = -EINVAL;
2793	u32 layer_code;
2794
2795	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2796		return -EAGAIN;		/* bnx2x is down */
2797
2798	if (!num_wqes)
2799		return 0;
2800
2801	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2802	switch (layer_code) {
2803	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2804	case KWQE_FLAGS_LAYER_MASK_L4:
2805	case KWQE_FLAGS_LAYER_MASK_L2:
2806		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2807		break;
2808
2809	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2810		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2811		break;
2812	}
2813	return ret;
2814}
2815
2816static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2817{
2818	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2819		return KCQE_FLAGS_LAYER_MASK_L4;
2820
2821	return opflag & KCQE_FLAGS_LAYER_MASK;
2822}
2823
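/* Deliver completed KCQEs to the ULPs.  Consecutive KCQEs sharing the
 * same layer mask are batched into one indicate_kcqes() call, ramrod
 * completions are counted so SPQ credits can be returned through
 * cnic_spq_completion() at the end, and L2 KCQEs are skipped.  A ULP
 * receives the batch through the callback it registered, e.g. (minimal
 * sketch, not part of this file; my_indicate_kcqes is hypothetical):
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.indicate_kcqes	= my_indicate_kcqes,
 *	};
 *	cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 */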
2824static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2825{
2826	struct cnic_local *cp = dev->cnic_priv;
2827	int i, j, comp = 0;
2828
2829	i = 0;
2830	j = 1;
2831	while (num_cqes) {
2832		struct cnic_ulp_ops *ulp_ops;
2833		int ulp_type;
2834		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2835		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2836
2837		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2838			comp++;
2839
2840		while (j < num_cqes) {
2841			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2842
2843			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2844				break;
2845
2846			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2847				comp++;
2848			j++;
2849		}
2850
2851		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2852			ulp_type = CNIC_ULP_RDMA;
2853		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2854			ulp_type = CNIC_ULP_ISCSI;
2855		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2856			ulp_type = CNIC_ULP_FCOE;
2857		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2858			ulp_type = CNIC_ULP_L4;
2859		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2860			goto end;
2861		else {
2862			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2863				   kcqe_op_flag);
2864			goto end;
2865		}
2866
2867		rcu_read_lock();
2868		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2869		if (likely(ulp_ops)) {
2870			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2871						  cp->completed_kcq + i, j);
2872		}
2873		rcu_read_unlock();
2874end:
2875		num_cqes -= j;
2876		i += j;
2877		j = 1;
2878	}
2879	if (unlikely(comp))
2880		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2881}
2882
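/* Collect new KCQEs between the software producer index and the hardware
 * producer index into cp->completed_kcq[] (at most MAX_COMPLETED_KCQE).
 * Entries flagged KCQE_FLAGS_NEXT continue a multi-KCQE group, so only
 * positions where the flag is clear count as a complete boundary;
 * sw_prod_idx is advanced to the last such boundary.
 */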
2883static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2884{
2885	struct cnic_local *cp = dev->cnic_priv;
2886	u16 i, ri, hw_prod, last;
2887	struct kcqe *kcqe;
2888	int kcqe_cnt = 0, last_cnt = 0;
2889
2890	i = ri = last = info->sw_prod_idx;
2891	ri &= MAX_KCQ_IDX;
2892	hw_prod = *info->hw_prod_idx_ptr;
2893	hw_prod = info->hw_idx(hw_prod);
2894
2895	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2896		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2897		cp->completed_kcq[kcqe_cnt++] = kcqe;
2898		i = info->next_idx(i);
2899		ri = i & MAX_KCQ_IDX;
2900		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2901			last_cnt = kcqe_cnt;
2902			last = i;
2903		}
2904	}
2905
2906	info->sw_prod_idx = last;
2907	return last_cnt;
2908}
2909
2910static int cnic_l2_completion(struct cnic_local *cp)
2911{
2912	u16 hw_cons, sw_cons;
2913	struct cnic_uio_dev *udev = cp->udev;
2914	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2915					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
2916	u32 cmd;
2917	int comp = 0;
2918
2919	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2920		return 0;
2921
2922	hw_cons = *cp->rx_cons_ptr;
2923	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2924		hw_cons++;
2925
2926	sw_cons = cp->rx_cons;
2927	while (sw_cons != hw_cons) {
2928		u8 cqe_fp_flags;
2929
2930		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2931		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2932		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2933			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2934			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2935			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2936			    cmd == RAMROD_CMD_ID_ETH_HALT)
2937				comp++;
2938		}
2939		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2940	}
2941	return comp;
2942}
2943
2944static void cnic_chk_pkt_rings(struct cnic_local *cp)
2945{
2946	u16 rx_cons, tx_cons;
2947	int comp = 0;
2948
2949	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2950		return;
2951
2952	rx_cons = *cp->rx_cons_ptr;
2953	tx_cons = *cp->tx_cons_ptr;
2954	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2955		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2956			comp = cnic_l2_completion(cp);
2957
2958		cp->tx_cons = tx_cons;
2959		cp->rx_cons = rx_cons;
2960
2961		if (cp->udev)
2962			uio_event_notify(&cp->udev->cnic_uinfo);
2963	}
2964	if (comp)
2965		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2966}
2967
2968static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2969{
2970	struct cnic_local *cp = dev->cnic_priv;
2971	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2972	int kcqe_cnt;
2973
2974	/* status block index must be read before reading other fields */
2975	rmb();
2976	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2977
2978	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2979
2980		service_kcqes(dev, kcqe_cnt);
2981
2982		/* Tell compiler that status_blk fields can change. */
2983		barrier();
2984		status_idx = (u16) *cp->kcq1.status_idx_ptr;
2985		/* status block index must be read first */
2986		rmb();
2987		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2988	}
2989
2990	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2991
2992	cnic_chk_pkt_rings(cp);
2993
2994	return status_idx;
2995}
2996
2997static int cnic_service_bnx2(void *data, void *status_blk)
2998{
2999	struct cnic_dev *dev = data;
3000
3001	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3002		struct status_block *sblk = status_blk;
3003
3004		return sblk->status_idx;
3005	}
3006
3007	return cnic_service_bnx2_queues(dev);
3008}
3009
3010static void cnic_service_bnx2_msix(struct tasklet_struct *t)
3011{
3012	struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
3013	struct cnic_dev *dev = cp->dev;
3014
3015	cp->last_status_idx = cnic_service_bnx2_queues(dev);
3016
3017	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3018		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3019}
3020
3021static void cnic_doirq(struct cnic_dev *dev)
3022{
3023	struct cnic_local *cp = dev->cnic_priv;
3024
3025	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3026		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
3027
3028		prefetch(cp->status_blk.gen);
3029		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
3030
3031		tasklet_schedule(&cp->cnic_irq_task);
3032	}
3033}
3034
3035static irqreturn_t cnic_irq(int irq, void *dev_instance)
3036{
3037	struct cnic_dev *dev = dev_instance;
3038	struct cnic_local *cp = dev->cnic_priv;
3039
3040	if (cp->ack_int)
3041		cp->ack_int(dev);
3042
3043	cnic_doirq(dev);
3044
3045	return IRQ_HANDLED;
3046}
3047
3048static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3049				      u16 index, u8 op, u8 update)
3050{
3051	struct bnx2x *bp = netdev_priv(dev->netdev);
3052	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
3053		       COMMAND_REG_INT_ACK);
3054	struct igu_ack_register igu_ack;
3055
3056	igu_ack.status_block_index = index;
3057	igu_ack.sb_id_and_flags =
3058			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3059			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3060			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3061			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3062
3063	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3064}
3065
3066static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3067			    u16 index, u8 op, u8 update)
3068{
3069	struct igu_regular cmd_data;
3070	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3071
3072	cmd_data.sb_id_and_flags =
3073		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
3074		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3075		(update << IGU_REGULAR_BUPDATE_SHIFT) |
3076		(op << IGU_REGULAR_ENABLE_INT_SHIFT);
3077
3078
3079	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3080}
3081
3082static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3083{
3084	struct cnic_local *cp = dev->cnic_priv;
3085
3086	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3087			   IGU_INT_DISABLE, 0);
3088}
3089
3090static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3091{
3092	struct cnic_local *cp = dev->cnic_priv;
3093
3094	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3095			IGU_INT_DISABLE, 0);
3096}
3097
3098static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3099{
3100	struct cnic_local *cp = dev->cnic_priv;
3101
3102	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3103			   IGU_INT_ENABLE, 1);
3104}
3105
3106static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3107{
3108	struct cnic_local *cp = dev->cnic_priv;
3109
3110	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3111			IGU_INT_ENABLE, 1);
3112}
3113
3114static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3115{
3116	u32 last_status = *info->status_idx_ptr;
3117	int kcqe_cnt;
3118
3119	/* status block index must be read before reading the KCQ */
3120	rmb();
3121	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3122
3123		service_kcqes(dev, kcqe_cnt);
3124
3125		/* Tell compiler that sblk fields can change. */
3126		barrier();
3127
3128		last_status = *info->status_idx_ptr;
3129		/* status block index must be read before reading the KCQ */
3130		rmb();
3131	}
3132	return last_status;
3133}
3134
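/* Bottom half for bnx2x interrupts.  KCQ1 is drained and its producer
 * index written back; on FCoE-capable (E2+) chips KCQ2 is drained as
 * well, looping until the status index is stable before re-enabling the
 * IGU interrupt.
 */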
3135static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
3136{
3137	struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
3138	struct cnic_dev *dev = cp->dev;
3139	struct bnx2x *bp = netdev_priv(dev->netdev);
3140	u32 status_idx, new_status_idx;
3141
3142	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3143		return;
3144
3145	while (1) {
3146		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3147
3148		CNIC_WR16(dev, cp->kcq1.io_addr,
3149			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3150
3151		if (!CNIC_SUPPORTS_FCOE(bp)) {
3152			cp->arm_int(dev, status_idx);
3153			break;
3154		}
3155
3156		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3157
3158		if (new_status_idx != status_idx)
3159			continue;
3160
3161		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3162			  MAX_KCQ_IDX);
3163
3164		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3165				status_idx, IGU_INT_ENABLE, 1);
3166
3167		break;
3168	}
3169}
3170
3171static int cnic_service_bnx2x(void *data, void *status_blk)
3172{
3173	struct cnic_dev *dev = data;
3174	struct cnic_local *cp = dev->cnic_priv;
3175
3176	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3177		cnic_doirq(dev);
3178
3179	cnic_chk_pkt_rings(cp);
3180
3181	return 0;
3182}
3183
3184static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3185{
3186	struct cnic_ulp_ops *ulp_ops;
3187
3188	if (if_type == CNIC_ULP_ISCSI)
3189		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3190
3191	mutex_lock(&cnic_lock);
3192	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3193					    lockdep_is_held(&cnic_lock));
3194	if (!ulp_ops) {
3195		mutex_unlock(&cnic_lock);
3196		return;
3197	}
3198	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3199	mutex_unlock(&cnic_lock);
3200
3201	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3202		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3203
3204	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3205}
3206
3207static void cnic_ulp_stop(struct cnic_dev *dev)
3208{
3209	struct cnic_local *cp = dev->cnic_priv;
3210	int if_type;
3211
3212	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3213		cnic_ulp_stop_one(cp, if_type);
3214}
3215
3216static void cnic_ulp_start(struct cnic_dev *dev)
3217{
3218	struct cnic_local *cp = dev->cnic_priv;
3219	int if_type;
3220
3221	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3222		struct cnic_ulp_ops *ulp_ops;
3223
3224		mutex_lock(&cnic_lock);
3225		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3226						    lockdep_is_held(&cnic_lock));
3227		if (!ulp_ops || !ulp_ops->cnic_start) {
3228			mutex_unlock(&cnic_lock);
3229			continue;
3230		}
3231		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3232		mutex_unlock(&cnic_lock);
3233
3234		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3235			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3236
3237		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3238	}
3239}
3240
3241static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3242{
3243	struct cnic_local *cp = dev->cnic_priv;
3244	struct cnic_ulp_ops *ulp_ops;
3245	int rc;
3246
3247	mutex_lock(&cnic_lock);
3248	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
3249					    lockdep_is_held(&cnic_lock));
3250	if (ulp_ops && ulp_ops->cnic_get_stats)
3251		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3252	else
3253		rc = -ENODEV;
3254	mutex_unlock(&cnic_lock);
3255	return rc;
3256}
3257
3258static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3259{
3260	struct cnic_dev *dev = data;
3261	int ulp_type = CNIC_ULP_ISCSI;
3262
3263	switch (info->cmd) {
3264	case CNIC_CTL_STOP_CMD:
3265		cnic_hold(dev);
3266
3267		cnic_ulp_stop(dev);
3268		cnic_stop_hw(dev);
3269
3270		cnic_put(dev);
3271		break;
3272	case CNIC_CTL_START_CMD:
3273		cnic_hold(dev);
3274
3275		if (!cnic_start_hw(dev))
3276			cnic_ulp_start(dev);
3277
3278		cnic_put(dev);
3279		break;
3280	case CNIC_CTL_STOP_ISCSI_CMD: {
3281		struct cnic_local *cp = dev->cnic_priv;
3282		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3283		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3284		break;
3285	}
3286	case CNIC_CTL_COMPLETION_CMD: {
3287		struct cnic_ctl_completion *comp = &info->data.comp;
3288		u32 cid = BNX2X_SW_CID(comp->cid);
3289		u32 l5_cid;
3290		struct cnic_local *cp = dev->cnic_priv;
3291
3292		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3293			break;
3294
3295		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3296			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3297
3298			if (unlikely(comp->error)) {
3299				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3300				netdev_err(dev->netdev,
3301					   "CID %x CFC delete comp error %x\n",
3302					   cid, comp->error);
3303			}
3304
3305			ctx->wait_cond = 1;
3306			wake_up(&ctx->waitq);
3307		}
3308		break;
3309	}
3310	case CNIC_CTL_FCOE_STATS_GET_CMD:
3311		ulp_type = CNIC_ULP_FCOE;
3312		fallthrough;
3313	case CNIC_CTL_ISCSI_STATS_GET_CMD:
3314		cnic_hold(dev);
3315		cnic_copy_ulp_stats(dev, ulp_type);
3316		cnic_put(dev);
3317		break;
3318
3319	default:
3320		return -EINVAL;
3321	}
3322	return 0;
3323}
3324
3325static void cnic_ulp_init(struct cnic_dev *dev)
3326{
3327	int i;
3328	struct cnic_local *cp = dev->cnic_priv;
3329
3330	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3331		struct cnic_ulp_ops *ulp_ops;
3332
3333		mutex_lock(&cnic_lock);
3334		ulp_ops = cnic_ulp_tbl_prot(i);
3335		if (!ulp_ops || !ulp_ops->cnic_init) {
3336			mutex_unlock(&cnic_lock);
3337			continue;
3338		}
3339		ulp_get(ulp_ops);
3340		mutex_unlock(&cnic_lock);
3341
3342		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3343			ulp_ops->cnic_init(dev);
3344
3345		ulp_put(ulp_ops);
3346	}
3347}
3348
3349static void cnic_ulp_exit(struct cnic_dev *dev)
3350{
3351	int i;
3352	struct cnic_local *cp = dev->cnic_priv;
3353
3354	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3355		struct cnic_ulp_ops *ulp_ops;
3356
3357		mutex_lock(&cnic_lock);
3358		ulp_ops = cnic_ulp_tbl_prot(i);
3359		if (!ulp_ops || !ulp_ops->cnic_exit) {
3360			mutex_unlock(&cnic_lock);
3361			continue;
3362		}
3363		ulp_get(ulp_ops);
3364		mutex_unlock(&cnic_lock);
3365
3366		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3367			ulp_ops->cnic_exit(dev);
3368
3369		ulp_put(ulp_ops);
3370	}
3371}
3372
3373static int cnic_cm_offload_pg(struct cnic_sock *csk)
3374{
3375	struct cnic_dev *dev = csk->dev;
3376	struct l4_kwq_offload_pg *l4kwqe;
3377	struct kwqe *wqes[1];
3378
3379	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3380	memset(l4kwqe, 0, sizeof(*l4kwqe));
3381	wqes[0] = (struct kwqe *) l4kwqe;
3382
3383	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3384	l4kwqe->flags =
3385		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3386	l4kwqe->l2hdr_nbytes = ETH_HLEN;
3387
3388	l4kwqe->da0 = csk->ha[0];
3389	l4kwqe->da1 = csk->ha[1];
3390	l4kwqe->da2 = csk->ha[2];
3391	l4kwqe->da3 = csk->ha[3];
3392	l4kwqe->da4 = csk->ha[4];
3393	l4kwqe->da5 = csk->ha[5];
3394
3395	l4kwqe->sa0 = dev->mac_addr[0];
3396	l4kwqe->sa1 = dev->mac_addr[1];
3397	l4kwqe->sa2 = dev->mac_addr[2];
3398	l4kwqe->sa3 = dev->mac_addr[3];
3399	l4kwqe->sa4 = dev->mac_addr[4];
3400	l4kwqe->sa5 = dev->mac_addr[5];
3401
3402	l4kwqe->etype = ETH_P_IP;
3403	l4kwqe->ipid_start = DEF_IPID_START;
3404	l4kwqe->host_opaque = csk->l5_cid;
3405
3406	if (csk->vlan_id) {
3407		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3408		l4kwqe->vlan_tag = csk->vlan_id;
3409		l4kwqe->l2hdr_nbytes += 4;
3410	}
3411
3412	return dev->submit_kwqes(dev, wqes, 1);
3413}
3414
3415static int cnic_cm_update_pg(struct cnic_sock *csk)
3416{
3417	struct cnic_dev *dev = csk->dev;
3418	struct l4_kwq_update_pg *l4kwqe;
3419	struct kwqe *wqes[1];
3420
3421	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3422	memset(l4kwqe, 0, sizeof(*l4kwqe));
3423	wqes[0] = (struct kwqe *) l4kwqe;
3424
3425	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3426	l4kwqe->flags =
3427		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3428	l4kwqe->pg_cid = csk->pg_cid;
3429
3430	l4kwqe->da0 = csk->ha[0];
3431	l4kwqe->da1 = csk->ha[1];
3432	l4kwqe->da2 = csk->ha[2];
3433	l4kwqe->da3 = csk->ha[3];
3434	l4kwqe->da4 = csk->ha[4];
3435	l4kwqe->da5 = csk->ha[5];
3436
3437	l4kwqe->pg_host_opaque = csk->l5_cid;
3438	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3439
3440	return dev->submit_kwqes(dev, wqes, 1);
3441}
3442
3443static int cnic_cm_upload_pg(struct cnic_sock *csk)
3444{
3445	struct cnic_dev *dev = csk->dev;
3446	struct l4_kwq_upload *l4kwqe;
3447	struct kwqe *wqes[1];
3448
3449	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3450	memset(l4kwqe, 0, sizeof(*l4kwqe));
3451	wqes[0] = (struct kwqe *) l4kwqe;
3452
3453	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3454	l4kwqe->flags =
3455		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3456	l4kwqe->cid = csk->pg_cid;
3457
3458	return dev->submit_kwqes(dev, wqes, 1);
3459}
3460
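/* Build the CONNECT KWQE chain for a socket: req1 (addresses, ports and
 * TCP option flags) and req3 (keepalive, TOS/TTL, buffers, MSS), with an
 * extra req2 inserted for IPv6 to carry the upper address words.  The MSS
 * is derived from the path MTU minus the IP and TCP header sizes.
 */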
3461static int cnic_cm_conn_req(struct cnic_sock *csk)
3462{
3463	struct cnic_dev *dev = csk->dev;
3464	struct l4_kwq_connect_req1 *l4kwqe1;
3465	struct l4_kwq_connect_req2 *l4kwqe2;
3466	struct l4_kwq_connect_req3 *l4kwqe3;
3467	struct kwqe *wqes[3];
3468	u8 tcp_flags = 0;
3469	int num_wqes = 2;
3470
3471	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3472	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3473	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3474	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3475	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3476	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3477
3478	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3479	l4kwqe3->flags =
3480		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3481	l4kwqe3->ka_timeout = csk->ka_timeout;
3482	l4kwqe3->ka_interval = csk->ka_interval;
3483	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3484	l4kwqe3->tos = csk->tos;
3485	l4kwqe3->ttl = csk->ttl;
3486	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3487	l4kwqe3->pmtu = csk->mtu;
3488	l4kwqe3->rcv_buf = csk->rcv_buf;
3489	l4kwqe3->snd_buf = csk->snd_buf;
3490	l4kwqe3->seed = csk->seed;
3491
3492	wqes[0] = (struct kwqe *) l4kwqe1;
3493	if (test_bit(SK_F_IPV6, &csk->flags)) {
3494		wqes[1] = (struct kwqe *) l4kwqe2;
3495		wqes[2] = (struct kwqe *) l4kwqe3;
3496		num_wqes = 3;
3497
3498		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3499		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3500		l4kwqe2->flags =
3501			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3502			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3503		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3504		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3505		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3506		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3507		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3508		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3509		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3510			       sizeof(struct tcphdr);
3511	} else {
3512		wqes[1] = (struct kwqe *) l4kwqe3;
3513		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3514			       sizeof(struct tcphdr);
3515	}
3516
3517	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3518	l4kwqe1->flags =
3519		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3520		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3521	l4kwqe1->cid = csk->cid;
3522	l4kwqe1->pg_cid = csk->pg_cid;
3523	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3524	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3525	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3526	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3527	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3528		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3529	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3530		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3531	if (csk->tcp_flags & SK_TCP_NAGLE)
3532		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3533	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3534		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3535	if (csk->tcp_flags & SK_TCP_SACK)
3536		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3537	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3538		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3539
3540	l4kwqe1->tcp_flags = tcp_flags;
3541
3542	return dev->submit_kwqes(dev, wqes, num_wqes);
3543}
3544
3545static int cnic_cm_close_req(struct cnic_sock *csk)
3546{
3547	struct cnic_dev *dev = csk->dev;
3548	struct l4_kwq_close_req *l4kwqe;
3549	struct kwqe *wqes[1];
3550
3551	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3552	memset(l4kwqe, 0, sizeof(*l4kwqe));
3553	wqes[0] = (struct kwqe *) l4kwqe;
3554
3555	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3556	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3557	l4kwqe->cid = csk->cid;
3558
3559	return dev->submit_kwqes(dev, wqes, 1);
3560}
3561
3562static int cnic_cm_abort_req(struct cnic_sock *csk)
3563{
3564	struct cnic_dev *dev = csk->dev;
3565	struct l4_kwq_reset_req *l4kwqe;
3566	struct kwqe *wqes[1];
3567
3568	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3569	memset(l4kwqe, 0, sizeof(*l4kwqe));
3570	wqes[0] = (struct kwqe *) l4kwqe;
3571
3572	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3573	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3574	l4kwqe->cid = csk->cid;
3575
3576	return dev->submit_kwqes(dev, wqes, 1);
3577}
3578
3579static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3580			  u32 l5_cid, struct cnic_sock **csk, void *context)
3581{
3582	struct cnic_local *cp = dev->cnic_priv;
3583	struct cnic_sock *csk1;
3584
3585	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3586		return -EINVAL;
3587
3588	if (cp->ctx_tbl) {
3589		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3590
3591		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3592			return -EAGAIN;
3593	}
3594
3595	csk1 = &cp->csk_tbl[l5_cid];
3596	if (atomic_read(&csk1->ref_count))
3597		return -EAGAIN;
3598
3599	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3600		return -EBUSY;
3601
3602	csk1->dev = dev;
3603	csk1->cid = cid;
3604	csk1->l5_cid = l5_cid;
3605	csk1->ulp_type = ulp_type;
3606	csk1->context = context;
3607
3608	csk1->ka_timeout = DEF_KA_TIMEOUT;
3609	csk1->ka_interval = DEF_KA_INTERVAL;
3610	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3611	csk1->tos = DEF_TOS;
3612	csk1->ttl = DEF_TTL;
3613	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3614	csk1->rcv_buf = DEF_RCV_BUF;
3615	csk1->snd_buf = DEF_SND_BUF;
3616	csk1->seed = DEF_SEED;
3617	csk1->tcp_flags = 0;
3618
3619	*csk = csk1;
3620	return 0;
3621}
3622
3623static void cnic_cm_cleanup(struct cnic_sock *csk)
3624{
3625	if (csk->src_port) {
3626		struct cnic_dev *dev = csk->dev;
3627		struct cnic_local *cp = dev->cnic_priv;
3628
3629		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3630		csk->src_port = 0;
3631	}
3632}
3633
3634static void cnic_close_conn(struct cnic_sock *csk)
3635{
3636	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3637		cnic_cm_upload_pg(csk);
3638		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3639	}
3640	cnic_cm_cleanup(csk);
3641}
3642
3643static int cnic_cm_destroy(struct cnic_sock *csk)
3644{
3645	if (!cnic_in_use(csk))
3646		return -EINVAL;
3647
3648	csk_hold(csk);
3649	clear_bit(SK_F_INUSE, &csk->flags);
3650	smp_mb__after_atomic();
3651	while (atomic_read(&csk->ref_count) != 1)
3652		msleep(1);
3653	cnic_cm_cleanup(csk);
3654
3655	csk->flags = 0;
3656	csk_put(csk);
3657	return 0;
3658}
3659
3660static inline u16 cnic_get_vlan(struct net_device *dev,
3661				struct net_device **vlan_dev)
3662{
3663	if (is_vlan_dev(dev)) {
3664		*vlan_dev = vlan_dev_real_dev(dev);
3665		return vlan_dev_vlan_id(dev);
3666	}
3667	*vlan_dev = dev;
3668	return 0;
3669}
3670
3671static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3672			     struct dst_entry **dst)
3673{
3674#if defined(CONFIG_INET)
3675	struct rtable *rt;
3676
3677	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3678	if (!IS_ERR(rt)) {
3679		*dst = &rt->dst;
3680		return 0;
3681	}
3682	return PTR_ERR(rt);
3683#else
3684	return -ENETUNREACH;
3685#endif
3686}
3687
3688static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3689			     struct dst_entry **dst)
3690{
3691#if IS_ENABLED(CONFIG_IPV6)
3692	struct flowi6 fl6;
3693
3694	memset(&fl6, 0, sizeof(fl6));
3695	fl6.daddr = dst_addr->sin6_addr;
3696	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3697		fl6.flowi6_oif = dst_addr->sin6_scope_id;
3698
3699	*dst = ip6_route_output(&init_net, NULL, &fl6);
3700	if ((*dst)->error) {
3701		dst_release(*dst);
3702		*dst = NULL;
3703		return -ENETUNREACH;
3704	} else
3705		return 0;
3706#endif
3707
3708	return -ENETUNREACH;
3709}
3710
3711static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3712					   int ulp_type)
3713{
3714	struct cnic_dev *dev = NULL;
3715	struct dst_entry *dst;
3716	struct net_device *netdev = NULL;
3717	int err = -ENETUNREACH;
3718
3719	if (dst_addr->sin_family == AF_INET)
3720		err = cnic_get_v4_route(dst_addr, &dst);
3721	else if (dst_addr->sin_family == AF_INET6) {
3722		struct sockaddr_in6 *dst_addr6 =
3723			(struct sockaddr_in6 *) dst_addr;
3724
3725		err = cnic_get_v6_route(dst_addr6, &dst);
3726	} else
3727		return NULL;
3728
3729	if (err)
3730		return NULL;
3731
3732	if (!dst->dev)
3733		goto done;
3734
3735	cnic_get_vlan(dst->dev, &netdev);
3736
3737	dev = cnic_from_netdev(netdev);
3738
3739done:
3740	dst_release(dst);
3741	if (dev)
3742		cnic_put(dev);
3743	return dev;
3744}
3745
3746static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3747{
3748	struct cnic_dev *dev = csk->dev;
3749	struct cnic_local *cp = dev->cnic_priv;
3750
3751	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3752}
3753
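/* Resolve routing details for a connect request: pick IPv4 vs IPv6 from
 * the sockaddr family, look up the destination route to learn the VLAN
 * and path MTU (used only if the route egresses via our netdev), and
 * reserve a local source port from csk_port_tbl, honouring a
 * caller-supplied port inside [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX).
 */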
3754static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3755{
3756	struct cnic_dev *dev = csk->dev;
3757	struct cnic_local *cp = dev->cnic_priv;
3758	int is_v6, rc = 0;
3759	struct dst_entry *dst = NULL;
3760	struct net_device *realdev;
3761	__be16 local_port;
3762	u32 port_id;
3763
3764	if (saddr->local.v6.sin6_family == AF_INET6 &&
3765	    saddr->remote.v6.sin6_family == AF_INET6)
3766		is_v6 = 1;
3767	else if (saddr->local.v4.sin_family == AF_INET &&
3768		 saddr->remote.v4.sin_family == AF_INET)
3769		is_v6 = 0;
3770	else
3771		return -EINVAL;
3772
3773	clear_bit(SK_F_IPV6, &csk->flags);
3774
3775	if (is_v6) {
3776		set_bit(SK_F_IPV6, &csk->flags);
3777		cnic_get_v6_route(&saddr->remote.v6, &dst);
3778
3779		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3780		       sizeof(struct in6_addr));
3781		csk->dst_port = saddr->remote.v6.sin6_port;
3782		local_port = saddr->local.v6.sin6_port;
3783
3784	} else {
3785		cnic_get_v4_route(&saddr->remote.v4, &dst);
3786
3787		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3788		csk->dst_port = saddr->remote.v4.sin_port;
3789		local_port = saddr->local.v4.sin_port;
3790	}
3791
3792	csk->vlan_id = 0;
3793	csk->mtu = dev->netdev->mtu;
3794	if (dst && dst->dev) {
3795		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3796		if (realdev == dev->netdev) {
3797			csk->vlan_id = vlan;
3798			csk->mtu = dst_mtu(dst);
3799		}
3800	}
3801
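	/* Use the caller's local port only if it falls inside the CNIC
	 * range and is still free; otherwise allocate a new port id below.
	 */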
3802	port_id = be16_to_cpu(local_port);
3803	if (port_id >= CNIC_LOCAL_PORT_MIN &&
3804	    port_id < CNIC_LOCAL_PORT_MAX) {
3805		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3806			port_id = 0;
3807	} else
3808		port_id = 0;
3809
3810	if (!port_id) {
3811		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3812		if (port_id == -1) {
3813			rc = -ENOMEM;
3814			goto err_out;
3815		}
3816		local_port = cpu_to_be16(port_id);
3817	}
3818	csk->src_port = local_port;
3819
3820err_out:
3821	dst_release(dst);
3822	return rc;
3823}
3824
3825static void cnic_init_csk_state(struct cnic_sock *csk)
3826{
3827	csk->state = 0;
3828	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3829	clear_bit(SK_F_CLOSING, &csk->flags);
3830}
3831
3832static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3833{
3834	struct cnic_local *cp = csk->dev->cnic_priv;
3835	int err = 0;
3836
3837	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3838		return -EOPNOTSUPP;
3839
3840	if (!cnic_in_use(csk))
3841		return -EINVAL;
3842
3843	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3844		return -EINVAL;
3845
3846	cnic_init_csk_state(csk);
3847
3848	err = cnic_get_route(csk, saddr);
3849	if (err)
3850		goto err_out;
3851
3852	err = cnic_resolve_addr(csk, saddr);
3853	if (!err)
3854		return 0;
3855
3856err_out:
3857	clear_bit(SK_F_CONNECT_START, &csk->flags);
3858	return err;
3859}
3860
3861static int cnic_cm_abort(struct cnic_sock *csk)
3862{
3863	struct cnic_local *cp = csk->dev->cnic_priv;
3864	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3865
3866	if (!cnic_in_use(csk))
3867		return -EINVAL;
3868
3869	if (cnic_abort_prep(csk))
3870		return cnic_cm_abort_req(csk);
3871
3872	/* Getting here means that we haven't started connect, or
3873	 * connect was not successful, or it has been reset by the target.
3874	 */
3875
3876	cp->close_conn(csk, opcode);
3877	if (csk->state != opcode) {
3878		/* Wait for remote reset sequence to complete */
3879		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3880			msleep(1);
3881
3882		return -EALREADY;
3883	}
3884
3885	return 0;
3886}
3887
3888static int cnic_cm_close(struct cnic_sock *csk)
3889{
3890	if (!cnic_in_use(csk))
3891		return -EINVAL;
3892
3893	if (cnic_close_prep(csk)) {
3894		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3895		return cnic_cm_close_req(csk);
3896	} else {
3897		/* Wait for remote reset sequence to complete */
3898		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3899			msleep(1);
3900
3901		return -EALREADY;
3902	}
3903	return 0;
3904}
3905
3906static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3907			   u8 opcode)
3908{
3909	struct cnic_ulp_ops *ulp_ops;
3910	int ulp_type = csk->ulp_type;
3911
3912	rcu_read_lock();
3913	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3914	if (ulp_ops) {
3915		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3916			ulp_ops->cm_connect_complete(csk);
3917		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3918			ulp_ops->cm_close_complete(csk);
3919		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3920			ulp_ops->cm_remote_abort(csk);
3921		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3922			ulp_ops->cm_abort_complete(csk);
3923		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3924			ulp_ops->cm_remote_close(csk);
3925	}
3926	rcu_read_unlock();
3927}
3928
3929static int cnic_cm_set_pg(struct cnic_sock *csk)
3930{
3931	if (cnic_offld_prep(csk)) {
3932		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3933			cnic_cm_update_pg(csk);
3934		else
3935			cnic_cm_offload_pg(csk);
3936	}
3937	return 0;
3938}
3939
3940static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3941{
3942	struct cnic_local *cp = dev->cnic_priv;
3943	u32 l5_cid = kcqe->pg_host_opaque;
3944	u8 opcode = kcqe->op_code;
3945	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3946
3947	csk_hold(csk);
3948	if (!cnic_in_use(csk))
3949		goto done;
3950
3951	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3952		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3953		goto done;
3954	}
3955	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3956	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3957		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3958		cnic_cm_upcall(cp, csk,
3959			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3960		goto done;
3961	}
3962
3963	csk->pg_cid = kcqe->pg_cid;
3964	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3965	cnic_cm_conn_req(csk);
3966
3967done:
3968	csk_put(csk);
3969}
3970
3971static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3972{
3973	struct cnic_local *cp = dev->cnic_priv;
3974	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3975	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3976	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3977
3978	ctx->timestamp = jiffies;
3979	ctx->wait_cond = 1;
3980	wake_up(&ctx->waitq);
3981}
3982
3983static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3984{
3985	struct cnic_local *cp = dev->cnic_priv;
3986	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3987	u8 opcode = l4kcqe->op_code;
3988	u32 l5_cid;
3989	struct cnic_sock *csk;
3990
3991	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3992		cnic_process_fcoe_term_conn(dev, kcqe);
3993		return;
3994	}
3995	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3996	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3997		cnic_cm_process_offld_pg(dev, l4kcqe);
3998		return;
3999	}
4000
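	/* Ramrod completions (opcode bit 7 set) report the connection
	 * in 'cid'; normal L4 KCQEs carry it in 'conn_id'.
	 */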
4001	l5_cid = l4kcqe->conn_id;
4002	if (opcode & 0x80)
4003		l5_cid = l4kcqe->cid;
4004	if (l5_cid >= MAX_CM_SK_TBL_SZ)
4005		return;
4006
4007	csk = &cp->csk_tbl[l5_cid];
4008	csk_hold(csk);
4009
4010	if (!cnic_in_use(csk)) {
4011		csk_put(csk);
4012		return;
4013	}
4014
4015	switch (opcode) {
4016	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
4017		if (l4kcqe->status != 0) {
4018			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4019			cnic_cm_upcall(cp, csk,
4020				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
4021		}
4022		break;
4023	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
4024		if (l4kcqe->status == 0)
4025			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
4026		else if (l4kcqe->status ==
4027			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4028			set_bit(SK_F_HW_ERR, &csk->flags);
4029
4030		smp_mb__before_atomic();
4031		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4032		cnic_cm_upcall(cp, csk, opcode);
4033		break;
4034
4035	case L5CM_RAMROD_CMD_ID_CLOSE: {
4036		struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
4037
4038		if (l4kcqe->status == 0 && l5kcqe->completion_status == 0)
4039			break;
4040
4041		netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4042			    l4kcqe->status, l5kcqe->completion_status);
4043		opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4044	}
4045		fallthrough;
4046	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4047	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4048	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4049	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4050	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4051		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4052			set_bit(SK_F_HW_ERR, &csk->flags);
4053
4054		cp->close_conn(csk, opcode);
4055		break;
4056
4057	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
4058		/* after we already sent CLOSE_REQ */
4059		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4060		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
4061		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
4062			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
4063		else
4064			cnic_cm_upcall(cp, csk, opcode);
4065		break;
4066	}
4067	csk_put(csk);
4068}
4069
4070static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
4071{
4072	struct cnic_dev *dev = data;
4073	int i;
4074
4075	for (i = 0; i < num; i++)
4076		cnic_cm_process_kcqe(dev, kcqe[i]);
4077}
4078
4079static struct cnic_ulp_ops cm_ulp_ops = {
4080	.indicate_kcqes		= cnic_cm_indicate_kcqe,
4081};
4082
4083static void cnic_cm_free_mem(struct cnic_dev *dev)
4084{
4085	struct cnic_local *cp = dev->cnic_priv;
4086
4087	kvfree(cp->csk_tbl);
4088	cp->csk_tbl = NULL;
4089	cnic_free_id_tbl(&cp->csk_port_tbl);
4090}
4091
4092static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4093{
4094	struct cnic_local *cp = dev->cnic_priv;
4095	u32 port_id;
4096	int i;
4097
4098	cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
4099			       GFP_KERNEL);
4100	if (!cp->csk_tbl)
4101		return -ENOMEM;
4102
4103	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
4104		atomic_set(&cp->csk_tbl[i].ref_count, 0);
4105
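	/* Start the local port allocator at a random offset in the range */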
4106	port_id = get_random_u32_below(CNIC_LOCAL_PORT_RANGE);
4107	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
4108			     CNIC_LOCAL_PORT_MIN, port_id)) {
4109		cnic_cm_free_mem(dev);
4110		return -ENOMEM;
4111	}
4112	return 0;
4113}
4114
4115static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4116{
4117	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4118		/* Unsolicited RESET_COMP or RESET_RECEIVED */
4119		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4120		csk->state = opcode;
4121	}
4122
4123	/* 1. If event opcode matches the expected event in csk->state
4124	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4125	 *    event
4126	 * 3. If the expected event is 0, meaning the connection was
4127	 *    never established, we accept the opcode from cm_abort.
4128	 */
4129	if (opcode == csk->state || csk->state == 0 ||
4130	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4131	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4132		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4133			if (csk->state == 0)
4134				csk->state = opcode;
4135			return 1;
4136		}
4137	}
4138	return 0;
4139}
4140
4141static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4142{
4143	struct cnic_dev *dev = csk->dev;
4144	struct cnic_local *cp = dev->cnic_priv;
4145
4146	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4147		cnic_cm_upcall(cp, csk, opcode);
4148		return;
4149	}
4150
4151	clear_bit(SK_F_CONNECT_START, &csk->flags);
4152	cnic_close_conn(csk);
4153	csk->state = opcode;
4154	cnic_cm_upcall(cp, csk, opcode);
4155}
4156
4157static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4158{
4159}
4160
4161static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4162{
4163	u32 seed;
4164
4165	seed = get_random_u32();
4166	cnic_ctx_wr(dev, 45, 0, seed);
4167	return 0;
4168}
4169
4170static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4171{
4172	struct cnic_dev *dev = csk->dev;
4173	struct cnic_local *cp = dev->cnic_priv;
4174	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4175	union l5cm_specific_data l5_data;
4176	u32 cmd = 0;
4177	int close_complete = 0;
4178
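	/* Teardown is staged: a close/reset first issues SEARCHER_DELETE
	 * (unless a HW error occurred or the PG was never offloaded), then
	 * TERMINATE_OFFLOAD, and only then completes the close locally.
	 */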
4179	switch (opcode) {
4180	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4181	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4182	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4183		if (cnic_ready_to_close(csk, opcode)) {
4184			if (test_bit(SK_F_HW_ERR, &csk->flags))
4185				close_complete = 1;
4186			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4187				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4188			else
4189				close_complete = 1;
4190		}
4191		break;
4192	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4193		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4194		break;
4195	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4196		close_complete = 1;
4197		break;
4198	}
4199	if (cmd) {
4200		memset(&l5_data, 0, sizeof(l5_data));
4201
4202		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4203				    &l5_data);
4204	} else if (close_complete) {
4205		ctx->timestamp = jiffies;
4206		cnic_close_conn(csk);
4207		cnic_cm_upcall(cp, csk, csk->state);
4208	}
4209}
4210
4211static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4212{
4213	struct cnic_local *cp = dev->cnic_priv;
4214
4215	if (!cp->ctx_tbl)
4216		return;
4217
4218	if (!netif_running(dev->netdev))
4219		return;
4220
4221	cnic_bnx2x_delete_wait(dev, 0);
4222
4223	cancel_delayed_work(&cp->delete_task);
4224	flush_workqueue(cnic_wq);
4225
4226	if (atomic_read(&cp->iscsi_conn) != 0)
4227		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4228			    atomic_read(&cp->iscsi_conn));
4229}
4230
4231static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4232{
4233	struct bnx2x *bp = netdev_priv(dev->netdev);
4234	u32 pfid = bp->pfid;
4235	u32 port = BP_PORT(bp);
4236
4237	cnic_init_bnx2x_mac(dev);
4238	cnic_bnx2x_set_tcp_options(dev, 0, 1);
4239
4240	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4241		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4242
4243	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4244		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4245	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4246		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4247		DEF_MAX_DA_COUNT);
4248
4249	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4250		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4251	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4252		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4253	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4254		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4255	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4256		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4257
4258	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4259		DEF_MAX_CWND);
4260	return 0;
4261}
4262
4263static void cnic_delete_task(struct work_struct *work)
4264{
4265	struct cnic_local *cp;
4266	struct cnic_dev *dev;
4267	u32 i;
4268	int need_resched = 0;
4269
4270	cp = container_of(work, struct cnic_local, delete_task.work);
4271	dev = cp->dev;
4272
4273	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4274		struct drv_ctl_info info;
4275
4276		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4277
4278		memset(&info, 0, sizeof(struct drv_ctl_info));
4279		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4280		cp->ethdev->drv_ctl(dev->netdev, &info);
4281	}
4282
4283	for (i = 0; i < cp->max_cid_space; i++) {
4284		struct cnic_context *ctx = &cp->ctx_tbl[i];
4285		int err;
4286
4287		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4288		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4289			continue;
4290
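		/* Allow a ~2 second grace period after the last completion
		 * before destroying the context.
		 */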
4291		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4292			need_resched = 1;
4293			continue;
4294		}
4295
4296		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4297			continue;
4298
4299		err = cnic_bnx2x_destroy_ramrod(dev, i);
4300
4301		cnic_free_bnx2x_conn_resc(dev, i);
4302		if (!err) {
4303			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4304				atomic_dec(&cp->iscsi_conn);
4305
4306			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4307		}
4308	}
4309
4310	if (need_resched)
4311		queue_delayed_work(cnic_wq, &cp->delete_task,
4312				   msecs_to_jiffies(10));
4313
4314}
4315
4316static int cnic_cm_open(struct cnic_dev *dev)
4317{
4318	struct cnic_local *cp = dev->cnic_priv;
4319	int err;
4320
4321	err = cnic_cm_alloc_mem(dev);
4322	if (err)
4323		return err;
4324
4325	err = cp->start_cm(dev);
4326
4327	if (err)
4328		goto err_out;
4329
4330	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4331
4332	dev->cm_create = cnic_cm_create;
4333	dev->cm_destroy = cnic_cm_destroy;
4334	dev->cm_connect = cnic_cm_connect;
4335	dev->cm_abort = cnic_cm_abort;
4336	dev->cm_close = cnic_cm_close;
4337	dev->cm_select_dev = cnic_cm_select_dev;
4338
4339	cp->ulp_handle[CNIC_ULP_L4] = dev;
4340	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4341	return 0;
4342
4343err_out:
4344	cnic_cm_free_mem(dev);
4345	return err;
4346}
4347
4348static int cnic_cm_shutdown(struct cnic_dev *dev)
4349{
4350	struct cnic_local *cp = dev->cnic_priv;
4351	int i;
4352
4353	if (!cp->csk_tbl)
4354		return 0;
4355
4356	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4357		struct cnic_sock *csk = &cp->csk_tbl[i];
4358
4359		clear_bit(SK_F_INUSE, &csk->flags);
4360		cnic_cm_cleanup(csk);
4361	}
4362	cnic_cm_free_mem(dev);
4363
4364	return 0;
4365}
4366
4367static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4368{
4369	u32 cid_addr;
4370	int i;
4371
4372	cid_addr = GET_CID_ADDR(cid);
4373
4374	for (i = 0; i < CTX_SIZE; i += 4)
4375		cnic_ctx_wr(dev, cid_addr, i, 0);
4376}
4377
4378static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4379{
4380	struct cnic_local *cp = dev->cnic_priv;
4381	int ret = 0, i;
4382	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4383
4384	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4385		return 0;
4386
4387	for (i = 0; i < cp->ctx_blks; i++) {
4388		int j;
4389		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4390		u32 val;
4391
4392		memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
4393
4394		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4395			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4396		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4397			(u64) cp->ctx_arr[i].mapping >> 32);
4398		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4399			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
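		/* Poll up to 50 us for the page-table write to complete */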
4400		for (j = 0; j < 10; j++) {
4401
4402			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4403			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4404				break;
4405			udelay(5);
4406		}
4407		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4408			ret = -EBUSY;
4409			break;
4410		}
4411	}
4412	return ret;
4413}
4414
4415static void cnic_free_irq(struct cnic_dev *dev)
4416{
4417	struct cnic_local *cp = dev->cnic_priv;
4418	struct cnic_eth_dev *ethdev = cp->ethdev;
4419
4420	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4421		cp->disable_int_sync(dev);
4422		tasklet_kill(&cp->cnic_irq_task);
4423		free_irq(ethdev->irq_arr[0].vector, dev);
4424	}
4425}
4426
4427static int cnic_request_irq(struct cnic_dev *dev)
4428{
4429	struct cnic_local *cp = dev->cnic_priv;
4430	struct cnic_eth_dev *ethdev = cp->ethdev;
4431	int err;
4432
4433	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4434	if (err)
4435		tasklet_disable(&cp->cnic_irq_task);
4436
4437	return err;
4438}
4439
4440static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4441{
4442	struct cnic_local *cp = dev->cnic_priv;
4443	struct cnic_eth_dev *ethdev = cp->ethdev;
4444
4445	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4446		int err, i = 0;
4447		int sblk_num = cp->status_blk_num;
4448		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4449			   BNX2_HC_SB_CONFIG_1;
4450
4451		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4452
4453		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4454		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4455		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4456
4457		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4458		tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
4459		err = cnic_request_irq(dev);
4460		if (err)
4461			return err;
4462
4463		while (cp->status_blk.bnx2->status_completion_producer_index &&
4464		       i < 10) {
4465			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4466				1 << (11 + sblk_num));
4467			udelay(10);
4468			i++;
4469			barrier();
4470		}
4471		if (cp->status_blk.bnx2->status_completion_producer_index) {
4472			cnic_free_irq(dev);
4473			goto failed;
4474		}
4475
4476	} else {
4477		struct status_block *sblk = cp->status_blk.gen;
4478		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4479		int i = 0;
4480
4481		while (sblk->status_completion_producer_index && i < 10) {
4482			CNIC_WR(dev, BNX2_HC_COMMAND,
4483				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4484			udelay(10);
4485			i++;
4486			barrier();
4487		}
4488		if (sblk->status_completion_producer_index)
4489			goto failed;
4490
4491	}
4492	return 0;
4493
4494failed:
4495	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4496	return -EBUSY;
4497}
4498
4499static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4500{
4501	struct cnic_local *cp = dev->cnic_priv;
4502	struct cnic_eth_dev *ethdev = cp->ethdev;
4503
4504	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4505		return;
4506
4507	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4508		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4509}
4510
4511static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4512{
4513	struct cnic_local *cp = dev->cnic_priv;
4514	struct cnic_eth_dev *ethdev = cp->ethdev;
4515
4516	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4517		return;
4518
4519	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4520		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4521	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4522	synchronize_irq(ethdev->irq_arr[0].vector);
4523}
4524
4525static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4526{
4527	struct cnic_local *cp = dev->cnic_priv;
4528	struct cnic_eth_dev *ethdev = cp->ethdev;
4529	struct cnic_uio_dev *udev = cp->udev;
4530	u32 cid_addr, tx_cid, sb_id;
4531	u32 val, offset0, offset1, offset2, offset3;
4532	int i;
4533	struct bnx2_tx_bd *txbd;
4534	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4535	struct status_block *s_blk = cp->status_blk.gen;
4536
4537	sb_id = cp->status_blk_num;
4538	tx_cid = 20;
4539	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4540	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4541		struct status_block_msix *sblk = cp->status_blk.bnx2;
4542
4543		tx_cid = TX_TSS_CID + sb_id - 1;
4544		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4545			(TX_TSS_CID << 7));
4546		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4547	}
4548	cp->tx_cons = *cp->tx_cons_ptr;
4549
4550	cid_addr = GET_CID_ADDR(tx_cid);
4551	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
4552		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4553
4554		for (i = 0; i < PHY_CTX_SIZE; i += 4)
4555			cnic_ctx_wr(dev, cid_addr2, i, 0);
4556
4557		offset0 = BNX2_L2CTX_TYPE_XI;
4558		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4559		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4560		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4561	} else {
4562		cnic_init_context(dev, tx_cid);
4563		cnic_init_context(dev, tx_cid + 1);
4564
4565		offset0 = BNX2_L2CTX_TYPE;
4566		offset1 = BNX2_L2CTX_CMD_TYPE;
4567		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4568		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4569	}
4570	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4571	cnic_ctx_wr(dev, cid_addr, offset0, val);
4572
4573	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4574	cnic_ctx_wr(dev, cid_addr, offset1, val);
4575
4576	txbd = udev->l2_ring;
4577
4578	buf_map = udev->l2_buf_map;
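	/* Point every TX BD at the base of the shared UIO DMA buffer */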
4579	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
4580		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4581		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4582	}
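	/* txbd now addresses the last BD in the page, which chains the
	 * ring back to its own base address.
	 */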
4583	val = (u64) ring_map >> 32;
4584	cnic_ctx_wr(dev, cid_addr, offset2, val);
4585	txbd->tx_bd_haddr_hi = val;
4586
4587	val = (u64) ring_map & 0xffffffff;
4588	cnic_ctx_wr(dev, cid_addr, offset3, val);
4589	txbd->tx_bd_haddr_lo = val;
4590}
4591
4592static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4593{
4594	struct cnic_local *cp = dev->cnic_priv;
4595	struct cnic_eth_dev *ethdev = cp->ethdev;
4596	struct cnic_uio_dev *udev = cp->udev;
4597	u32 cid_addr, sb_id, val, coal_reg, coal_val;
4598	int i;
4599	struct bnx2_rx_bd *rxbd;
4600	struct status_block *s_blk = cp->status_blk.gen;
4601	dma_addr_t ring_map = udev->l2_ring_map;
4602
4603	sb_id = cp->status_blk_num;
4604	cnic_init_context(dev, 2);
4605	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4606	coal_reg = BNX2_HC_COMMAND;
4607	coal_val = CNIC_RD(dev, coal_reg);
4608	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4609		struct status_block_msix *sblk = cp->status_blk.bnx2;
4610
4611		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4612		coal_reg = BNX2_HC_COALESCE_NOW;
4613		coal_val = 1 << (11 + sb_id);
4614	}
4615	i = 0;
4616	while (*cp->rx_cons_ptr == 0 && i < 10) {
4617		CNIC_WR(dev, coal_reg, coal_val);
4618		udelay(10);
4619		i++;
4620		barrier();
4621	}
4622	cp->rx_cons = *cp->rx_cons_ptr;
4623
4624	cid_addr = GET_CID_ADDR(2);
4625	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4626	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4627	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4628
4629	if (sb_id == 0)
4630		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4631	else
4632		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4633	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4634
4635	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
4636	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
4637		dma_addr_t buf_map;
4638		int n = (i % cp->l2_rx_ring_size) + 1;
4639
4640		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4641		rxbd->rx_bd_len = cp->l2_single_buf_size;
4642		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4643		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4644		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4645	}
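	/* The last BD chains the RX ring back to its base page */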
4646	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
4647	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4648	rxbd->rx_bd_haddr_hi = val;
4649
4650	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
4651	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4652	rxbd->rx_bd_haddr_lo = val;
4653
4654	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4655	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4656}
4657
4658static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4659{
4660	struct kwqe *wqes[1], l2kwqe;
4661
4662	memset(&l2kwqe, 0, sizeof(l2kwqe));
4663	wqes[0] = &l2kwqe;
4664	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4665			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
4666			       KWQE_OPCODE_SHIFT) | 2;
4667	dev->submit_kwqes(dev, wqes, 1);
4668}
4669
4670static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4671{
4672	struct cnic_local *cp = dev->cnic_priv;
4673	u32 val;
4674
4675	val = cp->func << 2;
4676
4677	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4678
4679	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4680			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4681	dev->mac_addr[0] = (u8) (val >> 8);
4682	dev->mac_addr[1] = (u8) val;
4683
4684	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4685
4686	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4687			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4688	dev->mac_addr[2] = (u8) (val >> 24);
4689	dev->mac_addr[3] = (u8) (val >> 16);
4690	dev->mac_addr[4] = (u8) (val >> 8);
4691	dev->mac_addr[5] = (u8) val;
4692
4693	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4694
4695	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4696	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4697		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4698
4699	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4700	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4701	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4702}
4703
4704static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4705{
4706	struct cnic_local *cp = dev->cnic_priv;
4707	struct cnic_eth_dev *ethdev = cp->ethdev;
4708	struct status_block *sblk = cp->status_blk.gen;
4709	u32 val, kcq_cid_addr, kwq_cid_addr;
4710	int err;
4711
4712	cnic_set_bnx2_mac(dev);
4713
4714	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4715	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4716	if (CNIC_PAGE_BITS > 12)
4717		val |= (12 - 8)  << 4;
4718	else
4719		val |= (CNIC_PAGE_BITS - 8)  << 4;
4720
4721	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4722
4723	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4724	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4725	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4726
4727	err = cnic_setup_5709_context(dev, 1);
4728	if (err)
4729		return err;
4730
4731	cnic_init_context(dev, KWQ_CID);
4732	cnic_init_context(dev, KCQ_CID);
4733
4734	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4735	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4736
4737	cp->max_kwq_idx = MAX_KWQ_IDX;
4738	cp->kwq_prod_idx = 0;
4739	cp->kwq_con_idx = 0;
4740	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4741
4742	if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
4743		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4744	else
4745		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4746
4747	/* Initialize the kernel work queue context. */
4748	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4749	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4750	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4751
4752	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4753	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4754
4755	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4756	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4757
4758	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4759	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4760
4761	val = (u32) cp->kwq_info.pgtbl_map;
4762	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4763
4764	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4765	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4766
4767	cp->kcq1.sw_prod_idx = 0;
4768	cp->kcq1.hw_prod_idx_ptr =
4769		&sblk->status_completion_producer_index;
4770
4771	cp->kcq1.status_idx_ptr = &sblk->status_idx;
4772
4773	/* Initialize the kernel complete queue context. */
4774	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4775	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4776	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4777
4778	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4779	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4780
4781	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4782	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4783
4784	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4785	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4786
4787	val = (u32) cp->kcq1.dma.pgtbl_map;
4788	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4789
4790	cp->int_num = 0;
4791	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4792		struct status_block_msix *msblk = cp->status_blk.bnx2;
4793		u32 sb_id = cp->status_blk_num;
4794		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4795
4796		cp->kcq1.hw_prod_idx_ptr =
4797			&msblk->status_completion_producer_index;
4798		cp->kcq1.status_idx_ptr = &msblk->status_idx;
4799		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
4800		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4801		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4802		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4803	}
4804
4805	/* Enable Command Scheduler notification when we write to the
4806	 * host producer index of the kernel contexts. */
4807	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4808
4809	/* Enable Command Scheduler notification when we write to either
4810	 * the Send Queue or Receive Queue producer indexes of the kernel
4811	 * bypass contexts. */
4812	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4813	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4814
4815	/* Notify COM when the driver posts an application buffer. */
4816	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4817
4818	/* Set the CP and COM doorbells.  These two processors poll the
4819	 * doorbell for a non-zero value before running.  This must be done
4820	 * after setting up the kernel queue contexts. */
4821	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4822	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4823
4824	cnic_init_bnx2_tx_ring(dev);
4825	cnic_init_bnx2_rx_ring(dev);
4826
4827	err = cnic_init_bnx2_irq(dev);
4828	if (err) {
4829		netdev_err(dev->netdev, "cnic_init_irq failed\n");
4830		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4831		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4832		return err;
4833	}
4834
4835	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
4836
4837	return 0;
4838}
4839
4840static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4841{
4842	struct cnic_local *cp = dev->cnic_priv;
4843	struct cnic_eth_dev *ethdev = cp->ethdev;
4844	u32 start_offset = ethdev->ctx_tbl_offset;
4845	int i;
4846
4847	for (i = 0; i < cp->ctx_blks; i++) {
4848		struct cnic_ctx *ctx = &cp->ctx_arr[i];
4849		dma_addr_t map = ctx->mapping;
4850
4851		if (cp->ctx_align) {
4852			unsigned long mask = cp->ctx_align - 1;
4853
4854			map = (map + mask) & ~mask;
4855		}
4856
4857		cnic_ctx_tbl_wr(dev, start_offset + i, map);
4858	}
4859}
4860
4861static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4862{
4863	struct cnic_local *cp = dev->cnic_priv;
4864	struct cnic_eth_dev *ethdev = cp->ethdev;
4865	int err = 0;
4866
4867	tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
4868	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4869		err = cnic_request_irq(dev);
4870
4871	return err;
4872}
4873
4874static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4875						u16 sb_id, u8 sb_index,
4876						u8 disable)
4877{
4878	struct bnx2x *bp = netdev_priv(dev->netdev);
4879
4880	u32 addr = BAR_CSTRORM_INTMEM +
4881			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4882			offsetof(struct hc_status_block_data_e1x, index_data) +
4883			sizeof(struct hc_index_data)*sb_index +
4884			offsetof(struct hc_index_data, flags);
4885	u16 flags = CNIC_RD16(dev, addr);
4886	/* clear and set */
4887	flags &= ~HC_INDEX_DATA_HC_ENABLED;
4888	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4889		  HC_INDEX_DATA_HC_ENABLED);
4890	CNIC_WR16(dev, addr, flags);
4891}
4892
4893static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4894{
4895	struct cnic_local *cp = dev->cnic_priv;
4896	struct bnx2x *bp = netdev_priv(dev->netdev);
4897	u8 sb_id = cp->status_blk_num;
4898
4899	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4900			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4901			offsetof(struct hc_status_block_data_e1x, index_data) +
4902			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4903			offsetof(struct hc_index_data, timeout), 64 / 4);
4904	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4905}
4906
4907static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4908{
4909}
4910
4911static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4912				    struct client_init_ramrod_data *data)
4913{
4914	struct cnic_local *cp = dev->cnic_priv;
4915	struct bnx2x *bp = netdev_priv(dev->netdev);
4916	struct cnic_uio_dev *udev = cp->udev;
4917	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4918	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4919	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4920	int i;
4921	u32 cli = cp->ethdev->iscsi_l2_client_id;
4922	u32 val;
4923
4924	memset(txbd, 0, CNIC_PAGE_SIZE);
4925
4926	buf_map = udev->l2_buf_map;
4927	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4928		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4929		struct eth_tx_parse_bd_e1x *pbd_e1x =
4930			&((txbd + 1)->parse_bd_e1x);
4931		struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
4932		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4933
4934		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4935		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4936		reg_bd->addr_hi = start_bd->addr_hi;
4937		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4938		start_bd->nbytes = cpu_to_le16(0x10);
4939		start_bd->nbd = cpu_to_le16(3);
4940		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4941		start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
4942		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4943
4944		if (BNX2X_CHIP_IS_E2_PLUS(bp))
4945			pbd_e2->parsing_data = (UNICAST_ADDRESS <<
4946				ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
4947		else
4948			pbd_e1x->global_data = (UNICAST_ADDRESS <<
4949				ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
4950	}
4951
4952	val = (u64) ring_map >> 32;
4953	txbd->next_bd.addr_hi = cpu_to_le32(val);
4954
4955	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4956
4957	val = (u64) ring_map & 0xffffffff;
4958	txbd->next_bd.addr_lo = cpu_to_le32(val);
4959
4960	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4961
4962	/* Other ramrod params */
4963	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4964	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4965
4966	/* reset xstorm per client statistics */
4967	if (cli < MAX_STAT_COUNTER_ID) {
4968		data->general.statistics_zero_flg = 1;
4969		data->general.statistics_en_flg = 1;
4970		data->general.statistics_counter_id = cli;
4971	}
4972
4973	cp->tx_cons_ptr =
4974		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
4975}
4976
4977static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4978				    struct client_init_ramrod_data *data)
4979{
4980	struct cnic_local *cp = dev->cnic_priv;
4981	struct bnx2x *bp = netdev_priv(dev->netdev);
4982	struct cnic_uio_dev *udev = cp->udev;
4983	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4984				CNIC_PAGE_SIZE);
4985	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4986				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
4987	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4988	int i;
4989	u32 cli = cp->ethdev->iscsi_l2_client_id;
4990	int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
4991	u32 val;
4992	dma_addr_t ring_map = udev->l2_ring_map;
4993
4994	/* General data */
4995	data->general.client_id = cli;
4996	data->general.activate_flg = 1;
4997	data->general.sp_client_id = cli;
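	/* l2_single_buf_size includes the 14-byte Ethernet header */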
4998	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4999	data->general.func_id = bp->pfid;
5000
5001	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
5002		dma_addr_t buf_map;
5003		int n = (i % cp->l2_rx_ring_size) + 1;
5004
5005		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
5006		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
5007		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
5008	}
5009
5010	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
5011	rxbd->addr_hi = cpu_to_le32(val);
5012	data->rx.bd_page_base.hi = cpu_to_le32(val);
5013
5014	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
5015	rxbd->addr_lo = cpu_to_le32(val);
5016	data->rx.bd_page_base.lo = cpu_to_le32(val);
5017
5018	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
5019	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
5020	rxcqe->addr_hi = cpu_to_le32(val);
5021	data->rx.cqe_page_base.hi = cpu_to_le32(val);
5022
5023	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
5024	rxcqe->addr_lo = cpu_to_le32(val);
5025	data->rx.cqe_page_base.lo = cpu_to_le32(val);
5026
5027	/* Other ramrod params */
5028	data->rx.client_qzone_id = cl_qzone_id;
5029	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
5030	data->rx.status_block_id = BNX2X_DEF_SB_ID;
5031
5032	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
5033
5034	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
5035	data->rx.outer_vlan_removal_enable_flg = 1;
5036	data->rx.silent_vlan_removal_flg = 1;
5037	data->rx.silent_vlan_value = 0;
5038	data->rx.silent_vlan_mask = 0xffff;
5039
5040	cp->rx_cons_ptr =
5041		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
5042	cp->rx_cons = *cp->rx_cons_ptr;
5043}
5044
5045static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5046{
5047	struct cnic_local *cp = dev->cnic_priv;
5048	struct bnx2x *bp = netdev_priv(dev->netdev);
5049	u32 pfid = bp->pfid;
5050
5051	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
5052			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
5053	cp->kcq1.sw_prod_idx = 0;
5054
5055	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5056		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5057
5058		cp->kcq1.hw_prod_idx_ptr =
5059			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5060		cp->kcq1.status_idx_ptr =
5061			&sb->sb.running_index[SM_RX_ID];
5062	} else {
5063		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
5064
5065		cp->kcq1.hw_prod_idx_ptr =
5066			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5067		cp->kcq1.status_idx_ptr =
5068			&sb->sb.running_index[SM_RX_ID];
5069	}
5070
5071	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5072		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5073
5074		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
5075					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
5076		cp->kcq2.sw_prod_idx = 0;
5077		cp->kcq2.hw_prod_idx_ptr =
5078			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
5079		cp->kcq2.status_idx_ptr =
5080			&sb->sb.running_index[SM_RX_ID];
5081	}
5082}
5083
5084static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5085{
5086	struct cnic_local *cp = dev->cnic_priv;
5087	struct bnx2x *bp = netdev_priv(dev->netdev);
5088	struct cnic_eth_dev *ethdev = cp->ethdev;
5089	int ret;
5090	u32 pfid;
5091
5092	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
5093	cp->func = bp->pf_num;
5094
5095	pfid = bp->pfid;
5096
5097	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
5098			       cp->iscsi_start_cid, 0);
5099
5100	if (ret)
5101		return -ENOMEM;
5102
5103	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5104		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
5105					cp->fcoe_start_cid, 0);
5106
5107		if (ret)
5108			return -ENOMEM;
5109	}
5110
5111	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
5112
5113	cnic_init_bnx2x_kcq(dev);
5114
5115	/* Only 1 EQ */
5116	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
5117	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5118		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
5119	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5120		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
5121		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
5122	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5123		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
5124		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
5125	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5126		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
5127		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
5128	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5129		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
5130		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
5131	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5132		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
5133	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
5134		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
5135	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5136		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
5137		HC_INDEX_ISCSI_EQ_CONS);
5138
5139	CNIC_WR(dev, BAR_USTRORM_INTMEM +
5140		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
5141		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
5142	CNIC_WR(dev, BAR_USTRORM_INTMEM +
5143		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
5144		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
5145
5146	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5147		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
5148
5149	cnic_setup_bnx2x_context(dev);
5150
5151	ret = cnic_init_bnx2x_irq(dev);
5152	if (ret)
5153		return ret;
5154
5155	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
5156	return 0;
5157}
5158
5159static void cnic_init_rings(struct cnic_dev *dev)
5160{
5161	struct cnic_local *cp = dev->cnic_priv;
5162	struct bnx2x *bp = netdev_priv(dev->netdev);
5163	struct cnic_uio_dev *udev = cp->udev;
5164
5165	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5166		return;
5167
5168	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5169		cnic_init_bnx2_tx_ring(dev);
5170		cnic_init_bnx2_rx_ring(dev);
5171		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5172	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5173		u32 cli = cp->ethdev->iscsi_l2_client_id;
5174		u32 cid = cp->ethdev->iscsi_l2_cid;
5175		u32 cl_qzone_id;
5176		struct client_init_ramrod_data *data;
5177		union l5cm_specific_data l5_data;
5178		struct ustorm_eth_rx_producers rx_prods = {0};
5179		u32 off, i, *cid_ptr;
5180
5181		rx_prods.bd_prod = 0;
5182		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5183		barrier();
5184
5185		cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
5186
5187		off = BAR_USTRORM_INTMEM +
5188			(BNX2X_CHIP_IS_E2_PLUS(bp) ?
5189			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
5190			 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
5191
5192		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
5193			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
5194
5195		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5196
5197		data = udev->l2_buf;
5198		cid_ptr = udev->l2_buf + 12;
5199
5200		memset(data, 0, sizeof(*data));
5201
5202		cnic_init_bnx2x_tx_ring(dev, data);
5203		cnic_init_bnx2x_rx_ring(dev, data);
5204
5205		data->general.fp_hsi_ver = ETH_FP_HSI_VERSION;
5206
5207		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5208		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
5209
5210		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5211
5212		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5213			cid, ETH_CONNECTION_TYPE, &l5_data);
5214
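		/* Poll up to ~10 ms for the CLIENT_SETUP completion to
		 * clear the L2_WAIT flag.
		 */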
5215		i = 0;
5216		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5217		       ++i < 10)
5218			msleep(1);
5219
5220		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5221			netdev_err(dev->netdev,
5222				"iSCSI CLIENT_SETUP did not complete\n");
5223		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5224		cnic_ring_ctl(dev, cid, cli, 1);
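		/* Hand the ring parameters (cid, doorbell offset, doorbell
		 * mode) to the userspace UIO consumer via the shared l2 buffer.
		 */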
5225		*cid_ptr = cid >> 4;
5226		*(cid_ptr + 1) = cid * bp->db_size;
5227		*(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
5228	}
5229}
5230
5231static void cnic_shutdown_rings(struct cnic_dev *dev)
5232{
5233	struct cnic_local *cp = dev->cnic_priv;
5234	struct cnic_uio_dev *udev = cp->udev;
5235	void *rx_ring;
5236
5237	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5238		return;
5239
5240	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5241		cnic_shutdown_bnx2_rx_ring(dev);
5242	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5243		u32 cli = cp->ethdev->iscsi_l2_client_id;
5244		u32 cid = cp->ethdev->iscsi_l2_cid;
5245		union l5cm_specific_data l5_data;
5246		int i;
5247
5248		cnic_ring_ctl(dev, cid, cli, 0);
5249
5250		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5251
5252		l5_data.phy_address.lo = cli;
5253		l5_data.phy_address.hi = 0;
5254		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5255			cid, ETH_CONNECTION_TYPE, &l5_data);
5256		i = 0;
5257		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5258		       ++i < 10)
5259			msleep(1);
5260
5261		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5262			netdev_err(dev->netdev,
5263				"iSCSI CLIENT_HALT did not complete\n");
5264		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5265
5266		memset(&l5_data, 0, sizeof(l5_data));
5267		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5268			cid, NONE_CONNECTION_TYPE, &l5_data);
5269		msleep(10);
5270	}
5271	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5272	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
5273	memset(rx_ring, 0, CNIC_PAGE_SIZE);
5274}
5275
5276static int cnic_register_netdev(struct cnic_dev *dev)
5277{
5278	struct cnic_local *cp = dev->cnic_priv;
5279	struct cnic_eth_dev *ethdev = cp->ethdev;
5280	int err;
5281
5282	if (!ethdev)
5283		return -ENODEV;
5284
5285	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5286		return 0;
5287
5288	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5289	if (err)
5290		netdev_err(dev->netdev, "register_cnic failed\n");
5291
5292	/* Read iSCSI config again.  On some bnx2x devices, iSCSI config
5293	 * can change after firmware is downloaded.
5294	 */
5295	dev->max_iscsi_conn = ethdev->max_iscsi_conn;
5296	if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
5297		dev->max_iscsi_conn = 0;
5298
5299	return err;
5300}
5301
5302static void cnic_unregister_netdev(struct cnic_dev *dev)
5303{
5304	struct cnic_local *cp = dev->cnic_priv;
5305	struct cnic_eth_dev *ethdev = cp->ethdev;
5306
5307	if (!ethdev)
5308		return;
5309
5310	ethdev->drv_unregister_cnic(dev->netdev);
5311}
5312
5313static int cnic_start_hw(struct cnic_dev *dev)
5314{
5315	struct cnic_local *cp = dev->cnic_priv;
5316	struct cnic_eth_dev *ethdev = cp->ethdev;
5317	int err;
5318
5319	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5320		return -EALREADY;
5321
5322	dev->regview = ethdev->io_base;
5323	pci_dev_get(dev->pcidev);
5324	cp->func = PCI_FUNC(dev->pcidev->devfn);
5325	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5326	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5327
5328	err = cp->alloc_resc(dev);
5329	if (err) {
5330		netdev_err(dev->netdev, "allocate resource failure\n");
5331		goto err1;
5332	}
5333
5334	err = cp->start_hw(dev);
5335	if (err)
5336		goto err1;
5337
5338	err = cnic_cm_open(dev);
5339	if (err)
5340		goto err1;
5341
5342	set_bit(CNIC_F_CNIC_UP, &dev->flags);
5343
5344	cp->enable_int(dev);
5345
5346	return 0;
5347
5348err1:
5349	if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
5350		cp->stop_hw(dev);
5351	else
5352		cp->free_resc(dev);
5353	pci_dev_put(dev->pcidev);
5354	return err;
5355}
5356
5357static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5358{
5359	cnic_disable_bnx2_int_sync(dev);
5360
5361	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5362	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5363
5364	cnic_init_context(dev, KWQ_CID);
5365	cnic_init_context(dev, KCQ_CID);
5366
5367	cnic_setup_5709_context(dev, 0);
5368	cnic_free_irq(dev);
5369
5370	cnic_free_resc(dev);
5371}
5372
5373
5374static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5375{
5376	struct cnic_local *cp = dev->cnic_priv;
5377	struct bnx2x *bp = netdev_priv(dev->netdev);
5378	u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
5379	u32 sb_id = cp->status_blk_num;
5380	u32 idx_off, syn_off;
5381
5382	cnic_free_irq(dev);
5383
5384	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5385		idx_off = offsetof(struct hc_status_block_e2, index_values) +
5386			  (hc_index * sizeof(u16));
5387
5388		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
5389	} else {
5390		idx_off = offsetof(struct hc_status_block_e1x, index_values) +
5391			  (hc_index * sizeof(u16));
5392
5393		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
5394	}
5395	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
5396	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
5397		  idx_off, 0);
5398
5399	*cp->kcq1.hw_prod_idx_ptr = 0;
5400	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5401		CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
5402	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5403	cnic_free_resc(dev);
5404}
5405
5406static void cnic_stop_hw(struct cnic_dev *dev)
5407{
5408	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5409		struct cnic_local *cp = dev->cnic_priv;
5410		int i = 0;
5411
5412		/* Need to wait for the ring shutdown event to complete
5413		 * before clearing the CNIC_UP flag.
5414		 */
5415		while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
5416			msleep(100);
5417			i++;
5418		}
5419		cnic_shutdown_rings(dev);
5420		cp->stop_cm(dev);
5421		cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
5422		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5423		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
5424		synchronize_rcu();
5425		cnic_cm_shutdown(dev);
5426		cp->stop_hw(dev);
5427		pci_dev_put(dev->pcidev);
5428	}
5429}
5430
5431static void cnic_free_dev(struct cnic_dev *dev)
5432{
5433	int i = 0;
5434
5435	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5436		msleep(100);
5437		i++;
5438	}
5439	if (atomic_read(&dev->ref_count) != 0)
5440		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5441
5442	netdev_info(dev->netdev, "Removed CNIC device\n");
5443	dev_put(dev->netdev);
5444	kfree(dev);
5445}
5446
5447static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
5448				struct cnic_fc_npiv_tbl *npiv_tbl)
5449{
5450	struct cnic_local *cp = dev->cnic_priv;
5451	struct bnx2x *bp = netdev_priv(dev->netdev);
5452	int ret;
5453
5454	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
5455		return -EAGAIN;     /* bnx2x is down */
5456
5457	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
5458		return -EINVAL;
5459
5460	ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
5461	return ret;
5462}
5463
5464static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5465				       struct pci_dev *pdev)
5466{
5467	struct cnic_dev *cdev;
5468	struct cnic_local *cp;
5469	int alloc_size;
5470
5471	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5472
5473	cdev = kzalloc(alloc_size, GFP_KERNEL);
5474	if (cdev == NULL)
5475		return NULL;
5476
5477	cdev->netdev = dev;
5478	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5479	cdev->register_device = cnic_register_device;
5480	cdev->unregister_device = cnic_unregister_device;
5481	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5482	cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
5483	atomic_set(&cdev->ref_count, 0);
5484
5485	cp = cdev->cnic_priv;
5486	cp->dev = cdev;
5487	cp->l2_single_buf_size = 0x400;
5488	cp->l2_rx_ring_size = 3;
5489
5490	spin_lock_init(&cp->cnic_ulp_lock);
5491
5492	netdev_info(dev, "Added CNIC device\n");
5493
5494	return cdev;
5495}
5496
5497static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5498{
5499	struct pci_dev *pdev;
5500	struct cnic_dev *cdev;
5501	struct cnic_local *cp;
5502	struct bnx2 *bp = netdev_priv(dev);
5503	struct cnic_eth_dev *ethdev = NULL;
5504
5505	if (bp->cnic_probe)
5506		ethdev = (bp->cnic_probe)(dev);
5507
5508	if (!ethdev)
5509		return NULL;
5510
5511	pdev = ethdev->pdev;
5512	if (!pdev)
5513		return NULL;
5514
5515	dev_hold(dev);
5516	pci_dev_get(pdev);
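	/* cnic is not supported on early 5709 revisions (< 0x10) */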
5517	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5518	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5519	    (pdev->revision < 0x10)) {
5520		pci_dev_put(pdev);
5521		goto cnic_err;
5522	}
5523	pci_dev_put(pdev);
5524
5525	cdev = cnic_alloc_dev(dev, pdev);
5526	if (cdev == NULL)
5527		goto cnic_err;
5528
5529	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5530	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5531
5532	cp = cdev->cnic_priv;
5533	cp->ethdev = ethdev;
5534	cdev->pcidev = pdev;
5535	cp->chip_id = ethdev->chip_id;
5536
5537	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5538
5539	cp->cnic_ops = &cnic_bnx2_ops;
5540	cp->start_hw = cnic_start_bnx2_hw;
5541	cp->stop_hw = cnic_stop_bnx2_hw;
5542	cp->setup_pgtbl = cnic_setup_page_tbl;
5543	cp->alloc_resc = cnic_alloc_bnx2_resc;
5544	cp->free_resc = cnic_free_resc;
5545	cp->start_cm = cnic_cm_init_bnx2_hw;
5546	cp->stop_cm = cnic_cm_stop_bnx2_hw;
5547	cp->enable_int = cnic_enable_bnx2_int;
5548	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5549	cp->close_conn = cnic_close_bnx2_conn;
5550	return cdev;
5551
5552cnic_err:
5553	dev_put(dev);
5554	return NULL;
5555}
5556
5557static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5558{
5559	struct pci_dev *pdev;
5560	struct cnic_dev *cdev;
5561	struct cnic_local *cp;
5562	struct bnx2x *bp = netdev_priv(dev);
5563	struct cnic_eth_dev *ethdev = NULL;
5564
5565	if (bp->cnic_probe)
5566		ethdev = bp->cnic_probe(dev);
5567
5568	if (!ethdev)
5569		return NULL;
5570
5571	pdev = ethdev->pdev;
5572	if (!pdev)
5573		return NULL;
5574
5575	dev_hold(dev);
5576	cdev = cnic_alloc_dev(dev, pdev);
5577	if (cdev == NULL) {
5578		dev_put(dev);
5579		return NULL;
5580	}
5581
5582	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5583	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5584
5585	cp = cdev->cnic_priv;
5586	cp->ethdev = ethdev;
5587	cdev->pcidev = pdev;
5588	cp->chip_id = ethdev->chip_id;
5589
5590	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
5591
5592	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5593		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5594	if (CNIC_SUPPORTS_FCOE(bp)) {
5595		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5596		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
5597	}
5598
5599	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
5600		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
5601
5602	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
5603
5604	cp->cnic_ops = &cnic_bnx2x_ops;
5605	cp->start_hw = cnic_start_bnx2x_hw;
5606	cp->stop_hw = cnic_stop_bnx2x_hw;
5607	cp->setup_pgtbl = cnic_setup_page_tbl_le;
5608	cp->alloc_resc = cnic_alloc_bnx2x_resc;
5609	cp->free_resc = cnic_free_resc;
5610	cp->start_cm = cnic_cm_init_bnx2x_hw;
5611	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5612	cp->enable_int = cnic_enable_bnx2x_int;
5613	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5614	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5615		cp->ack_int = cnic_ack_bnx2x_e2_msix;
5616		cp->arm_int = cnic_arm_bnx2x_e2_msix;
5617	} else {
5618		cp->ack_int = cnic_ack_bnx2x_msix;
5619		cp->arm_int = cnic_arm_bnx2x_msix;
5620	}
5621	cp->close_conn = cnic_close_bnx2x_conn;
5622	return cdev;
5623}
5624
5625static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5626{
5627	struct ethtool_drvinfo drvinfo;
5628	struct cnic_dev *cdev = NULL;
5629
5630	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5631		memset(&drvinfo, 0, sizeof(drvinfo));
5632		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5633
5634		if (!strcmp(drvinfo.driver, "bnx2"))
5635			cdev = init_bnx2_cnic(dev);
5636		if (!strcmp(drvinfo.driver, "bnx2x"))
5637			cdev = init_bnx2x_cnic(dev);
5638		if (cdev) {
5639			write_lock(&cnic_dev_lock);
5640			list_add(&cdev->list, &cnic_dev_list);
5641			write_unlock(&cnic_dev_lock);
5642		}
5643	}
5644	return cdev;
5645}
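/* Note: candidate devices are identified by their ethtool driver name
 * ("bnx2"/"bnx2x") rather than by PCI IDs, which keeps cnic decoupled
 * from the ethernet drivers' device tables.  A newly created device is
 * put on cnic_dev_list under the writer side of cnic_dev_lock.
 */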
5646
5647static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5648			      u16 vlan_id)
5649{
5650	int if_type;
5651
5652	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5653		struct cnic_ulp_ops *ulp_ops;
5654		void *ctx;
5655
5656		mutex_lock(&cnic_lock);
5657		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
5658						lockdep_is_held(&cnic_lock));
5659		if (!ulp_ops || !ulp_ops->indicate_netevent) {
5660			mutex_unlock(&cnic_lock);
5661			continue;
5662		}
5663
5664		ctx = cp->ulp_handle[if_type];
5665
5666		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5667		mutex_unlock(&cnic_lock);
5668
5669		ulp_ops->indicate_netevent(ctx, event, vlan_id);
5670
5671		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5672	}
5673}
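/* Note: ULP_F_CALL_PENDING is set under cnic_lock before the lock is
 * dropped for the indicate_netevent() upcall; cnic_unregister_device()
 * polls this bit so it can wait for in-flight upcalls to drain before
 * the ULP goes away.
 */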
5674
5675/* netdev event handler */
5676static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5677							 void *ptr)
5678{
5679	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
5680	struct cnic_dev *dev;
5681	int new_dev = 0;
5682
5683	dev = cnic_from_netdev(netdev);
5684
5685	if (!dev && event == NETDEV_REGISTER) {
5686		/* Check for the hot-plug device */
5687		dev = is_cnic_dev(netdev);
5688		if (dev) {
5689			new_dev = 1;
5690			cnic_hold(dev);
5691		}
5692	}
5693	if (dev) {
5694		struct cnic_local *cp = dev->cnic_priv;
5695
5696		if (new_dev)
5697			cnic_ulp_init(dev);
5698		else if (event == NETDEV_UNREGISTER)
5699			cnic_ulp_exit(dev);
5700
5701		if (event == NETDEV_UP) {
5702			if (cnic_register_netdev(dev) != 0) {
5703				cnic_put(dev);
5704				goto done;
5705			}
5706			if (!cnic_start_hw(dev))
5707				cnic_ulp_start(dev);
5708		}
5709
5710		cnic_rcv_netevent(cp, event, 0);
5711
5712		if (event == NETDEV_GOING_DOWN) {
5713			cnic_ulp_stop(dev);
5714			cnic_stop_hw(dev);
5715			cnic_unregister_netdev(dev);
5716		} else if (event == NETDEV_UNREGISTER) {
5717			write_lock(&cnic_dev_lock);
5718			list_del_init(&dev->list);
5719			write_unlock(&cnic_dev_lock);
5720
5721			cnic_put(dev);
5722			cnic_free_dev(dev);
5723			goto done;
5724		}
5725		cnic_put(dev);
5726	} else {
5727		struct net_device *realdev;
5728		u16 vid;
5729
5730		vid = cnic_get_vlan(netdev, &realdev);
5731		if (realdev) {
5732			dev = cnic_from_netdev(realdev);
5733			if (dev) {
5734				vid |= VLAN_CFI_MASK;	/* make non-zero */
5735				cnic_rcv_netevent(dev->cnic_priv, event, vid);
5736				cnic_put(dev);
5737			}
5738		}
5739	}
5740done:
5741	return NOTIFY_DONE;
5742}
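/* Note: this notifier drives the whole device lifecycle: NETDEV_REGISTER
 * probes hot-plugged bnx2/bnx2x devices, NETDEV_UP registers with the
 * ethernet driver and starts the hardware, NETDEV_GOING_DOWN tears it
 * down, and NETDEV_UNREGISTER removes the device from cnic_dev_list and
 * frees it.  Events on a VLAN device are forwarded to the real device
 * with VLAN_CFI_MASK or'ed in, so a vlan id of 0 still reads as non-zero
 * to the ULPs.
 */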
5743
5744static struct notifier_block cnic_netdev_notifier = {
5745	.notifier_call = cnic_netdev_event
5746};
5747
5748static void cnic_release(void)
5749{
5750	struct cnic_uio_dev *udev;
5751
5752	while (!list_empty(&cnic_udev_list)) {
5753		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5754				  list);
5755		cnic_free_uio(udev);
5756	}
5757}
5758
5759static int __init cnic_init(void)
5760{
5761	int rc = 0;
5762
5763	pr_info("%s", version);
5764
5765	rc = register_netdevice_notifier(&cnic_netdev_notifier);
5766	if (rc) {
5767		cnic_release();
5768		return rc;
5769	}
5770
5771	cnic_wq = create_singlethread_workqueue("cnic_wq");
5772	if (!cnic_wq) {
5773		cnic_release();
5774		unregister_netdevice_notifier(&cnic_netdev_notifier);
5775		return -ENOMEM;
5776	}
5777
5778	return 0;
5779}
5780
5781static void __exit cnic_exit(void)
5782{
5783	unregister_netdevice_notifier(&cnic_netdev_notifier);
5784	cnic_release();
5785	destroy_workqueue(cnic_wq);
5786}
5787
5788module_init(cnic_init);
5789module_exit(cnic_exit);
v3.5.6
   1/* cnic.c: Broadcom CNIC core network driver.
   2 *
   3 * Copyright (c) 2006-2012 Broadcom Corporation
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation.
   8 *
   9 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
  10 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/module.h>
  16
  17#include <linux/kernel.h>
  18#include <linux/errno.h>
  19#include <linux/list.h>
  20#include <linux/slab.h>
  21#include <linux/pci.h>
  22#include <linux/init.h>
  23#include <linux/netdevice.h>
  24#include <linux/uio_driver.h>
  25#include <linux/in.h>
  26#include <linux/dma-mapping.h>
  27#include <linux/delay.h>
  28#include <linux/ethtool.h>
  29#include <linux/if_vlan.h>
  30#include <linux/prefetch.h>
  31#include <linux/random.h>
  32#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
  33#define BCM_VLAN 1
  34#endif
  35#include <net/ip.h>
  36#include <net/tcp.h>
  37#include <net/route.h>
  38#include <net/ipv6.h>
  39#include <net/ip6_route.h>
  40#include <net/ip6_checksum.h>
  41#include <scsi/iscsi_if.h>
  42
  43#include "cnic_if.h"
  44#include "bnx2.h"
  45#include "bnx2x/bnx2x_reg.h"
  46#include "bnx2x/bnx2x_fw_defs.h"
  47#include "bnx2x/bnx2x_hsi.h"
  48#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
  49#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
  50#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
  51#include "cnic.h"
  52#include "cnic_defs.h"
  53
  54#define DRV_MODULE_NAME		"cnic"
  55
  56static char version[] __devinitdata =
  57	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
  58
  59MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
  60	      "Chen (zongxi@broadcom.com)");
  61MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
  62MODULE_LICENSE("GPL");
  63MODULE_VERSION(CNIC_MODULE_VERSION);
  64
  65/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
  66static LIST_HEAD(cnic_dev_list);
  67static LIST_HEAD(cnic_udev_list);
  68static DEFINE_RWLOCK(cnic_dev_lock);
  69static DEFINE_MUTEX(cnic_lock);
  70
  71static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
  72
  73/* helper function, assuming cnic_lock is held */
  74static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
  75{
  76	return rcu_dereference_protected(cnic_ulp_tbl[type],
  77					 lockdep_is_held(&cnic_lock));
  78}
  79
  80static int cnic_service_bnx2(void *, void *);
  81static int cnic_service_bnx2x(void *, void *);
  82static int cnic_ctl(void *, struct cnic_ctl_info *);
  83
  84static struct cnic_ops cnic_bnx2_ops = {
  85	.cnic_owner	= THIS_MODULE,
  86	.cnic_handler	= cnic_service_bnx2,
  87	.cnic_ctl	= cnic_ctl,
  88};
  89
  90static struct cnic_ops cnic_bnx2x_ops = {
  91	.cnic_owner	= THIS_MODULE,
  92	.cnic_handler	= cnic_service_bnx2x,
  93	.cnic_ctl	= cnic_ctl,
  94};
  95
  96static struct workqueue_struct *cnic_wq;
  97
  98static void cnic_shutdown_rings(struct cnic_dev *);
  99static void cnic_init_rings(struct cnic_dev *);
 100static int cnic_cm_set_pg(struct cnic_sock *);
 101
 102static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
 103{
 104	struct cnic_uio_dev *udev = uinfo->priv;
 105	struct cnic_dev *dev;
 106
 107	if (!capable(CAP_NET_ADMIN))
 108		return -EPERM;
 109
 110	if (udev->uio_dev != -1)
 111		return -EBUSY;
 112
 113	rtnl_lock();
 114	dev = udev->dev;
 115
 116	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
 117		rtnl_unlock();
 118		return -ENODEV;
 119	}
 120
 121	udev->uio_dev = iminor(inode);
 122
 123	cnic_shutdown_rings(dev);
 124	cnic_init_rings(dev);
 125	rtnl_unlock();
 126
 127	return 0;
 128}
 129
 130static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
 131{
 132	struct cnic_uio_dev *udev = uinfo->priv;
 133
 134	udev->uio_dev = -1;
 135	return 0;
 136}
 137
 138static inline void cnic_hold(struct cnic_dev *dev)
 139{
 140	atomic_inc(&dev->ref_count);
 141}
 142
 143static inline void cnic_put(struct cnic_dev *dev)
 144{
 145	atomic_dec(&dev->ref_count);
 146}
 147
 148static inline void csk_hold(struct cnic_sock *csk)
 149{
 150	atomic_inc(&csk->ref_count);
 151}
 152
 153static inline void csk_put(struct cnic_sock *csk)
 154{
 155	atomic_dec(&csk->ref_count);
 156}
 157
 158static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
 159{
 160	struct cnic_dev *cdev;
 161
 162	read_lock(&cnic_dev_lock);
 163	list_for_each_entry(cdev, &cnic_dev_list, list) {
 164		if (netdev == cdev->netdev) {
 165			cnic_hold(cdev);
 166			read_unlock(&cnic_dev_lock);
 167			return cdev;
 168		}
 169	}
 170	read_unlock(&cnic_dev_lock);
 171	return NULL;
 172}
 173
 174static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
 175{
 176	atomic_inc(&ulp_ops->ref_count);
 177}
 178
 179static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
 180{
 181	atomic_dec(&ulp_ops->ref_count);
 182}
 183
 184static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 185{
 186	struct cnic_local *cp = dev->cnic_priv;
 187	struct cnic_eth_dev *ethdev = cp->ethdev;
 188	struct drv_ctl_info info;
 189	struct drv_ctl_io *io = &info.data.io;
 190
 191	info.cmd = DRV_CTL_CTX_WR_CMD;
 192	io->cid_addr = cid_addr;
 193	io->offset = off;
 194	io->data = val;
 195	ethdev->drv_ctl(dev->netdev, &info);
 196}
 197
 198static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
 199{
 200	struct cnic_local *cp = dev->cnic_priv;
 201	struct cnic_eth_dev *ethdev = cp->ethdev;
 202	struct drv_ctl_info info;
 203	struct drv_ctl_io *io = &info.data.io;
 204
 205	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
 206	io->offset = off;
 207	io->dma_addr = addr;
 208	ethdev->drv_ctl(dev->netdev, &info);
 209}
 210
 211static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
 212{
 213	struct cnic_local *cp = dev->cnic_priv;
 214	struct cnic_eth_dev *ethdev = cp->ethdev;
 215	struct drv_ctl_info info;
 216	struct drv_ctl_l2_ring *ring = &info.data.ring;
 217
 218	if (start)
 219		info.cmd = DRV_CTL_START_L2_CMD;
 220	else
 221		info.cmd = DRV_CTL_STOP_L2_CMD;
 222
 223	ring->cid = cid;
 224	ring->client_id = cl_id;
 225	ethdev->drv_ctl(dev->netdev, &info);
 226}
 227
 228static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
 229{
 230	struct cnic_local *cp = dev->cnic_priv;
 231	struct cnic_eth_dev *ethdev = cp->ethdev;
 232	struct drv_ctl_info info;
 233	struct drv_ctl_io *io = &info.data.io;
 234
 235	info.cmd = DRV_CTL_IO_WR_CMD;
 236	io->offset = off;
 237	io->data = val;
 238	ethdev->drv_ctl(dev->netdev, &info);
 239}
 240
 241static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
 242{
 243	struct cnic_local *cp = dev->cnic_priv;
 244	struct cnic_eth_dev *ethdev = cp->ethdev;
 245	struct drv_ctl_info info;
 246	struct drv_ctl_io *io = &info.data.io;
 247
 248	info.cmd = DRV_CTL_IO_RD_CMD;
 249	io->offset = off;
 250	ethdev->drv_ctl(dev->netdev, &info);
 251	return io->data;
 252}
 253
 254static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
 255{
 256	struct cnic_local *cp = dev->cnic_priv;
 257	struct cnic_eth_dev *ethdev = cp->ethdev;
 258	struct drv_ctl_info info;
 259
 260	if (reg)
 261		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
 262	else
 263		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
 264
 265	info.data.ulp_type = ulp_type;
 266	ethdev->drv_ctl(dev->netdev, &info);
 267}
 268
 269static int cnic_in_use(struct cnic_sock *csk)
 270{
 271	return test_bit(SK_F_INUSE, &csk->flags);
 272}
 273
 274static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
 275{
 276	struct cnic_local *cp = dev->cnic_priv;
 277	struct cnic_eth_dev *ethdev = cp->ethdev;
 278	struct drv_ctl_info info;
 279
 280	info.cmd = cmd;
 281	info.data.credit.credit_count = count;
 282	ethdev->drv_ctl(dev->netdev, &info);
 283}
 284
 285static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
 286{
 287	u32 i;
 288
 289	for (i = 0; i < cp->max_cid_space; i++) {
 290		if (cp->ctx_tbl[i].cid == cid) {
 291			*l5_cid = i;
 292			return 0;
 293		}
 294	}
 295	return -EINVAL;
 296}
 297
 298static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 299			   struct cnic_sock *csk)
 300{
 301	struct iscsi_path path_req;
 302	char *buf = NULL;
 303	u16 len = 0;
 304	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
 305	struct cnic_ulp_ops *ulp_ops;
 306	struct cnic_uio_dev *udev = cp->udev;
 307	int rc = 0, retry = 0;
 308
 309	if (!udev || udev->uio_dev == -1)
 310		return -ENODEV;
 311
 312	if (csk) {
 313		len = sizeof(path_req);
 314		buf = (char *) &path_req;
 315		memset(&path_req, 0, len);
 316
 317		msg_type = ISCSI_KEVENT_PATH_REQ;
 318		path_req.handle = (u64) csk->l5_cid;
 319		if (test_bit(SK_F_IPV6, &csk->flags)) {
 320			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
 321			       sizeof(struct in6_addr));
 322			path_req.ip_addr_len = 16;
 323		} else {
 324			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
 325			       sizeof(struct in_addr));
 326			path_req.ip_addr_len = 4;
 327		}
 328		path_req.vlan_id = csk->vlan_id;
 329		path_req.pmtu = csk->mtu;
 330	}
 331
 332	while (retry < 3) {
 333		rc = 0;
 334		rcu_read_lock();
 335		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
 336		if (ulp_ops)
 337			rc = ulp_ops->iscsi_nl_send_msg(
 338				cp->ulp_handle[CNIC_ULP_ISCSI],
 339				msg_type, buf, len);
 340		rcu_read_unlock();
 341		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
 342			break;
 343
 344		msleep(100);
 345		retry++;
 346	}
 347	return rc;
 348}
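/* Note: only ISCSI_KEVENT_PATH_REQ is retried (up to 3 attempts, 100 ms
 * apart) to ride out a userspace listener that is momentarily busy;
 * ISCSI_KEVENT_IF_DOWN is fire-and-forget.  A NULL csk selects the
 * IF_DOWN message with an empty payload.
 */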
 349
 350static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
 351
 352static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 353				  char *buf, u16 len)
 354{
 355	int rc = -EINVAL;
 356
 357	switch (msg_type) {
 358	case ISCSI_UEVENT_PATH_UPDATE: {
 359		struct cnic_local *cp;
 360		u32 l5_cid;
 361		struct cnic_sock *csk;
 362		struct iscsi_path *path_resp;
 363
 364		if (len < sizeof(*path_resp))
 365			break;
 366
 367		path_resp = (struct iscsi_path *) buf;
 368		cp = dev->cnic_priv;
 369		l5_cid = (u32) path_resp->handle;
 370		if (l5_cid >= MAX_CM_SK_TBL_SZ)
 371			break;
 372
 373		rcu_read_lock();
 374		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
 375			rc = -ENODEV;
 376			rcu_read_unlock();
 377			break;
 378		}
 379		csk = &cp->csk_tbl[l5_cid];
 380		csk_hold(csk);
 381		if (cnic_in_use(csk) &&
 382		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
 383
 384			csk->vlan_id = path_resp->vlan_id;
 385
 386			memcpy(csk->ha, path_resp->mac_addr, 6);
 387			if (test_bit(SK_F_IPV6, &csk->flags))
 388				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
 389				       sizeof(struct in6_addr));
 390			else
 391				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
 392				       sizeof(struct in_addr));
 393
 394			if (is_valid_ether_addr(csk->ha)) {
 395				cnic_cm_set_pg(csk);
 396			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
 397				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 398
 399				cnic_cm_upcall(cp, csk,
 400					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
 401				clear_bit(SK_F_CONNECT_START, &csk->flags);
 402			}
 403		}
 404		csk_put(csk);
 405		rcu_read_unlock();
 406		rc = 0;
 407	}
 408	}
 409
 410	return rc;
 411}
 412
 413static int cnic_offld_prep(struct cnic_sock *csk)
 414{
 415	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 416		return 0;
 417
 418	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
 419		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
 420		return 0;
 421	}
 422
 423	return 1;
 424}
 425
 426static int cnic_close_prep(struct cnic_sock *csk)
 427{
 428	clear_bit(SK_F_CONNECT_START, &csk->flags);
 429	smp_mb__after_clear_bit();
 430
 431	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 432		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 433			msleep(1);
 434
 435		return 1;
 436	}
 437	return 0;
 438}
 439
 440static int cnic_abort_prep(struct cnic_sock *csk)
 441{
 442	clear_bit(SK_F_CONNECT_START, &csk->flags);
 443	smp_mb__after_clear_bit();
 444
 445	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 446		msleep(1);
 447
 448	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 449		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
 450		return 1;
 451	}
 452
 453	return 0;
 454}
 455
 456int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 457{
 458	struct cnic_dev *dev;
 459
 460	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 461		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 462		return -EINVAL;
 463	}
 464	mutex_lock(&cnic_lock);
 465	if (cnic_ulp_tbl_prot(ulp_type)) {
 466		pr_err("%s: Type %d has already been registered\n",
 467		       __func__, ulp_type);
 468		mutex_unlock(&cnic_lock);
 469		return -EBUSY;
 470	}
 471
 472	read_lock(&cnic_dev_lock);
 473	list_for_each_entry(dev, &cnic_dev_list, list) {
 474		struct cnic_local *cp = dev->cnic_priv;
 475
 476		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
 477	}
 478	read_unlock(&cnic_dev_lock);
 479
 480	atomic_set(&ulp_ops->ref_count, 0);
 481	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
 482	mutex_unlock(&cnic_lock);
 483
 484	/* Prevent race conditions with netdev_event */
 485	rtnl_lock();
 486	list_for_each_entry(dev, &cnic_dev_list, list) {
 487		struct cnic_local *cp = dev->cnic_priv;
 488
 489		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
 490			ulp_ops->cnic_init(dev);
 491	}
 492	rtnl_unlock();
 493
 494	return 0;
 495}
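/* Usage sketch (illustrative only, not from this file): an upper-layer
 * driver such as bnx2i registers itself roughly like
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.cnic_init = my_cnic_init,
 *		.cnic_exit = my_cnic_exit,
 *	};
 *
 *	if (cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops))
 *		goto fail;
 *
 * where the my_* names stand in for the ULP's own callbacks.  cnic_init()
 * is then invoked for every device already on cnic_dev_list, under rtnl
 * to avoid racing cnic_netdev_event().
 */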
 496
 497int cnic_unregister_driver(int ulp_type)
 498{
 499	struct cnic_dev *dev;
 500	struct cnic_ulp_ops *ulp_ops;
 501	int i = 0;
 502
 503	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 504		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 505		return -EINVAL;
 506	}
 507	mutex_lock(&cnic_lock);
 508	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 509	if (!ulp_ops) {
 510		pr_err("%s: Type %d has not been registered\n",
 511		       __func__, ulp_type);
 512		goto out_unlock;
 513	}
 514	read_lock(&cnic_dev_lock);
 515	list_for_each_entry(dev, &cnic_dev_list, list) {
 516		struct cnic_local *cp = dev->cnic_priv;
 517
 518		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 519			pr_err("%s: Type %d still has devices registered\n",
 520			       __func__, ulp_type);
 521			read_unlock(&cnic_dev_lock);
 522			goto out_unlock;
 523		}
 524	}
 525	read_unlock(&cnic_dev_lock);
 526
 527	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
 528
 529	mutex_unlock(&cnic_lock);
 530	synchronize_rcu();
 531	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
 532		msleep(100);
 533		i++;
 534	}
 535
 536	if (atomic_read(&ulp_ops->ref_count) != 0)
 537		pr_warn("%s: Failed waiting for ref count to go to zero\n",
 538			__func__);
 539	return 0;
 540
 541out_unlock:
 542	mutex_unlock(&cnic_lock);
 543	return -EINVAL;
 544}
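/* Note: after the table slot is cleared and synchronize_rcu() returns,
 * the loop above gives outstanding ulp_get() holders up to ~2 seconds
 * (20 x 100 ms) to drop their references; a leftover count is only
 * warned about, not treated as fatal.
 */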
 545
 546static int cnic_start_hw(struct cnic_dev *);
 547static void cnic_stop_hw(struct cnic_dev *);
 548
 549static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 550				void *ulp_ctx)
 551{
 552	struct cnic_local *cp = dev->cnic_priv;
 553	struct cnic_ulp_ops *ulp_ops;
 554
 555	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 556		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 557		return -EINVAL;
 558	}
 559	mutex_lock(&cnic_lock);
 560	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
 561		pr_err("%s: Driver with type %d has not been registered\n",
 562		       __func__, ulp_type);
 563		mutex_unlock(&cnic_lock);
 564		return -EAGAIN;
 565	}
 566	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 567		pr_err("%s: Type %d has already been registered to this device\n",
 568		       __func__, ulp_type);
 569		mutex_unlock(&cnic_lock);
 570		return -EBUSY;
 571	}
 572
 573	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
 574	cp->ulp_handle[ulp_type] = ulp_ctx;
 575	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 576	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
 577	cnic_hold(dev);
 578
 579	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
 580		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
 581			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
 582
 583	mutex_unlock(&cnic_lock);
 584
 585	cnic_ulp_ctl(dev, ulp_type, true);
 586
 587	return 0;
 588
 589}
 590EXPORT_SYMBOL(cnic_register_driver);
 591
 592static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 593{
 594	struct cnic_local *cp = dev->cnic_priv;
 595	int i = 0;
 596
 597	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 598		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 599		return -EINVAL;
 600	}
 601	mutex_lock(&cnic_lock);
 602	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 603		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
 604		cnic_put(dev);
 605	} else {
 606		pr_err("%s: device not registered to this ulp type %d\n",
 607		       __func__, ulp_type);
 608		mutex_unlock(&cnic_lock);
 609		return -EINVAL;
 610	}
 611	mutex_unlock(&cnic_lock);
 612
 613	if (ulp_type == CNIC_ULP_ISCSI)
 614		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 615
 616	synchronize_rcu();
 617
 618	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
 619	       i < 20) {
 620		msleep(100);
 621		i++;
 622	}
 623	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
 624		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 625
 626	cnic_ulp_ctl(dev, ulp_type, false);
 627
 628	return 0;
 629}
 630EXPORT_SYMBOL(cnic_unregister_driver);
 631
 632static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
 633			    u32 next)
 634{
 635	id_tbl->start = start_id;
 636	id_tbl->max = size;
 637	id_tbl->next = next;
 638	spin_lock_init(&id_tbl->lock);
 639	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
 640	if (!id_tbl->table)
 641		return -ENOMEM;
 642
 643	return 0;
 644}
 645
 646static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
 647{
 648	kfree(id_tbl->table);
 649	id_tbl->table = NULL;
 650}
 651
 652static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
 653{
 654	int ret = -1;
 655
 656	id -= id_tbl->start;
 657	if (id >= id_tbl->max)
 658		return ret;
 659
 660	spin_lock(&id_tbl->lock);
 661	if (!test_bit(id, id_tbl->table)) {
 662		set_bit(id, id_tbl->table);
 663		ret = 0;
 664	}
 665	spin_unlock(&id_tbl->lock);
 666	return ret;
 667}
 668
 669/* Returns -1 if not successful */
 670static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
 671{
 672	u32 id;
 673
 674	spin_lock(&id_tbl->lock);
 675	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
 676	if (id >= id_tbl->max) {
 677		id = -1;
 678		if (id_tbl->next != 0) {
 679			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
 680			if (id >= id_tbl->next)
 681				id = -1;
 682		}
 683	}
 684
 685	if (id < id_tbl->max) {
 686		set_bit(id, id_tbl->table);
 687		id_tbl->next = (id + 1) & (id_tbl->max - 1);
 688		id += id_tbl->start;
 689	}
 690
 691	spin_unlock(&id_tbl->lock);
 692
 693	return id;
 694}
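/* Note: the wrap of the next-allocation hint,
 * (id + 1) & (id_tbl->max - 1), only works cleanly if id_tbl->max is a
 * power of two; callers appear to size their tables accordingly.  A
 * full table yields (u32)-1, which cnic_free_id() treats as a no-op.
 */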
 695
 696static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
 697{
 698	if (id == -1)
 699		return;
 700
 701	id -= id_tbl->start;
 702	if (id >= id_tbl->max)
 703		return;
 704
 705	clear_bit(id, id_tbl->table);
 706}
 707
 708static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 709{
 710	int i;
 711
 712	if (!dma->pg_arr)
 713		return;
 714
 715	for (i = 0; i < dma->num_pages; i++) {
 716		if (dma->pg_arr[i]) {
 717			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
 718					  dma->pg_arr[i], dma->pg_map_arr[i]);
 719			dma->pg_arr[i] = NULL;
 720		}
 721	}
 722	if (dma->pgtbl) {
 723		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 724				  dma->pgtbl, dma->pgtbl_map);
 725		dma->pgtbl = NULL;
 726	}
 727	kfree(dma->pg_arr);
 728	dma->pg_arr = NULL;
 729	dma->num_pages = 0;
 730}
 731
 732static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 733{
 734	int i;
 735	__le32 *page_table = (__le32 *) dma->pgtbl;
 736
 737	for (i = 0; i < dma->num_pages; i++) {
 738		/* Each entry needs to be in big endian format. */
 739		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 740		page_table++;
 741		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 742		page_table++;
 743	}
 744}
 745
 746static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
 747{
 748	int i;
 749	__le32 *page_table = (__le32 *) dma->pgtbl;
 750
 751	for (i = 0; i < dma->num_pages; i++) {
 752		/* Each entry needs to be in little endian format. */
 753		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 754		page_table++;
 755		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 756		page_table++;
 757	}
 758}
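/* Note: the two page-table writers differ only in 32-bit word order --
 * high word first for the big-endian layout the bnx2 chips expect,
 * low word first for the bnx2x parts (this is the variant hooked up in
 * init_bnx2x_cnic()) -- while cpu_to_le32() handles byte order within
 * each word.
 */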
 759
 760static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 761			  int pages, int use_pg_tbl)
 762{
 763	int i, size;
 764	struct cnic_local *cp = dev->cnic_priv;
 765
 766	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
 767	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
 768	if (dma->pg_arr == NULL)
 769		return -ENOMEM;
 770
 771	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
 772	dma->num_pages = pages;
 773
 774	for (i = 0; i < pages; i++) {
 775		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
 776						    BCM_PAGE_SIZE,
 777						    &dma->pg_map_arr[i],
 778						    GFP_ATOMIC);
 779		if (dma->pg_arr[i] == NULL)
 780			goto error;
 781	}
 782	if (!use_pg_tbl)
 783		return 0;
 784
 785	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
 786			  ~(BCM_PAGE_SIZE - 1);
 787	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 788					&dma->pgtbl_map, GFP_ATOMIC);
 789	if (dma->pgtbl == NULL)
 790		goto error;
 791
 792	cp->setup_pgtbl(dev, dma);
 793
 794	return 0;
 795
 796error:
 797	cnic_free_dma(dev, dma);
 798	return -ENOMEM;
 799}
 800
 801static void cnic_free_context(struct cnic_dev *dev)
 802{
 803	struct cnic_local *cp = dev->cnic_priv;
 804	int i;
 805
 806	for (i = 0; i < cp->ctx_blks; i++) {
 807		if (cp->ctx_arr[i].ctx) {
 808			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
 809					  cp->ctx_arr[i].ctx,
 810					  cp->ctx_arr[i].mapping);
 811			cp->ctx_arr[i].ctx = NULL;
 812		}
 813	}
 814}
 815
 816static void __cnic_free_uio(struct cnic_uio_dev *udev)
 817{
 818	uio_unregister_device(&udev->cnic_uinfo);
 819
 820	if (udev->l2_buf) {
 821		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
 822				  udev->l2_buf, udev->l2_buf_map);
 823		udev->l2_buf = NULL;
 824	}
 825
 826	if (udev->l2_ring) {
 827		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
 828				  udev->l2_ring, udev->l2_ring_map);
 829		udev->l2_ring = NULL;
 830	}
 831
 832	pci_dev_put(udev->pdev);
 833	kfree(udev);
 834}
 835
 836static void cnic_free_uio(struct cnic_uio_dev *udev)
 837{
 838	if (!udev)
 839		return;
 840
 841	write_lock(&cnic_dev_lock);
 842	list_del_init(&udev->list);
 843	write_unlock(&cnic_dev_lock);
 844	__cnic_free_uio(udev);
 845}
 846
 847static void cnic_free_resc(struct cnic_dev *dev)
 848{
 849	struct cnic_local *cp = dev->cnic_priv;
 850	struct cnic_uio_dev *udev = cp->udev;
 851
 852	if (udev) {
 853		udev->dev = NULL;
 854		cp->udev = NULL;
 855	}
 856
 857	cnic_free_context(dev);
 858	kfree(cp->ctx_arr);
 859	cp->ctx_arr = NULL;
 860	cp->ctx_blks = 0;
 861
 862	cnic_free_dma(dev, &cp->gbl_buf_info);
 863	cnic_free_dma(dev, &cp->kwq_info);
 864	cnic_free_dma(dev, &cp->kwq_16_data_info);
 865	cnic_free_dma(dev, &cp->kcq2.dma);
 866	cnic_free_dma(dev, &cp->kcq1.dma);
 867	kfree(cp->iscsi_tbl);
 868	cp->iscsi_tbl = NULL;
 869	kfree(cp->ctx_tbl);
 870	cp->ctx_tbl = NULL;
 871
 872	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
 873	cnic_free_id_tbl(&cp->cid_tbl);
 874}
 875
 876static int cnic_alloc_context(struct cnic_dev *dev)
 877{
 878	struct cnic_local *cp = dev->cnic_priv;
 879
 880	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
 881		int i, k, arr_size;
 882
 883		cp->ctx_blk_size = BCM_PAGE_SIZE;
 884		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
 885		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
 886			   sizeof(struct cnic_ctx);
 887		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
 888		if (cp->ctx_arr == NULL)
 889			return -ENOMEM;
 890
 891		k = 0;
 892		for (i = 0; i < 2; i++) {
 893			u32 j, reg, off, lo, hi;
 894
 895			if (i == 0)
 896				off = BNX2_PG_CTX_MAP;
 897			else
 898				off = BNX2_ISCSI_CTX_MAP;
 899
 900			reg = cnic_reg_rd_ind(dev, off);
 901			lo = reg >> 16;
 902			hi = reg & 0xffff;
 903			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
 904				cp->ctx_arr[k].cid = j;
 905		}
 906
 907		cp->ctx_blks = k;
 908		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
 909			cp->ctx_blks = 0;
 910			return -ENOMEM;
 911		}
 912
 913		for (i = 0; i < cp->ctx_blks; i++) {
 914			cp->ctx_arr[i].ctx =
 915				dma_alloc_coherent(&dev->pcidev->dev,
 916						   BCM_PAGE_SIZE,
 917						   &cp->ctx_arr[i].mapping,
 918						   GFP_KERNEL);
 919			if (cp->ctx_arr[i].ctx == NULL)
 920				return -ENOMEM;
 921		}
 922	}
 923	return 0;
 924}
 925
 926static u16 cnic_bnx2_next_idx(u16 idx)
 927{
 928	return idx + 1;
 929}
 930
 931static u16 cnic_bnx2_hw_idx(u16 idx)
 932{
 933	return idx;
 934}
 935
 936static u16 cnic_bnx2x_next_idx(u16 idx)
 937{
 938	idx++;
 939	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
 940		idx++;
 941
 942	return idx;
 943}
 944
 945static u16 cnic_bnx2x_hw_idx(u16 idx)
 946{
 947	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
 948		idx++;
 949	return idx;
 950}
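/* Note: on bnx2x the last KCQE slot of every queue page is reserved for
 * the bnx2x_bd_chain_next pointer (filled in by cnic_alloc_kcq() below),
 * so both index helpers step over any index whose low bits equal
 * MAX_KCQE_CNT.  The bnx2 queue is linear behind a page table, hence
 * its trivial helpers.
 */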
 951
 952static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
 953			  bool use_pg_tbl)
 954{
 955	int err, i, use_page_tbl = 0;
 956	struct kcqe **kcq;
 957
 958	if (use_pg_tbl)
 959		use_page_tbl = 1;
 960
 961	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
 962	if (err)
 963		return err;
 964
 965	kcq = (struct kcqe **) info->dma.pg_arr;
 966	info->kcq = kcq;
 967
 968	info->next_idx = cnic_bnx2_next_idx;
 969	info->hw_idx = cnic_bnx2_hw_idx;
 970	if (use_pg_tbl)
 971		return 0;
 972
 973	info->next_idx = cnic_bnx2x_next_idx;
 974	info->hw_idx = cnic_bnx2x_hw_idx;
 975
 976	for (i = 0; i < KCQ_PAGE_CNT; i++) {
 977		struct bnx2x_bd_chain_next *next =
 978			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
 979		int j = i + 1;
 980
 981		if (j >= KCQ_PAGE_CNT)
 982			j = 0;
 983		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
 984		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
 985	}
 986	return 0;
 987}
 988
 989static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 990{
 991	struct cnic_local *cp = dev->cnic_priv;
 992	struct cnic_uio_dev *udev;
 993
 994	read_lock(&cnic_dev_lock);
 995	list_for_each_entry(udev, &cnic_udev_list, list) {
 996		if (udev->pdev == dev->pcidev) {
 997			udev->dev = dev;
 998			cp->udev = udev;
 999			read_unlock(&cnic_dev_lock);
1000			return 0;
1001		}
1002	}
1003	read_unlock(&cnic_dev_lock);
1004
1005	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1006	if (!udev)
1007		return -ENOMEM;
1008
1009	udev->uio_dev = -1;
1010
1011	udev->dev = dev;
1012	udev->pdev = dev->pcidev;
1013	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
1014	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1015					   &udev->l2_ring_map,
1016					   GFP_KERNEL | __GFP_COMP);
1017	if (!udev->l2_ring)
1018		goto err_udev;
1019
1020	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
1021	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
1022	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1023					  &udev->l2_buf_map,
1024					  GFP_KERNEL | __GFP_COMP);
1025	if (!udev->l2_buf)
1026		goto err_dma;
1027
1028	write_lock(&cnic_dev_lock);
1029	list_add(&udev->list, &cnic_udev_list);
1030	write_unlock(&cnic_dev_lock);
1031
1032	pci_dev_get(udev->pdev);
1033
1034	cp->udev = udev;
1035
1036	return 0;
1037 err_dma:
1038	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
1039			  udev->l2_ring, udev->l2_ring_map);
1040 err_udev:
1041	kfree(udev);
1042	return -ENOMEM;
1043}
1044
1045static int cnic_init_uio(struct cnic_dev *dev)
1046{
1047	struct cnic_local *cp = dev->cnic_priv;
1048	struct cnic_uio_dev *udev = cp->udev;
1049	struct uio_info *uinfo;
1050	int ret = 0;
1051
1052	if (!udev)
1053		return -ENOMEM;
1054
1055	uinfo = &udev->cnic_uinfo;
1056
1057	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
1058	uinfo->mem[0].internal_addr = dev->regview;
1059	uinfo->mem[0].memtype = UIO_MEM_PHYS;
1060
1061	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1062		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
1063						     TX_MAX_TSS_RINGS + 1);
1064		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
1065					PAGE_MASK;
1066		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
1067			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
1068		else
1069			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
1070
1071		uinfo->name = "bnx2_cnic";
1072	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1073		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
1074
1075		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
1076			PAGE_MASK;
1077		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
1078
1079		uinfo->name = "bnx2x_cnic";
1080	}
1081
1082	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
1083
1084	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
1085	uinfo->mem[2].size = udev->l2_ring_size;
1086	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
1087
1088	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
1089	uinfo->mem[3].size = udev->l2_buf_size;
1090	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
1091
1092	uinfo->version = CNIC_MODULE_VERSION;
1093	uinfo->irq = UIO_IRQ_CUSTOM;
1094
1095	uinfo->open = cnic_uio_open;
1096	uinfo->release = cnic_uio_close;
1097
1098	if (udev->uio_dev == -1) {
1099		if (!uinfo->priv) {
1100			uinfo->priv = udev;
1101
1102			ret = uio_register_device(&udev->pdev->dev, uinfo);
1103		}
1104	} else {
1105		cnic_init_rings(dev);
1106	}
1107
1108	return ret;
1109}
1110
1111static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
1112{
1113	struct cnic_local *cp = dev->cnic_priv;
1114	int ret;
1115
1116	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
1117	if (ret)
1118		goto error;
1119	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
1120
1121	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
1122	if (ret)
1123		goto error;
1124
1125	ret = cnic_alloc_context(dev);
1126	if (ret)
1127		goto error;
1128
1129	ret = cnic_alloc_uio_rings(dev, 2);
1130	if (ret)
1131		goto error;
1132
1133	ret = cnic_init_uio(dev);
1134	if (ret)
1135		goto error;
1136
1137	return 0;
1138
1139error:
1140	cnic_free_resc(dev);
1141	return ret;
1142}
1143
1144static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1145{
1146	struct cnic_local *cp = dev->cnic_priv;
1147	int ctx_blk_size = cp->ethdev->ctx_blk_size;
1148	int total_mem, blks, i;
1149
1150	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
1151	blks = total_mem / ctx_blk_size;
1152	if (total_mem % ctx_blk_size)
1153		blks++;
1154
1155	if (blks > cp->ethdev->ctx_tbl_len)
1156		return -ENOMEM;
1157
1158	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
1159	if (cp->ctx_arr == NULL)
1160		return -ENOMEM;
1161
1162	cp->ctx_blks = blks;
1163	cp->ctx_blk_size = ctx_blk_size;
1164	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
1165		cp->ctx_align = 0;
1166	else
1167		cp->ctx_align = ctx_blk_size;
1168
1169	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
1170
1171	for (i = 0; i < blks; i++) {
1172		cp->ctx_arr[i].ctx =
1173			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
1174					   &cp->ctx_arr[i].mapping,
1175					   GFP_KERNEL);
1176		if (cp->ctx_arr[i].ctx == NULL)
1177			return -ENOMEM;
1178
1179		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
1180			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
1181				cnic_free_context(dev);
1182				cp->ctx_blk_size += cp->ctx_align;
1183				i = -1;
1184				continue;
1185			}
1186		}
1187	}
1188	return 0;
1189}
1190
1191static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1192{
1193	struct cnic_local *cp = dev->cnic_priv;
1194	struct cnic_eth_dev *ethdev = cp->ethdev;
1195	u32 start_cid = ethdev->starting_cid;
1196	int i, j, n, ret, pages;
1197	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1198
1199	cp->iro_arr = ethdev->iro_arr;
1200
1201	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1202	cp->iscsi_start_cid = start_cid;
1203	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
1204
1205	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
1206		cp->max_cid_space += dev->max_fcoe_conn;
1207		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
1208		if (!cp->fcoe_init_cid)
1209			cp->fcoe_init_cid = 0x10;
1210	}
1211
1212	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
1213				GFP_KERNEL);
1214	if (!cp->iscsi_tbl)
1215		goto error;
1216
1217	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
1218				cp->max_cid_space, GFP_KERNEL);
1219	if (!cp->ctx_tbl)
1220		goto error;
1221
1222	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1223		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
1224		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1225	}
1226
1227	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
1228		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
1229
1230	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1231		PAGE_SIZE;
1232
1233	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1234	if (ret)
1235		return -ENOMEM;
1236
1237	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1238	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
1239		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1240
1241		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
1242		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
1243						   off;
1244
1245		if ((i % n) == (n - 1))
1246			j++;
1247	}
1248
1249	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
1250	if (ret)
1251		goto error;
1252
1253	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
1254		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1255		if (ret)
1256			goto error;
1257	}
1258
1259	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
1260	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1261	if (ret)
1262		goto error;
1263
1264	ret = cnic_alloc_bnx2x_context(dev);
1265	if (ret)
1266		goto error;
1267
1268	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1269
1270	cp->l2_rx_ring_size = 15;
1271
1272	ret = cnic_alloc_uio_rings(dev, 4);
1273	if (ret)
1274		goto error;
1275
1276	ret = cnic_init_uio(dev);
1277	if (ret)
1278		goto error;
1279
1280	return 0;
1281
1282error:
1283	cnic_free_resc(dev);
1284	return -ENOMEM;
1285}
1286
1287static inline u32 cnic_kwq_avail(struct cnic_local *cp)
1288{
1289	return cp->max_kwq_idx -
1290		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
1291}
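/* Note: standard power-of-two ring arithmetic -- max_kwq_idx doubles as
 * the index mask, so (prod - con) & mask counts the occupied slots even
 * across wraparound.  E.g. with a mask of 127, prod = 10 and con = 4
 * give 6 in flight and 127 - 6 = 121 free entries.
 */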
1292
1293static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1294				  u32 num_wqes)
1295{
1296	struct cnic_local *cp = dev->cnic_priv;
1297	struct kwqe *prod_qe;
1298	u16 prod, sw_prod, i;
1299
1300	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1301		return -EAGAIN;		/* bnx2 is down */
1302
1303	spin_lock_bh(&cp->cnic_ulp_lock);
1304	if (num_wqes > cnic_kwq_avail(cp) &&
1305	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
1306		spin_unlock_bh(&cp->cnic_ulp_lock);
1307		return -EAGAIN;
1308	}
1309
1310	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
1311
1312	prod = cp->kwq_prod_idx;
1313	sw_prod = prod & MAX_KWQ_IDX;
1314	for (i = 0; i < num_wqes; i++) {
1315		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
1316		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
1317		prod++;
1318		sw_prod = prod & MAX_KWQ_IDX;
1319	}
1320	cp->kwq_prod_idx = prod;
1321
1322	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
1323
1324	spin_unlock_bh(&cp->cnic_ulp_lock);
1325	return 0;
1326}
1327
1328static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
1329				   union l5cm_specific_data *l5_data)
1330{
1331	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1332	dma_addr_t map;
1333
1334	map = ctx->kwqe_data_mapping;
1335	l5_data->phy_address.lo = (u64) map & 0xffffffff;
1336	l5_data->phy_address.hi = (u64) map >> 32;
1337	return ctx->kwqe_data;
1338}
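/* Note: returns the per-connection scratch buffer set aside in
 * cnic_alloc_bnx2x_resc() and hands back its DMA address split into the
 * lo/hi halves of the l5cm regpair, the form in which 64-bit addresses
 * are passed to the firmware throughout this file.
 */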
1339
1340static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1341				u32 type, union l5cm_specific_data *l5_data)
1342{
1343	struct cnic_local *cp = dev->cnic_priv;
1344	struct l5cm_spe kwqe;
1345	struct kwqe_16 *kwq[1];
1346	u16 type_16;
1347	int ret;
1348
1349	kwqe.hdr.conn_and_cmd_data =
1350		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1351			     BNX2X_HW_CID(cp, cid)));
1352
1353	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
1354	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1355		   SPE_HDR_FUNCTION_ID;
1356
1357	kwqe.hdr.type = cpu_to_le16(type_16);
1358	kwqe.hdr.reserved1 = 0;
1359	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1360	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1361
1362	kwq[0] = (struct kwqe_16 *) &kwqe;
1363
1364	spin_lock_bh(&cp->cnic_ulp_lock);
1365	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1366	spin_unlock_bh(&cp->cnic_ulp_lock);
1367
1368	if (ret == 1)
1369		return 0;
1370
1371	return ret;
1372}
1373
1374static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1375				   struct kcqe *cqes[], u32 num_cqes)
1376{
1377	struct cnic_local *cp = dev->cnic_priv;
1378	struct cnic_ulp_ops *ulp_ops;
1379
1380	rcu_read_lock();
1381	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1382	if (likely(ulp_ops)) {
1383		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
1384					  cqes, num_cqes);
1385	}
1386	rcu_read_unlock();
1387}
1388
1389static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1390{
1391	struct cnic_local *cp = dev->cnic_priv;
1392	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1393	int hq_bds, pages;
1394	u32 pfid = cp->pfid;
1395
1396	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1397	cp->num_ccells = req1->num_ccells_per_conn;
1398	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
1399			      cp->num_iscsi_tasks;
1400	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1401			BNX2X_ISCSI_R2TQE_SIZE;
1402	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1403	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1404	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1405	cp->num_cqs = req1->num_cqs;
1406
1407	if (!dev->max_iscsi_conn)
1408		return 0;
1409
1410	/* init Tstorm RAM */
1411	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1412		  req1->rq_num_wqes);
1413	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1414		  PAGE_SIZE);
1415	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1416		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1417	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1418		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1419		  req1->num_tasks_per_conn);
1420
1421	/* init Ustorm RAM */
1422	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1423		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
1424		  req1->rq_buffer_size);
1425	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1426		  PAGE_SIZE);
1427	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1428		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1429	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1430		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1431		  req1->num_tasks_per_conn);
1432	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1433		  req1->rq_num_wqes);
1434	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1435		  req1->cq_num_wqes);
1436	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1437		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1438
1439	/* init Xstorm RAM */
1440	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1441		  PAGE_SIZE);
1442	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1443		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1444	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1445		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1446		  req1->num_tasks_per_conn);
1447	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1448		  hq_bds);
1449	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1450		  req1->num_tasks_per_conn);
1451	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1452		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1453
1454	/* init Cstorm RAM */
1455	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1456		  PAGE_SIZE);
1457	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1458		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1459	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1460		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1461		  req1->num_tasks_per_conn);
1462	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1463		  req1->cq_num_wqes);
1464	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1465		  hq_bds);
1466
1467	return 0;
1468}
1469
1470static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1471{
1472	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1473	struct cnic_local *cp = dev->cnic_priv;
1474	u32 pfid = cp->pfid;
1475	struct iscsi_kcqe kcqe;
1476	struct kcqe *cqes[1];
1477
1478	memset(&kcqe, 0, sizeof(kcqe));
1479	if (!dev->max_iscsi_conn) {
1480		kcqe.completion_status =
1481			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1482		goto done;
1483	}
1484
1485	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1486		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1487	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1488		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1489		req2->error_bit_map[1]);
1490
1491	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1492		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1493	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1494		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1495	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1496		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1497		req2->error_bit_map[1]);
1498
1499	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1500		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1501
1502	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1503
1504done:
1505	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1506	cqes[0] = (struct kcqe *) &kcqe;
1507	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1508
1509	return 0;
1510}
1511
1512static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1513{
1514	struct cnic_local *cp = dev->cnic_priv;
1515	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1516
1517	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1518		struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1519
1520		cnic_free_dma(dev, &iscsi->hq_info);
1521		cnic_free_dma(dev, &iscsi->r2tq_info);
1522		cnic_free_dma(dev, &iscsi->task_array_info);
1523		cnic_free_id(&cp->cid_tbl, ctx->cid);
1524	} else {
1525		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
1526	}
1527
1528	ctx->cid = 0;
1529}
1530
1531static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1532{
1533	u32 cid;
1534	int ret, pages;
1535	struct cnic_local *cp = dev->cnic_priv;
1536	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1537	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1538
1539	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1540		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1541		if (cid == -1) {
1542			ret = -ENOMEM;
1543			goto error;
1544		}
1545		ctx->cid = cid;
1546		return 0;
1547	}
1548
1549	cid = cnic_alloc_new_id(&cp->cid_tbl);
1550	if (cid == -1) {
1551		ret = -ENOMEM;
1552		goto error;
1553	}
1554
1555	ctx->cid = cid;
1556	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
1557
1558	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1559	if (ret)
1560		goto error;
1561
1562	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
1563	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1564	if (ret)
1565		goto error;
1566
1567	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1568	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1569	if (ret)
1570		goto error;
1571
1572	return 0;
1573
1574error:
1575	cnic_free_bnx2x_conn_resc(dev, l5_cid);
1576	return ret;
1577}
1578
1579static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1580				struct regpair *ctx_addr)
1581{
1582	struct cnic_local *cp = dev->cnic_priv;
1583	struct cnic_eth_dev *ethdev = cp->ethdev;
1584	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1585	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1586	unsigned long align_off = 0;
1587	dma_addr_t ctx_map;
1588	void *ctx;
1589
1590	if (cp->ctx_align) {
1591		unsigned long mask = cp->ctx_align - 1;
1592
1593		if (cp->ctx_arr[blk].mapping & mask)
1594			align_off = cp->ctx_align -
1595				    (cp->ctx_arr[blk].mapping & mask);
1596	}
1597	ctx_map = cp->ctx_arr[blk].mapping + align_off +
1598		(off * BNX2X_CONTEXT_MEM_SIZE);
1599	ctx = cp->ctx_arr[blk].ctx + align_off +
1600	      (off * BNX2X_CONTEXT_MEM_SIZE);
1601	if (init)
1602		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1603
1604	ctx_addr->lo = ctx_map & 0xffffffff;
1605	ctx_addr->hi = (u64) ctx_map >> 32;
1606	return ctx;
1607}
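/* Note: align_off rounds the block's DMA mapping up to the next
 * ctx_align boundary.  cnic_alloc_bnx2x_context() made this safe by
 * growing ctx_blk_size by ctx_align and reallocating when a block came
 * back misaligned, so the padded region always covers the shift.
 */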
1608
1609static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1610				u32 num)
1611{
1612	struct cnic_local *cp = dev->cnic_priv;
1613	struct iscsi_kwqe_conn_offload1 *req1 =
1614			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
1615	struct iscsi_kwqe_conn_offload2 *req2 =
1616			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
1617	struct iscsi_kwqe_conn_offload3 *req3;
1618	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1619	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1620	u32 cid = ctx->cid;
1621	u32 hw_cid = BNX2X_HW_CID(cp, cid);
1622	struct iscsi_context *ictx;
1623	struct regpair context_addr;
1624	int i, j, n = 2, n_max;
1625	u8 port = CNIC_PORT(cp);
1626
1627	ctx->ctx_flags = 0;
1628	if (!req2->num_additional_wqes)
1629		return -EINVAL;
1630
1631	n_max = req2->num_additional_wqes + 2;
1632
1633	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1634	if (ictx == NULL)
1635		return -ENOMEM;
1636
1637	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1638
1639	ictx->xstorm_ag_context.hq_prod = 1;
1640
1641	ictx->xstorm_st_context.iscsi.first_burst_length =
1642		ISCSI_DEF_FIRST_BURST_LEN;
1643	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1644		ISCSI_DEF_MAX_RECV_SEG_LEN;
1645	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1646		req1->sq_page_table_addr_lo;
1647	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1648		req1->sq_page_table_addr_hi;
1649	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1650	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1651	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1652		iscsi->hq_info.pgtbl_map & 0xffffffff;
1653	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1654		(u64) iscsi->hq_info.pgtbl_map >> 32;
1655	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1656		iscsi->hq_info.pgtbl[0];
1657	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1658		iscsi->hq_info.pgtbl[1];
1659	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1660		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1661	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1662		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1663	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1664		iscsi->r2tq_info.pgtbl[0];
1665	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1666		iscsi->r2tq_info.pgtbl[1];
1667	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1668		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1669	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1670		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1671	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1672		BNX2X_ISCSI_PBL_NOT_CACHED;
1673	ictx->xstorm_st_context.iscsi.flags.flags |=
1674		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1675	ictx->xstorm_st_context.iscsi.flags.flags |=
1676		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
1677	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1678		ETH_P_8021Q;
1679	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
1680		cp->port_mode == CHIP_2_PORT_MODE) {
1681
1682		port = 0;
1683	}
1684	ictx->xstorm_st_context.common.flags =
1685		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
1686	ictx->xstorm_st_context.common.flags =
1687		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
1688
1689	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1690	/* TSTORM requires the base address of RQ DB & not PTE */
1691	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1692		req2->rq_page_table_addr_lo & PAGE_MASK;
1693	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1694		req2->rq_page_table_addr_hi;
1695	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1696	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1697	ictx->tstorm_st_context.tcp.flags2 |=
1698		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1699	ictx->tstorm_st_context.tcp.ooo_support_mode =
1700		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
1701
1702	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1703
1704	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1705		req2->rq_page_table_addr_lo;
1706	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
1707		req2->rq_page_table_addr_hi;
1708	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1709	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1710	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1711		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1712	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1713		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1714	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1715		iscsi->r2tq_info.pgtbl[0];
1716	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1717		iscsi->r2tq_info.pgtbl[1];
1718	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1719		req1->cq_page_table_addr_lo;
1720	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1721		req1->cq_page_table_addr_hi;
1722	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1723	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1724	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1725	ictx->ustorm_st_context.task_pbe_cache_index =
1726		BNX2X_ISCSI_PBL_NOT_CACHED;
1727	ictx->ustorm_st_context.task_pdu_cache_index =
1728		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1729
1730	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1731		if (j == 3) {
1732			if (n >= n_max)
1733				break;
1734			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1735			j = 0;
1736		}
1737		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1738		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1739			req3->qp_first_pte[j].hi;
1740		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1741			req3->qp_first_pte[j].lo;
1742	}
1743
1744	ictx->ustorm_st_context.task_pbl_base.lo =
1745		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1746	ictx->ustorm_st_context.task_pbl_base.hi =
1747		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1748	ictx->ustorm_st_context.tce_phy_addr.lo =
1749		iscsi->task_array_info.pgtbl[0];
1750	ictx->ustorm_st_context.tce_phy_addr.hi =
1751		iscsi->task_array_info.pgtbl[1];
1752	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1753	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1754	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1755	ictx->ustorm_st_context.negotiated_rx_and_flags |=
1756		ISCSI_DEF_MAX_BURST_LEN;
1757	ictx->ustorm_st_context.negotiated_rx |=
1758		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1759		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1760
1761	ictx->cstorm_st_context.hq_pbl_base.lo =
1762		iscsi->hq_info.pgtbl_map & 0xffffffff;
1763	ictx->cstorm_st_context.hq_pbl_base.hi =
1764		(u64) iscsi->hq_info.pgtbl_map >> 32;
1765	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1766	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1767	ictx->cstorm_st_context.task_pbl_base.lo =
1768		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1769	ictx->cstorm_st_context.task_pbl_base.hi =
1770		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1771	/* CSTORM and USTORM initialization is different, CSTORM requires
1772	 * CQ DB base & not PTE addr */
1773	ictx->cstorm_st_context.cq_db_base.lo =
1774		req1->cq_page_table_addr_lo & PAGE_MASK;
1775	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1776	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1777	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1778	for (i = 0; i < cp->num_cqs; i++) {
1779		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1780			ISCSI_INITIAL_SN;
1781		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1782			ISCSI_INITIAL_SN;
1783	}
1784
1785	ictx->xstorm_ag_context.cdu_reserved =
1786		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1787				       ISCSI_CONNECTION_TYPE);
1788	ictx->ustorm_ag_context.cdu_usage =
1789		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1790				       ISCSI_CONNECTION_TYPE);
1791	return 0;
1792
1793}
1794
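/* Handle an iSCSI OFFLOAD_CONN1 request.  The request spans at least
 * two KWQEs (offload1 + offload2) plus the additional WQEs advertised
 * by offload2; *work reports how many WQEs were consumed.  On success
 * or resource failure, a KCQE carrying the completion status is sent
 * back to the ULP.
 */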
1795static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1796				   u32 num, int *work)
1797{
1798	struct iscsi_kwqe_conn_offload1 *req1;
1799	struct iscsi_kwqe_conn_offload2 *req2;
1800	struct cnic_local *cp = dev->cnic_priv;
1801	struct cnic_context *ctx;
1802	struct iscsi_kcqe kcqe;
1803	struct kcqe *cqes[1];
1804	u32 l5_cid;
1805	int ret = 0;
1806
1807	if (num < 2) {
1808		*work = num;
1809		return -EINVAL;
1810	}
1811
1812	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1813	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1814	if ((num - 2) < req2->num_additional_wqes) {
1815		*work = num;
1816		return -EINVAL;
1817	}
1818	*work = 2 + req2->num_additional_wqes;
1819
1820	l5_cid = req1->iscsi_conn_id;
1821	if (l5_cid >= MAX_ISCSI_TBL_SZ)
1822		return -EINVAL;
1823
1824	memset(&kcqe, 0, sizeof(kcqe));
1825	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1826	kcqe.iscsi_conn_id = l5_cid;
1827	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1828
1829	ctx = &cp->ctx_tbl[l5_cid];
1830	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1831		kcqe.completion_status =
1832			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1833		goto done;
1834	}
1835
1836	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1837		atomic_dec(&cp->iscsi_conn);
1838		goto done;
1839	}
1840	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1841	if (ret) {
1842		atomic_dec(&cp->iscsi_conn);
1843		ret = 0;
1844		goto done;
1845	}
1846	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1847	if (ret < 0) {
1848		cnic_free_bnx2x_conn_resc(dev, l5_cid);
1849		atomic_dec(&cp->iscsi_conn);
1850		goto done;
1851	}
1852
1853	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1854	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
1855
1856done:
1857	cqes[0] = (struct kcqe *) &kcqe;
1858	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1859	return 0;
1860}
1861
1862
1863static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1864{
1865	struct cnic_local *cp = dev->cnic_priv;
1866	struct iscsi_kwqe_conn_update *req =
1867		(struct iscsi_kwqe_conn_update *) kwqe;
1868	void *data;
1869	union l5cm_specific_data l5_data;
1870	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1871	int ret;
1872
1873	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1874		return -EINVAL;
1875
1876	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1877	if (!data)
1878		return -ENOMEM;
1879
1880	memcpy(data, kwqe, sizeof(struct kwqe));
1881
1882	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1883			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1884	return ret;
1885}
1886
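/* Submit a CFC delete ramrod for the context and wait, with a bounded
 * timeout, for the completion handler to set ctx->wait_cond.  Returns
 * -EBUSY if the completion flagged a CID error.
 */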
1887static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1888{
1889	struct cnic_local *cp = dev->cnic_priv;
1890	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1891	union l5cm_specific_data l5_data;
1892	int ret;
1893	u32 hw_cid;
1894
1895	init_waitqueue_head(&ctx->waitq);
1896	ctx->wait_cond = 0;
1897	memset(&l5_data, 0, sizeof(l5_data));
1898	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
1899
1900	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1901				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
1902
1903	if (ret == 0) {
1904		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
1905		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
1906			return -EBUSY;
1907	}
1908
1909	return 0;
1910}
1911
1912static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1913{
1914	struct cnic_local *cp = dev->cnic_priv;
1915	struct iscsi_kwqe_conn_destroy *req =
1916		(struct iscsi_kwqe_conn_destroy *) kwqe;
1917	u32 l5_cid = req->reserved0;
1918	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1919	int ret = 0;
1920	struct iscsi_kcqe kcqe;
1921	struct kcqe *cqes[1];
1922
1923	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
1924		goto skip_cfc_delete;
1925
1926	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
1927		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
1928
1929		if (delta > (2 * HZ))
1930			delta = 0;
1931
1932		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
1933		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
1934		goto destroy_reply;
1935	}
1936
1937	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
1938
1939skip_cfc_delete:
1940	cnic_free_bnx2x_conn_resc(dev, l5_cid);
1941
1942	if (!ret) {
1943		atomic_dec(&cp->iscsi_conn);
1944		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
1945	}
1946
1947destroy_reply:
1948	memset(&kcqe, 0, sizeof(kcqe));
1949	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
1950	kcqe.iscsi_conn_id = l5_cid;
1951	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1952	kcqe.iscsi_conn_context_id = req->context_id;
1953
1954	cqes[0] = (struct kcqe *) &kcqe;
1955	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1956
1957	return 0;
1958}
1959
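/* Fill the XSTORM and TSTORM portions of the active-connection buffer
 * for a TCP connect: context address, MSS, receive buffer, optional
 * keep-alive parameters, and the pseudo-header checksum computed with
 * csum_ipv6_magic().  The address fields are 4 dwords for IPv4 as
 * well, presumably zero-padded by the caller.
 */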
1960static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
1961				      struct l4_kwq_connect_req1 *kwqe1,
1962				      struct l4_kwq_connect_req3 *kwqe3,
1963				      struct l5cm_active_conn_buffer *conn_buf)
1964{
1965	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
1966	struct l5cm_xstorm_conn_buffer *xstorm_buf =
1967		&conn_buf->xstorm_conn_buffer;
1968	struct l5cm_tstorm_conn_buffer *tstorm_buf =
1969		&conn_buf->tstorm_conn_buffer;
1970	struct regpair context_addr;
1971	u32 cid = BNX2X_SW_CID(kwqe1->cid);
1972	struct in6_addr src_ip, dst_ip;
1973	int i;
1974	u32 *addrp;
1975
1976	addrp = (u32 *) &conn_addr->local_ip_addr;
1977	for (i = 0; i < 4; i++, addrp++)
1978		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
1979
1980	addrp = (u32 *) &conn_addr->remote_ip_addr;
1981	for (i = 0; i < 4; i++, addrp++)
1982		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
1983
1984	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
1985
1986	xstorm_buf->context_addr.hi = context_addr.hi;
1987	xstorm_buf->context_addr.lo = context_addr.lo;
1988	xstorm_buf->mss = 0xffff;
1989	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
1990	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
1991		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
1992	xstorm_buf->pseudo_header_checksum =
1993		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
1994
1995	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
1996		tstorm_buf->params |=
1997			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
1998	if (kwqe3->ka_timeout) {
1999		tstorm_buf->ka_enable = 1;
2000		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
2001		tstorm_buf->ka_interval = kwqe3->ka_interval;
2002		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
2003	}
2004	tstorm_buf->max_rt_time = 0xffffffff;
2005}
2006
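/* Program the device MAC address into XSTORM and TSTORM internal
 * memory.  Note that the TSTORM copy is written byte-reversed
 * (LSB/MID/MSB word pairs).
 */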
2007static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2008{
2009	struct cnic_local *cp = dev->cnic_priv;
2010	u32 pfid = cp->pfid;
2011	u8 *mac = dev->mac_addr;
2012
2013	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2014		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
2015	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2016		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
2017	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2018		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
2019	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2020		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
2021	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2022		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
2023	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2024		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
2025
2026	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2027		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
2028	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2029		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2030		 mac[4]);
2031	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2032		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
2033	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2034		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2035		 mac[2]);
2036	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2037		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
2038	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2039		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2040		 mac[0]);
2041}
2042
2043static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
2044{
2045	struct cnic_local *cp = dev->cnic_priv;
2046	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
2047	u16 tstorm_flags = 0;
2048
2049	if (tcp_ts) {
2050		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
2051		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
2052	}
2053
2054	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2055		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
2056
2057	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
2058		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
2059}
2060
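/* Handle an L4 CONNECT1 request.  IPv4 connects consume two KWQEs
 * (connect1 + connect3), IPv6 connects consume three (connect1 +
 * connect2 + connect3).  The addresses and TCP options are marshalled
 * into the per-connection kwqe16 data buffer and submitted as a
 * TCP_CONNECT ramrod.
 */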
2061static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2062			      u32 num, int *work)
2063{
2064	struct cnic_local *cp = dev->cnic_priv;
2065	struct l4_kwq_connect_req1 *kwqe1 =
2066		(struct l4_kwq_connect_req1 *) wqes[0];
2067	struct l4_kwq_connect_req3 *kwqe3;
2068	struct l5cm_active_conn_buffer *conn_buf;
2069	struct l5cm_conn_addr_params *conn_addr;
2070	union l5cm_specific_data l5_data;
2071	u32 l5_cid = kwqe1->pg_cid;
2072	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
2073	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2074	int ret;
2075
2076	if (num < 2) {
2077		*work = num;
2078		return -EINVAL;
2079	}
2080
2081	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
2082		*work = 3;
2083	else
2084		*work = 2;
2085
2086	if (num < *work) {
2087		*work = num;
2088		return -EINVAL;
2089	}
2090
2091	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
2092		netdev_err(dev->netdev, "conn_buf size too big\n");
2093		return -ENOMEM;
2094	}
2095	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2096	if (!conn_buf)
2097		return -ENOMEM;
2098
2099	memset(conn_buf, 0, sizeof(*conn_buf));
2100
2101	conn_addr = &conn_buf->conn_addr_buf;
2102	conn_addr->remote_addr_0 = csk->ha[0];
2103	conn_addr->remote_addr_1 = csk->ha[1];
2104	conn_addr->remote_addr_2 = csk->ha[2];
2105	conn_addr->remote_addr_3 = csk->ha[3];
2106	conn_addr->remote_addr_4 = csk->ha[4];
2107	conn_addr->remote_addr_5 = csk->ha[5];
2108
2109	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
2110		struct l4_kwq_connect_req2 *kwqe2 =
2111			(struct l4_kwq_connect_req2 *) wqes[1];
2112
2113		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
2114		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
2115		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
2116
2117		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
2118		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
2119		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
2120		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
2121	}
2122	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
2123
2124	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
2125	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
2126	conn_addr->local_tcp_port = kwqe1->src_port;
2127	conn_addr->remote_tcp_port = kwqe1->dst_port;
2128
2129	conn_addr->pmtu = kwqe3->pmtu;
2130	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2131
2132	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
2133		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
2134
2135	cnic_bnx2x_set_tcp_timestamp(dev,
2136		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
2137
2138	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2139			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2140	if (!ret)
2141		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2142
2143	return ret;
2144}
2145
2146static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2147{
2148	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2149	union l5cm_specific_data l5_data;
2150	int ret;
2151
2152	memset(&l5_data, 0, sizeof(l5_data));
2153	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2154			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2155	return ret;
2156}
2157
2158static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2159{
2160	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2161	union l5cm_specific_data l5_data;
2162	int ret;
2163
2164	memset(&l5_data, 0, sizeof(l5_data));
2165	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2166			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2167	return ret;
2168}
2169static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2170{
2171	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2172	struct l4_kcq kcqe;
2173	struct kcqe *cqes[1];
2174
2175	memset(&kcqe, 0, sizeof(kcqe));
2176	kcqe.pg_host_opaque = req->host_opaque;
2177	kcqe.pg_cid = req->host_opaque;
2178	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2179	cqes[0] = (struct kcqe *) &kcqe;
2180	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2181	return 0;
2182}
2183
2184static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2185{
2186	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2187	struct l4_kcq kcqe;
2188	struct kcqe *cqes[1];
2189
2190	memset(&kcqe, 0, sizeof(kcqe));
2191	kcqe.pg_host_opaque = req->pg_host_opaque;
2192	kcqe.pg_cid = req->pg_cid;
2193	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2194	cqes[0] = (struct kcqe *) &kcqe;
2195	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2196	return 0;
2197}
2198
2199static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2200{
2201	struct fcoe_kwqe_stat *req;
2202	struct fcoe_stat_ramrod_params *fcoe_stat;
2203	union l5cm_specific_data l5_data;
2204	struct cnic_local *cp = dev->cnic_priv;
2205	int ret;
2206	u32 cid;
2207
2208	req = (struct fcoe_kwqe_stat *) kwqe;
2209	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2210
2211	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2212	if (!fcoe_stat)
2213		return -ENOMEM;
2214
2215	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2216	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2217
2218	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
2219				  FCOE_CONNECTION_TYPE, &l5_data);
2220	return ret;
2221}
2222
2223static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2224				 u32 num, int *work)
2225{
2226	int ret;
2227	struct cnic_local *cp = dev->cnic_priv;
2228	u32 cid;
2229	struct fcoe_init_ramrod_params *fcoe_init;
2230	struct fcoe_kwqe_init1 *req1;
2231	struct fcoe_kwqe_init2 *req2;
2232	struct fcoe_kwqe_init3 *req3;
2233	union l5cm_specific_data l5_data;
2234
2235	if (num < 3) {
2236		*work = num;
2237		return -EINVAL;
2238	}
2239	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2240	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2241	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2242	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2243		*work = 1;
2244		return -EINVAL;
2245	}
2246	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2247		*work = 2;
2248		return -EINVAL;
2249	}
2250
2251	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2252		netdev_err(dev->netdev, "fcoe_init size too big\n");
2253		return -ENOMEM;
2254	}
2255	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2256	if (!fcoe_init)
2257		return -ENOMEM;
2258
2259	memset(fcoe_init, 0, sizeof(*fcoe_init));
2260	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2261	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2262	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
2263	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2264	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2265	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
2266
2267	fcoe_init->sb_num = cp->status_blk_num;
2268	fcoe_init->eq_prod = MAX_KCQ_IDX;
2269	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2270	cp->kcq2.sw_prod_idx = 0;
2271
2272	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2273	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2274				  FCOE_CONNECTION_TYPE, &l5_data);
2275	*work = 3;
2276	return ret;
2277}
2278
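/* Handle an FCoE OFFLOAD_CONN1 request, which always spans four KWQEs.
 * On any failure an OFFLOAD_CONN KCQE with CTX_ALLOC_FAILURE status is
 * returned to the ULP and any connection resources already allocated
 * are freed.
 */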
2279static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2280				 u32 num, int *work)
2281{
2282	int ret = 0;
2283	u32 cid = -1, l5_cid;
2284	struct cnic_local *cp = dev->cnic_priv;
2285	struct fcoe_kwqe_conn_offload1 *req1;
2286	struct fcoe_kwqe_conn_offload2 *req2;
2287	struct fcoe_kwqe_conn_offload3 *req3;
2288	struct fcoe_kwqe_conn_offload4 *req4;
2289	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2290	struct cnic_context *ctx;
2291	struct fcoe_context *fctx;
2292	struct regpair ctx_addr;
2293	union l5cm_specific_data l5_data;
2294	struct fcoe_kcqe kcqe;
2295	struct kcqe *cqes[1];
2296
2297	if (num < 4) {
2298		*work = num;
2299		return -EINVAL;
2300	}
2301	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2302	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2303	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2304	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2305
2306	*work = 4;
2307
2308	l5_cid = req1->fcoe_conn_id;
2309	if (l5_cid >= dev->max_fcoe_conn)
2310		goto err_reply;
2311
2312	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2313
2314	ctx = &cp->ctx_tbl[l5_cid];
2315	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2316		goto err_reply;
2317
2318	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2319	if (ret) {
2320		ret = 0;
2321		goto err_reply;
2322	}
2323	cid = ctx->cid;
2324
2325	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2326	if (fctx) {
2327		u32 hw_cid = BNX2X_HW_CID(cp, cid);
2328		u32 val;
2329
2330		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2331					     FCOE_CONNECTION_TYPE);
2332		fctx->xstorm_ag_context.cdu_reserved = val;
2333		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2334					     FCOE_CONNECTION_TYPE);
2335		fctx->ustorm_ag_context.cdu_usage = val;
2336	}
2337	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2338		netdev_err(dev->netdev, "fcoe_offload size too big\n");
2339		goto err_reply;
2340	}
2341	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2342	if (!fcoe_offload)
2343		goto err_reply;
2344
2345	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2346	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2347	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2348	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2349	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2350
2351	cid = BNX2X_HW_CID(cp, cid);
2352	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2353				  FCOE_CONNECTION_TYPE, &l5_data);
2354	if (!ret)
2355		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2356
2357	return ret;
2358
2359err_reply:
2360	if (cid != -1)
2361		cnic_free_bnx2x_conn_resc(dev, l5_cid);
2362
2363	memset(&kcqe, 0, sizeof(kcqe));
2364	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2365	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2366	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2367
2368	cqes[0] = (struct kcqe *) &kcqe;
2369	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2370	return ret;
2371}
2372
2373static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2374{
2375	struct fcoe_kwqe_conn_enable_disable *req;
2376	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2377	union l5cm_specific_data l5_data;
2378	int ret;
2379	u32 cid, l5_cid;
2380	struct cnic_local *cp = dev->cnic_priv;
2381
2382	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2383	cid = req->context_id;
2384	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2385
2386	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2387		netdev_err(dev->netdev, "fcoe_enable size too big\n");
2388		return -ENOMEM;
2389	}
2390	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2391	if (!fcoe_enable)
2392		return -ENOMEM;
2393
2394	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2395	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2396	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2397				  FCOE_CONNECTION_TYPE, &l5_data);
2398	return ret;
2399}
2400
2401static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2402{
2403	struct fcoe_kwqe_conn_enable_disable *req;
2404	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2405	union l5cm_specific_data l5_data;
2406	int ret;
2407	u32 cid, l5_cid;
2408	struct cnic_local *cp = dev->cnic_priv;
2409
2410	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2411	cid = req->context_id;
2412	l5_cid = req->conn_id;
2413	if (l5_cid >= dev->max_fcoe_conn)
2414		return -EINVAL;
2415
2416	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2417
2418	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2419		netdev_err(dev->netdev, "fcoe_disable size too big\n");
2420		return -ENOMEM;
2421	}
2422	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2423	if (!fcoe_disable)
2424		return -ENOMEM;
2425
2426	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2427	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2428	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2429				  FCOE_CONNECTION_TYPE, &l5_data);
2430	return ret;
2431}
2432
2433static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2434{
2435	struct fcoe_kwqe_conn_destroy *req;
2436	union l5cm_specific_data l5_data;
2437	int ret;
2438	u32 cid, l5_cid;
2439	struct cnic_local *cp = dev->cnic_priv;
2440	struct cnic_context *ctx;
2441	struct fcoe_kcqe kcqe;
2442	struct kcqe *cqes[1];
2443
2444	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2445	cid = req->context_id;
2446	l5_cid = req->conn_id;
2447	if (l5_cid >= dev->max_fcoe_conn)
2448		return -EINVAL;
2449
2450	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2451
2452	ctx = &cp->ctx_tbl[l5_cid];
2453
2454	init_waitqueue_head(&ctx->waitq);
2455	ctx->wait_cond = 0;
2456
2457	memset(&kcqe, 0, sizeof(kcqe));
2458	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2459	memset(&l5_data, 0, sizeof(l5_data));
2460	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2461				  FCOE_CONNECTION_TYPE, &l5_data);
2462	if (ret == 0) {
2463		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2464		if (ctx->wait_cond)
2465			kcqe.completion_status = 0;
2466	}
2467
2468	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2469	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2470
2471	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2472	kcqe.fcoe_conn_id = req->conn_id;
2473	kcqe.fcoe_conn_context_id = cid;
2474
2475	cqes[0] = (struct kcqe *) &kcqe;
2476	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2477	return ret;
2478}
2479
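/* Wait for every context at or above start_cid to finish pending
 * delete work and clear its offload-start flag, polling briefly and
 * warning about any CID that still is not deleted.
 */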
2480static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2481{
2482	struct cnic_local *cp = dev->cnic_priv;
2483	u32 i;
2484
2485	for (i = start_cid; i < cp->max_cid_space; i++) {
2486		struct cnic_context *ctx = &cp->ctx_tbl[i];
2487		int j;
2488
2489		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2490			msleep(10);
2491
2492		for (j = 0; j < 5; j++) {
2493			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2494				break;
2495			msleep(20);
2496		}
2497
2498		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2499			netdev_warn(dev->netdev, "CID %x not deleted\n",
2500				   ctx->cid);
2501	}
2502}
2503
2504static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2505{
2506	struct fcoe_kwqe_destroy *req;
2507	union l5cm_specific_data l5_data;
2508	struct cnic_local *cp = dev->cnic_priv;
2509	int ret;
2510	u32 cid;
2511
2512	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2513
2514	req = (struct fcoe_kwqe_destroy *) kwqe;
2515	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2516
2517	memset(&l5_data, 0, sizeof(l5_data));
2518	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2519				  FCOE_CONNECTION_TYPE, &l5_data);
2520	return ret;
2521}
2522
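/* Build an error KCQE for a KWQE that could not be submitted (for
 * example after a bnx2x parity error) and deliver it to the owning
 * ULP, so the upper layer can complete cleanup without waiting for a
 * hardware completion that will never arrive.
 */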
2523static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2524{
2525	struct cnic_local *cp = dev->cnic_priv;
2526	struct kcqe kcqe;
2527	struct kcqe *cqes[1];
2528	u32 cid;
2529	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2530	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
2531	u32 kcqe_op;
2532	int ulp_type;
2533
2534	cid = kwqe->kwqe_info0;
2535	memset(&kcqe, 0, sizeof(kcqe));
2536
2537	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2538		u32 l5_cid = 0;
2539
2540		ulp_type = CNIC_ULP_FCOE;
2541		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2542			struct fcoe_kwqe_conn_enable_disable *req;
2543
2544			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2545			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2546			cid = req->context_id;
2547			l5_cid = req->conn_id;
2548		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2549			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2550		} else {
2551			return;
2552		}
2553		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2554		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
2555		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2556		kcqe.kcqe_info2 = cid;
2557		kcqe.kcqe_info0 = l5_cid;
2558
2559	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
2560		ulp_type = CNIC_ULP_ISCSI;
2561		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2562			cid = kwqe->kwqe_info1;
2563
2564		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2565		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
2566		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
2567		kcqe.kcqe_info2 = cid;
2568		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2569
2570	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2571		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
2572
2573		ulp_type = CNIC_ULP_L4;
2574		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2575			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2576		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2577			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2578		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2579			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2580		else
2581			return;
2582
2583		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2584				    KCQE_FLAGS_LAYER_MASK_L4;
2585		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2586		l4kcqe->cid = cid;
2587		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2588	} else {
2589		return;
2590	}
2591
2592	cqes[0] = (struct kcqe *) &kcqe;
2593	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2594}
2595
2596static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2597					 struct kwqe *wqes[], u32 num_wqes)
2598{
2599	int i, work, ret;
2600	u32 opcode;
2601	struct kwqe *kwqe;
2602
2603	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2604		return -EAGAIN;		/* bnx2x is down */
2605
2606	for (i = 0; i < num_wqes; ) {
2607		kwqe = wqes[i];
2608		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2609		work = 1;
2610
2611		switch (opcode) {
2612		case ISCSI_KWQE_OPCODE_INIT1:
2613			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2614			break;
2615		case ISCSI_KWQE_OPCODE_INIT2:
2616			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2617			break;
2618		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2619			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2620						     num_wqes - i, &work);
2621			break;
2622		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2623			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2624			break;
2625		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2626			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2627			break;
2628		case L4_KWQE_OPCODE_VALUE_CONNECT1:
2629			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2630						 &work);
2631			break;
2632		case L4_KWQE_OPCODE_VALUE_CLOSE:
2633			ret = cnic_bnx2x_close(dev, kwqe);
2634			break;
2635		case L4_KWQE_OPCODE_VALUE_RESET:
2636			ret = cnic_bnx2x_reset(dev, kwqe);
2637			break;
2638		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2639			ret = cnic_bnx2x_offload_pg(dev, kwqe);
2640			break;
2641		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2642			ret = cnic_bnx2x_update_pg(dev, kwqe);
2643			break;
2644		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2645			ret = 0;
2646			break;
2647		default:
2648			ret = 0;
2649			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2650				   opcode);
2651			break;
2652		}
2653		if (ret < 0) {
2654			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2655				   opcode);
2656
2657			/* Possibly bnx2x parity error, send completion
2658			 * to ulp drivers with error code to speed up
2659			 * cleanup and reset recovery.
2660			 */
2661			if (ret == -EIO || ret == -EAGAIN)
2662				cnic_bnx2x_kwqe_err(dev, kwqe);
2663		}
2664		i += work;
2665	}
2666	return 0;
2667}
2668
2669static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2670					struct kwqe *wqes[], u32 num_wqes)
2671{
2672	struct cnic_local *cp = dev->cnic_priv;
2673	int i, work, ret;
2674	u32 opcode;
2675	struct kwqe *kwqe;
2676
2677	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2678		return -EAGAIN;		/* bnx2x is down */
2679
2680	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
2681		return -EINVAL;
2682
2683	for (i = 0; i < num_wqes; ) {
2684		kwqe = wqes[i];
2685		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2686		work = 1;
2687
2688		switch (opcode) {
2689		case FCOE_KWQE_OPCODE_INIT1:
2690			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2691						    num_wqes - i, &work);
2692			break;
2693		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2694			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2695						    num_wqes - i, &work);
2696			break;
2697		case FCOE_KWQE_OPCODE_ENABLE_CONN:
2698			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2699			break;
2700		case FCOE_KWQE_OPCODE_DISABLE_CONN:
2701			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2702			break;
2703		case FCOE_KWQE_OPCODE_DESTROY_CONN:
2704			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2705			break;
2706		case FCOE_KWQE_OPCODE_DESTROY:
2707			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2708			break;
2709		case FCOE_KWQE_OPCODE_STAT:
2710			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2711			break;
2712		default:
2713			ret = 0;
2714			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2715				   opcode);
2716			break;
2717		}
2718		if (ret < 0) {
2719			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2720				   opcode);
2721
2722			/* Possibly bnx2x parity error, send completion
2723			 * to ulp drivers with error code to speed up
2724			 * cleanup and reset recovery.
2725			 */
2726			if (ret == -EIO || ret == -EAGAIN)
2727				cnic_bnx2x_kwqe_err(dev, kwqe);
2728		}
2729		i += work;
2730	}
2731	return 0;
2732}
2733
2734static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2735				   u32 num_wqes)
2736{
2737	int ret = -EINVAL;
2738	u32 layer_code;
2739
2740	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2741		return -EAGAIN;		/* bnx2x is down */
2742
2743	if (!num_wqes)
2744		return 0;
2745
2746	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2747	switch (layer_code) {
2748	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2749	case KWQE_FLAGS_LAYER_MASK_L4:
2750	case KWQE_FLAGS_LAYER_MASK_L2:
2751		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2752		break;
2753
2754	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2755		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2756		break;
2757	}
2758	return ret;
2759}
2760
2761static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2762{
2763	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2764		return KCQE_FLAGS_LAYER_MASK_L4;
2765
2766	return opflag & KCQE_FLAGS_LAYER_MASK;
2767}
2768
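/* Dispatch completed KCQEs to the ULP drivers.  Consecutive KCQEs of
 * the same protocol layer are batched into a single indicate_kcqes()
 * call, and ramrod completions are counted so the SPQ credits can be
 * returned in one shot at the end.
 */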
2769static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2770{
2771	struct cnic_local *cp = dev->cnic_priv;
2772	int i, j, comp = 0;
2773
2774	i = 0;
2775	j = 1;
2776	while (num_cqes) {
2777		struct cnic_ulp_ops *ulp_ops;
2778		int ulp_type;
2779		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2780		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2781
2782		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2783			comp++;
2784
2785		while (j < num_cqes) {
2786			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2787
2788			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2789				break;
2790
2791			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2792				comp++;
2793			j++;
2794		}
2795
2796		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2797			ulp_type = CNIC_ULP_RDMA;
2798		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2799			ulp_type = CNIC_ULP_ISCSI;
2800		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2801			ulp_type = CNIC_ULP_FCOE;
2802		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2803			ulp_type = CNIC_ULP_L4;
2804		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2805			goto end;
2806		else {
2807			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2808				   kcqe_op_flag);
2809			goto end;
2810		}
2811
2812		rcu_read_lock();
2813		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2814		if (likely(ulp_ops)) {
2815			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2816						  cp->completed_kcq + i, j);
2817		}
2818		rcu_read_unlock();
2819end:
2820		num_cqes -= j;
2821		i += j;
2822		j = 1;
2823	}
2824	if (unlikely(comp))
2825		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2826}
2827
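/* Collect new KCQEs from the ring into cp->completed_kcq[].  Entries
 * flagged with KCQE_FLAGS_NEXT belong to a multi-KCQE group, so the
 * software producer index is only advanced past complete groups and
 * the count up to the last complete group is returned.
 */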
2828static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2829{
2830	struct cnic_local *cp = dev->cnic_priv;
2831	u16 i, ri, hw_prod, last;
2832	struct kcqe *kcqe;
2833	int kcqe_cnt = 0, last_cnt = 0;
2834
2835	i = ri = last = info->sw_prod_idx;
2836	ri &= MAX_KCQ_IDX;
2837	hw_prod = *info->hw_prod_idx_ptr;
2838	hw_prod = info->hw_idx(hw_prod);
2839
2840	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2841		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2842		cp->completed_kcq[kcqe_cnt++] = kcqe;
2843		i = info->next_idx(i);
2844		ri = i & MAX_KCQ_IDX;
2845		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2846			last_cnt = kcqe_cnt;
2847			last = i;
2848		}
2849	}
2850
2851	info->sw_prod_idx = last;
2852	return last_cnt;
2853}
2854
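/* Scan the bnx2x receive completion queue for ramrod CQEs, counting
 * CLIENT_SETUP and HALT completions; cnic_chk_pkt_rings() uses the
 * count to clear the CNIC_LCL_FL_L2_WAIT flag.
 */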
2855static int cnic_l2_completion(struct cnic_local *cp)
2856{
2857	u16 hw_cons, sw_cons;
2858	struct cnic_uio_dev *udev = cp->udev;
2859	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2860					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
2861	u32 cmd;
2862	int comp = 0;
2863
2864	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2865		return 0;
2866
2867	hw_cons = *cp->rx_cons_ptr;
2868	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2869		hw_cons++;
2870
2871	sw_cons = cp->rx_cons;
2872	while (sw_cons != hw_cons) {
2873		u8 cqe_fp_flags;
2874
2875		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2876		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2877		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2878			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2879			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2880			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2881			    cmd == RAMROD_CMD_ID_ETH_HALT)
2882				comp++;
2883		}
2884		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2885	}
2886	return comp;
2887}
2888
2889static void cnic_chk_pkt_rings(struct cnic_local *cp)
2890{
2891	u16 rx_cons, tx_cons;
2892	int comp = 0;
2893
2894	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2895		return;
2896
2897	rx_cons = *cp->rx_cons_ptr;
2898	tx_cons = *cp->tx_cons_ptr;
2899	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2900		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2901			comp = cnic_l2_completion(cp);
2902
2903		cp->tx_cons = tx_cons;
2904		cp->rx_cons = rx_cons;
2905
2906		if (cp->udev)
2907			uio_event_notify(&cp->udev->cnic_uinfo);
2908	}
2909	if (comp)
2910		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2911}
2912
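/* Drain kcq1 until the hardware producer stops moving.  The status
 * block index is re-read (ordered by rmb()) after each batch, and the
 * new software producer index is written back to the chip at the end.
 */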
2913static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2914{
2915	struct cnic_local *cp = dev->cnic_priv;
2916	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2917	int kcqe_cnt;
2918
2919	/* status block index must be read before reading other fields */
2920	rmb();
2921	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2922
2923	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2924
2925		service_kcqes(dev, kcqe_cnt);
2926
2927		/* Tell compiler that status_blk fields can change. */
2928		barrier();
2929		status_idx = (u16) *cp->kcq1.status_idx_ptr;
2930		/* status block index must be read first */
2931		rmb();
2932		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2933	}
2934
2935	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2936
2937	cnic_chk_pkt_rings(cp);
2938
2939	return status_idx;
2940}
2941
2942static int cnic_service_bnx2(void *data, void *status_blk)
2943{
2944	struct cnic_dev *dev = data;
2945
2946	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2947		struct status_block *sblk = status_blk;
2948
2949		return sblk->status_idx;
2950	}
2951
2952	return cnic_service_bnx2_queues(dev);
2953}
2954
2955static void cnic_service_bnx2_msix(unsigned long data)
2956{
2957	struct cnic_dev *dev = (struct cnic_dev *) data;
2958	struct cnic_local *cp = dev->cnic_priv;
2959
2960	cp->last_status_idx = cnic_service_bnx2_queues(dev);
2961
2962	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2963		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2964}
2965
2966static void cnic_doirq(struct cnic_dev *dev)
2967{
2968	struct cnic_local *cp = dev->cnic_priv;
2969
2970	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2971		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2972
2973		prefetch(cp->status_blk.gen);
2974		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2975
2976		tasklet_schedule(&cp->cnic_irq_task);
2977	}
2978}
2979
2980static irqreturn_t cnic_irq(int irq, void *dev_instance)
2981{
2982	struct cnic_dev *dev = dev_instance;
2983	struct cnic_local *cp = dev->cnic_priv;
2984
2985	if (cp->ack_int)
2986		cp->ack_int(dev);
2987
2988	cnic_doirq(dev);
2989
2990	return IRQ_HANDLED;
2991}
2992
2993static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
2994				      u16 index, u8 op, u8 update)
2995{
2996	struct cnic_local *cp = dev->cnic_priv;
2997	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
2998		       COMMAND_REG_INT_ACK);
2999	struct igu_ack_register igu_ack;
3000
3001	igu_ack.status_block_index = index;
3002	igu_ack.sb_id_and_flags =
3003			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3004			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3005			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3006			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3007
3008	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3009}
3010
3011static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3012			    u16 index, u8 op, u8 update)
3013{
3014	struct igu_regular cmd_data;
3015	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3016
3017	cmd_data.sb_id_and_flags =
3018		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
3019		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3020		(update << IGU_REGULAR_BUPDATE_SHIFT) |
3021		(op << IGU_REGULAR_ENABLE_INT_SHIFT);
3022
3023
3024	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3025}
3026
3027static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3028{
3029	struct cnic_local *cp = dev->cnic_priv;
3030
3031	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3032			   IGU_INT_DISABLE, 0);
3033}
3034
3035static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3036{
3037	struct cnic_local *cp = dev->cnic_priv;
3038
3039	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3040			IGU_INT_DISABLE, 0);
3041}
3042
3043static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3044{
3045	u32 last_status = *info->status_idx_ptr;
3046	int kcqe_cnt;
3047
3048	/* status block index must be read before reading the KCQ */
3049	rmb();
3050	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3051
3052		service_kcqes(dev, kcqe_cnt);
3053
3054		/* Tell compiler that sblk fields can change. */
3055		barrier();
3056
3057		last_status = *info->status_idx_ptr;
3058		/* status block index must be read before reading the KCQ */
3059		rmb();
3060	}
3061	return last_status;
3062}
3063
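/* Bottom-half KCQ servicing for bnx2x.  On E2 and later chips kcq2 is
 * serviced after kcq1 and the status index is then re-checked; the
 * loop repeats if it moved, closing the race with events that arrive
 * before the IGU is re-armed.
 */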
3064static void cnic_service_bnx2x_bh(unsigned long data)
3065{
3066	struct cnic_dev *dev = (struct cnic_dev *) data;
3067	struct cnic_local *cp = dev->cnic_priv;
3068	u32 status_idx, new_status_idx;
3069
3070	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3071		return;
3072
3073	while (1) {
3074		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3075
3076		CNIC_WR16(dev, cp->kcq1.io_addr,
3077			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3078
3079		if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
3080			cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
3081					   status_idx, IGU_INT_ENABLE, 1);
3082			break;
3083		}
3084
3085		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3086
3087		if (new_status_idx != status_idx)
3088			continue;
3089
3090		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3091			  MAX_KCQ_IDX);
3092
3093		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3094				status_idx, IGU_INT_ENABLE, 1);
3095
3096		break;
3097	}
3098}
3099
3100static int cnic_service_bnx2x(void *data, void *status_blk)
3101{
3102	struct cnic_dev *dev = data;
3103	struct cnic_local *cp = dev->cnic_priv;
3104
3105	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3106		cnic_doirq(dev);
3107
3108	cnic_chk_pkt_rings(cp);
3109
3110	return 0;
3111}
3112
3113static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3114{
3115	struct cnic_ulp_ops *ulp_ops;
3116
3117	if (if_type == CNIC_ULP_ISCSI)
3118		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3119
3120	mutex_lock(&cnic_lock);
3121	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3122					    lockdep_is_held(&cnic_lock));
3123	if (!ulp_ops) {
3124		mutex_unlock(&cnic_lock);
3125		return;
3126	}
3127	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3128	mutex_unlock(&cnic_lock);
3129
3130	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3131		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3132
3133	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3134}
3135
3136static void cnic_ulp_stop(struct cnic_dev *dev)
3137{
3138	struct cnic_local *cp = dev->cnic_priv;
3139	int if_type;
3140
3141	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3142		cnic_ulp_stop_one(cp, if_type);
3143}
3144
3145static void cnic_ulp_start(struct cnic_dev *dev)
3146{
3147	struct cnic_local *cp = dev->cnic_priv;
3148	int if_type;
3149
3150	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3151		struct cnic_ulp_ops *ulp_ops;
3152
3153		mutex_lock(&cnic_lock);
3154		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3155						    lockdep_is_held(&cnic_lock));
3156		if (!ulp_ops || !ulp_ops->cnic_start) {
3157			mutex_unlock(&cnic_lock);
3158			continue;
3159		}
3160		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3161		mutex_unlock(&cnic_lock);
3162
3163		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3164			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3165
3166		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3167	}
3168}
3169
3170static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3171{
3172	struct cnic_local *cp = dev->cnic_priv;
3173	struct cnic_ulp_ops *ulp_ops;
3174	int rc;
3175
3176	mutex_lock(&cnic_lock);
3177	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
3178	if (ulp_ops && ulp_ops->cnic_get_stats)
3179		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3180	else
3181		rc = -ENODEV;
3182	mutex_unlock(&cnic_lock);
3183	return rc;
3184}
3185
3186static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3187{
3188	struct cnic_dev *dev = data;
3189	int ulp_type = CNIC_ULP_ISCSI;
3190
3191	switch (info->cmd) {
3192	case CNIC_CTL_STOP_CMD:
3193		cnic_hold(dev);
3194
3195		cnic_ulp_stop(dev);
3196		cnic_stop_hw(dev);
3197
3198		cnic_put(dev);
3199		break;
3200	case CNIC_CTL_START_CMD:
3201		cnic_hold(dev);
3202
3203		if (!cnic_start_hw(dev))
3204			cnic_ulp_start(dev);
3205
3206		cnic_put(dev);
3207		break;
3208	case CNIC_CTL_STOP_ISCSI_CMD: {
3209		struct cnic_local *cp = dev->cnic_priv;
3210		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3211		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3212		break;
3213	}
3214	case CNIC_CTL_COMPLETION_CMD: {
3215		struct cnic_ctl_completion *comp = &info->data.comp;
3216		u32 cid = BNX2X_SW_CID(comp->cid);
3217		u32 l5_cid;
3218		struct cnic_local *cp = dev->cnic_priv;
3219
3220		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3221			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3222
3223			if (unlikely(comp->error)) {
3224				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3225				netdev_err(dev->netdev,
3226					   "CID %x CFC delete comp error %x\n",
3227					   cid, comp->error);
3228			}
3229
3230			ctx->wait_cond = 1;
3231			wake_up(&ctx->waitq);
3232		}
3233		break;
3234	}
3235	case CNIC_CTL_FCOE_STATS_GET_CMD:
3236		ulp_type = CNIC_ULP_FCOE;
3237		/* fall through */
3238	case CNIC_CTL_ISCSI_STATS_GET_CMD:
3239		cnic_hold(dev);
3240		cnic_copy_ulp_stats(dev, ulp_type);
3241		cnic_put(dev);
3242		break;
3243
3244	default:
3245		return -EINVAL;
3246	}
3247	return 0;
3248}
3249
3250static void cnic_ulp_init(struct cnic_dev *dev)
3251{
3252	int i;
3253	struct cnic_local *cp = dev->cnic_priv;
3254
3255	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3256		struct cnic_ulp_ops *ulp_ops;
3257
3258		mutex_lock(&cnic_lock);
3259		ulp_ops = cnic_ulp_tbl_prot(i);
3260		if (!ulp_ops || !ulp_ops->cnic_init) {
3261			mutex_unlock(&cnic_lock);
3262			continue;
3263		}
3264		ulp_get(ulp_ops);
3265		mutex_unlock(&cnic_lock);
3266
3267		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3268			ulp_ops->cnic_init(dev);
3269
3270		ulp_put(ulp_ops);
3271	}
3272}
3273
3274static void cnic_ulp_exit(struct cnic_dev *dev)
3275{
3276	int i;
3277	struct cnic_local *cp = dev->cnic_priv;
3278
3279	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3280		struct cnic_ulp_ops *ulp_ops;
3281
3282		mutex_lock(&cnic_lock);
3283		ulp_ops = cnic_ulp_tbl_prot(i);
3284		if (!ulp_ops || !ulp_ops->cnic_exit) {
3285			mutex_unlock(&cnic_lock);
3286			continue;
3287		}
3288		ulp_get(ulp_ops);
3289		mutex_unlock(&cnic_lock);
3290
3291		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3292			ulp_ops->cnic_exit(dev);
3293
3294		ulp_put(ulp_ops);
3295	}
3296}
3297
3298static int cnic_cm_offload_pg(struct cnic_sock *csk)
3299{
3300	struct cnic_dev *dev = csk->dev;
3301	struct l4_kwq_offload_pg *l4kwqe;
3302	struct kwqe *wqes[1];
3303
3304	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3305	memset(l4kwqe, 0, sizeof(*l4kwqe));
3306	wqes[0] = (struct kwqe *) l4kwqe;
3307
3308	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3309	l4kwqe->flags =
3310		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3311	l4kwqe->l2hdr_nbytes = ETH_HLEN;
3312
3313	l4kwqe->da0 = csk->ha[0];
3314	l4kwqe->da1 = csk->ha[1];
3315	l4kwqe->da2 = csk->ha[2];
3316	l4kwqe->da3 = csk->ha[3];
3317	l4kwqe->da4 = csk->ha[4];
3318	l4kwqe->da5 = csk->ha[5];
3319
3320	l4kwqe->sa0 = dev->mac_addr[0];
3321	l4kwqe->sa1 = dev->mac_addr[1];
3322	l4kwqe->sa2 = dev->mac_addr[2];
3323	l4kwqe->sa3 = dev->mac_addr[3];
3324	l4kwqe->sa4 = dev->mac_addr[4];
3325	l4kwqe->sa5 = dev->mac_addr[5];
3326
3327	l4kwqe->etype = ETH_P_IP;
3328	l4kwqe->ipid_start = DEF_IPID_START;
3329	l4kwqe->host_opaque = csk->l5_cid;
3330
3331	if (csk->vlan_id) {
3332		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3333		l4kwqe->vlan_tag = csk->vlan_id;
3334		l4kwqe->l2hdr_nbytes += 4;
3335	}
3336
3337	return dev->submit_kwqes(dev, wqes, 1);
3338}
3339
3340static int cnic_cm_update_pg(struct cnic_sock *csk)
3341{
3342	struct cnic_dev *dev = csk->dev;
3343	struct l4_kwq_update_pg *l4kwqe;
3344	struct kwqe *wqes[1];
3345
3346	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3347	memset(l4kwqe, 0, sizeof(*l4kwqe));
3348	wqes[0] = (struct kwqe *) l4kwqe;
3349
3350	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3351	l4kwqe->flags =
3352		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3353	l4kwqe->pg_cid = csk->pg_cid;
3354
3355	l4kwqe->da0 = csk->ha[0];
3356	l4kwqe->da1 = csk->ha[1];
3357	l4kwqe->da2 = csk->ha[2];
3358	l4kwqe->da3 = csk->ha[3];
3359	l4kwqe->da4 = csk->ha[4];
3360	l4kwqe->da5 = csk->ha[5];
3361
3362	l4kwqe->pg_host_opaque = csk->l5_cid;
3363	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3364
3365	return dev->submit_kwqes(dev, wqes, 1);
3366}
3367
3368static int cnic_cm_upload_pg(struct cnic_sock *csk)
3369{
3370	struct cnic_dev *dev = csk->dev;
3371	struct l4_kwq_upload *l4kwqe;
3372	struct kwqe *wqes[1];
3373
3374	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3375	memset(l4kwqe, 0, sizeof(*l4kwqe));
3376	wqes[0] = (struct kwqe *) l4kwqe;
3377
3378	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3379	l4kwqe->flags =
3380		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3381	l4kwqe->cid = csk->pg_cid;
3382
3383	return dev->submit_kwqes(dev, wqes, 1);
3384}
3385
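/* Build and submit the CONNECT KWQE chain for a socket: connect1 +
 * connect3 for IPv4, with connect2 inserted for IPv6.  The MSS is
 * derived from the path MTU minus the IP and TCP header sizes, and
 * the socket's TCP option flags are translated into KWQE tcp_flags
 * bits.
 */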
3386static int cnic_cm_conn_req(struct cnic_sock *csk)
3387{
3388	struct cnic_dev *dev = csk->dev;
3389	struct l4_kwq_connect_req1 *l4kwqe1;
3390	struct l4_kwq_connect_req2 *l4kwqe2;
3391	struct l4_kwq_connect_req3 *l4kwqe3;
3392	struct kwqe *wqes[3];
3393	u8 tcp_flags = 0;
3394	int num_wqes = 2;
3395
3396	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3397	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3398	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3399	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3400	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3401	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3402
3403	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3404	l4kwqe3->flags =
3405		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3406	l4kwqe3->ka_timeout = csk->ka_timeout;
3407	l4kwqe3->ka_interval = csk->ka_interval;
3408	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3409	l4kwqe3->tos = csk->tos;
3410	l4kwqe3->ttl = csk->ttl;
3411	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3412	l4kwqe3->pmtu = csk->mtu;
3413	l4kwqe3->rcv_buf = csk->rcv_buf;
3414	l4kwqe3->snd_buf = csk->snd_buf;
3415	l4kwqe3->seed = csk->seed;
3416
3417	wqes[0] = (struct kwqe *) l4kwqe1;
3418	if (test_bit(SK_F_IPV6, &csk->flags)) {
3419		wqes[1] = (struct kwqe *) l4kwqe2;
3420		wqes[2] = (struct kwqe *) l4kwqe3;
3421		num_wqes = 3;
3422
3423		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3424		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3425		l4kwqe2->flags =
3426			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3427			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3428		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3429		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3430		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3431		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3432		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3433		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3434		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3435			       sizeof(struct tcphdr);
3436	} else {
3437		wqes[1] = (struct kwqe *) l4kwqe3;
3438		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3439			       sizeof(struct tcphdr);
3440	}
3441
3442	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3443	l4kwqe1->flags =
3444		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3445		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3446	l4kwqe1->cid = csk->cid;
3447	l4kwqe1->pg_cid = csk->pg_cid;
3448	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3449	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3450	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3451	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3452	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3453		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3454	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3455		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3456	if (csk->tcp_flags & SK_TCP_NAGLE)
3457		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3458	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3459		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3460	if (csk->tcp_flags & SK_TCP_SACK)
3461		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3462	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3463		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3464
3465	l4kwqe1->tcp_flags = tcp_flags;
3466
3467	return dev->submit_kwqes(dev, wqes, num_wqes);
3468}
3469
3470static int cnic_cm_close_req(struct cnic_sock *csk)
3471{
3472	struct cnic_dev *dev = csk->dev;
3473	struct l4_kwq_close_req *l4kwqe;
3474	struct kwqe *wqes[1];
3475
3476	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3477	memset(l4kwqe, 0, sizeof(*l4kwqe));
3478	wqes[0] = (struct kwqe *) l4kwqe;
3479
3480	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3481	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3482	l4kwqe->cid = csk->cid;
3483
3484	return dev->submit_kwqes(dev, wqes, 1);
3485}
3486
3487static int cnic_cm_abort_req(struct cnic_sock *csk)
3488{
3489	struct cnic_dev *dev = csk->dev;
3490	struct l4_kwq_reset_req *l4kwqe;
3491	struct kwqe *wqes[1];
3492
3493	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3494	memset(l4kwqe, 0, sizeof(*l4kwqe));
3495	wqes[0] = (struct kwqe *) l4kwqe;
3496
3497	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3498	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3499	l4kwqe->cid = csk->cid;
3500
3501	return dev->submit_kwqes(dev, wqes, 1);
3502}
3503
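/* Claim the csk_tbl slot for l5_cid and initialize it with default
 * TCP parameters.  Returns -EAGAIN if the slot is still referenced or
 * the matching context is mid-offload, and -EBUSY if the INUSE bit is
 * already set.
 */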
3504static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3505			  u32 l5_cid, struct cnic_sock **csk, void *context)
3506{
3507	struct cnic_local *cp = dev->cnic_priv;
3508	struct cnic_sock *csk1;
3509
3510	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3511		return -EINVAL;
3512
3513	if (cp->ctx_tbl) {
3514		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3515
3516		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3517			return -EAGAIN;
3518	}
3519
3520	csk1 = &cp->csk_tbl[l5_cid];
3521	if (atomic_read(&csk1->ref_count))
3522		return -EAGAIN;
3523
3524	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3525		return -EBUSY;
3526
3527	csk1->dev = dev;
3528	csk1->cid = cid;
3529	csk1->l5_cid = l5_cid;
3530	csk1->ulp_type = ulp_type;
3531	csk1->context = context;
3532
3533	csk1->ka_timeout = DEF_KA_TIMEOUT;
3534	csk1->ka_interval = DEF_KA_INTERVAL;
3535	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3536	csk1->tos = DEF_TOS;
3537	csk1->ttl = DEF_TTL;
3538	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3539	csk1->rcv_buf = DEF_RCV_BUF;
3540	csk1->snd_buf = DEF_SND_BUF;
3541	csk1->seed = DEF_SEED;
3542
3543	*csk = csk1;
3544	return 0;
3545}
3546
3547static void cnic_cm_cleanup(struct cnic_sock *csk)
3548{
3549	if (csk->src_port) {
3550		struct cnic_dev *dev = csk->dev;
3551		struct cnic_local *cp = dev->cnic_priv;
3552
3553		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3554		csk->src_port = 0;
3555	}
3556}
3557
3558static void cnic_close_conn(struct cnic_sock *csk)
3559{
3560	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3561		cnic_cm_upload_pg(csk);
3562		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3563	}
3564	cnic_cm_cleanup(csk);
3565}
3566
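/* Release a socket claimed by cnic_cm_create(): clear SK_F_INUSE,
 * wait until we hold the last reference, then free the local port.
 */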
3567static int cnic_cm_destroy(struct cnic_sock *csk)
3568{
3569	if (!cnic_in_use(csk))
3570		return -EINVAL;
3571
3572	csk_hold(csk);
3573	clear_bit(SK_F_INUSE, &csk->flags);
3574	smp_mb__after_clear_bit();
3575	while (atomic_read(&csk->ref_count) != 1)
3576		msleep(1);
3577	cnic_cm_cleanup(csk);
3578
3579	csk->flags = 0;
3580	csk_put(csk);
3581	return 0;
3582}
3583
3584static inline u16 cnic_get_vlan(struct net_device *dev,
3585				struct net_device **vlan_dev)
3586{
3587	if (dev->priv_flags & IFF_802_1Q_VLAN) {
3588		*vlan_dev = vlan_dev_real_dev(dev);
3589		return vlan_dev_vlan_id(dev);
3590	}
3591	*vlan_dev = dev;
3592	return 0;
3593}
3594
3595static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3596			     struct dst_entry **dst)
3597{
3598#if defined(CONFIG_INET)
3599	struct rtable *rt;
3600
3601	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3602	if (!IS_ERR(rt)) {
3603		*dst = &rt->dst;
3604		return 0;
3605	}
3606	return PTR_ERR(rt);
3607#else
3608	return -ENETUNREACH;
3609#endif
3610}
3611
3612static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3613			     struct dst_entry **dst)
3614{
3615#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
3616	struct flowi6 fl6;
3617
3618	memset(&fl6, 0, sizeof(fl6));
3619	fl6.daddr = dst_addr->sin6_addr;
3620	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3621		fl6.flowi6_oif = dst_addr->sin6_scope_id;
3622
3623	*dst = ip6_route_output(&init_net, NULL, &fl6);
3624	if ((*dst)->error) {
3625		dst_release(*dst);
3626		*dst = NULL;
3627		return -ENETUNREACH;
3628	} else
3629		return 0;
3630#endif
3631
3632	return -ENETUNREACH;
3633}
3634
3635static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3636					   int ulp_type)
3637{
3638	struct cnic_dev *dev = NULL;
3639	struct dst_entry *dst;
3640	struct net_device *netdev = NULL;
3641	int err = -ENETUNREACH;
3642
3643	if (dst_addr->sin_family == AF_INET)
3644		err = cnic_get_v4_route(dst_addr, &dst);
3645	else if (dst_addr->sin_family == AF_INET6) {
3646		struct sockaddr_in6 *dst_addr6 =
3647			(struct sockaddr_in6 *) dst_addr;
3648
3649		err = cnic_get_v6_route(dst_addr6, &dst);
3650	} else
3651		return NULL;
3652
3653	if (err)
3654		return NULL;
3655
3656	if (!dst->dev)
3657		goto done;
3658
3659	cnic_get_vlan(dst->dev, &netdev);
3660
3661	dev = cnic_from_netdev(netdev);
3662
3663done:
3664	dst_release(dst);
3665	if (dev)
3666		cnic_put(dev);
3667	return dev;
3668}
3669
3670static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3671{
3672	struct cnic_dev *dev = csk->dev;
3673	struct cnic_local *cp = dev->cnic_priv;
3674
3675	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3676}
3677
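/* Resolve the destination route, record the remote address, port,
 * VLAN and MTU on the socket, and reserve a local source port from
 * csk_port_tbl (the caller's port if it falls within the local port
 * range, otherwise a freshly allocated one).
 */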
3678static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3679{
3680	struct cnic_dev *dev = csk->dev;
3681	struct cnic_local *cp = dev->cnic_priv;
3682	int is_v6, rc = 0;
3683	struct dst_entry *dst = NULL;
3684	struct net_device *realdev;
3685	__be16 local_port;
3686	u32 port_id;
3687
3688	if (saddr->local.v6.sin6_family == AF_INET6 &&
3689	    saddr->remote.v6.sin6_family == AF_INET6)
3690		is_v6 = 1;
3691	else if (saddr->local.v4.sin_family == AF_INET &&
3692		 saddr->remote.v4.sin_family == AF_INET)
3693		is_v6 = 0;
3694	else
3695		return -EINVAL;
3696
3697	clear_bit(SK_F_IPV6, &csk->flags);
3698
3699	if (is_v6) {
3700		set_bit(SK_F_IPV6, &csk->flags);
3701		cnic_get_v6_route(&saddr->remote.v6, &dst);
3702
3703		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3704		       sizeof(struct in6_addr));
3705		csk->dst_port = saddr->remote.v6.sin6_port;
3706		local_port = saddr->local.v6.sin6_port;
3707
3708	} else {
3709		cnic_get_v4_route(&saddr->remote.v4, &dst);
3710
3711		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3712		csk->dst_port = saddr->remote.v4.sin_port;
3713		local_port = saddr->local.v4.sin_port;
3714	}
3715
3716	csk->vlan_id = 0;
3717	csk->mtu = dev->netdev->mtu;
3718	if (dst && dst->dev) {
3719		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3720		if (realdev == dev->netdev) {
3721			csk->vlan_id = vlan;
3722			csk->mtu = dst_mtu(dst);
3723		}
3724	}
3725
3726	port_id = be16_to_cpu(local_port);
3727	if (port_id >= CNIC_LOCAL_PORT_MIN &&
3728	    port_id < CNIC_LOCAL_PORT_MAX) {
3729		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3730			port_id = 0;
3731	} else
3732		port_id = 0;
3733
3734	if (!port_id) {
3735		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3736		if (port_id == -1) {
3737			rc = -ENOMEM;
3738			goto err_out;
3739		}
3740		local_port = cpu_to_be16(port_id);
3741	}
3742	csk->src_port = local_port;
3743
3744err_out:
3745	dst_release(dst);
3746	return rc;
3747}
3748
3749static void cnic_init_csk_state(struct cnic_sock *csk)
3750{
3751	csk->state = 0;
3752	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3753	clear_bit(SK_F_CLOSING, &csk->flags);
3754}
3755
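/* Start an offloaded connect: resolve the route, then ask userspace
 * for the path via an ISCSI_KEVENT_PATH_REQ netlink message.  The
 * actual connect request is issued later, from the PG offload
 * completion handler.
 */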
3756static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3757{
3758	struct cnic_local *cp = csk->dev->cnic_priv;
3759	int err = 0;
3760
3761	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3762		return -EOPNOTSUPP;
3763
3764	if (!cnic_in_use(csk))
3765		return -EINVAL;
3766
3767	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3768		return -EINVAL;
3769
3770	cnic_init_csk_state(csk);
3771
3772	err = cnic_get_route(csk, saddr);
3773	if (err)
3774		goto err_out;
3775
3776	err = cnic_resolve_addr(csk, saddr);
3777	if (!err)
3778		return 0;
3779
3780err_out:
3781	clear_bit(SK_F_CONNECT_START, &csk->flags);
3782	return err;
3783}
3784
3785static int cnic_cm_abort(struct cnic_sock *csk)
3786{
3787	struct cnic_local *cp = csk->dev->cnic_priv;
3788	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3789
3790	if (!cnic_in_use(csk))
3791		return -EINVAL;
3792
3793	if (cnic_abort_prep(csk))
3794		return cnic_cm_abort_req(csk);
3795
3796	/* Getting here means that we haven't started connect, or
3797	 * connect was not successful.
3798	 */
3799
3800	cp->close_conn(csk, opcode);
3801	if (csk->state != opcode)
3802		return -EALREADY;
3803
3804	return 0;
3805}
3806
3807static int cnic_cm_close(struct cnic_sock *csk)
3808{
3809	if (!cnic_in_use(csk))
3810		return -EINVAL;
3811
3812	if (cnic_close_prep(csk)) {
3813		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3814		return cnic_cm_close_req(csk);
3815	} else {
3816		return -EALREADY;
3817	}
3818	return 0;
3819}
3820
3821static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3822			   u8 opcode)
3823{
3824	struct cnic_ulp_ops *ulp_ops;
3825	int ulp_type = csk->ulp_type;
3826
3827	rcu_read_lock();
3828	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3829	if (ulp_ops) {
3830		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3831			ulp_ops->cm_connect_complete(csk);
3832		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3833			ulp_ops->cm_close_complete(csk);
3834		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3835			ulp_ops->cm_remote_abort(csk);
3836		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3837			ulp_ops->cm_abort_complete(csk);
3838		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3839			ulp_ops->cm_remote_close(csk);
3840	}
3841	rcu_read_unlock();
3842}
3843
3844static int cnic_cm_set_pg(struct cnic_sock *csk)
3845{
3846	if (cnic_offld_prep(csk)) {
3847		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3848			cnic_cm_update_pg(csk);
3849		else
3850			cnic_cm_offload_pg(csk);
3851	}
3852	return 0;
3853}
3854
3855static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3856{
3857	struct cnic_local *cp = dev->cnic_priv;
3858	u32 l5_cid = kcqe->pg_host_opaque;
3859	u8 opcode = kcqe->op_code;
3860	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3861
3862	csk_hold(csk);
3863	if (!cnic_in_use(csk))
3864		goto done;
3865
3866	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3867		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3868		goto done;
3869	}
3870	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3871	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3872		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3873		cnic_cm_upcall(cp, csk,
3874			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3875		goto done;
3876	}
3877
3878	csk->pg_cid = kcqe->pg_cid;
3879	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3880	cnic_cm_conn_req(csk);
3881
3882done:
3883	csk_put(csk);
3884}
3885
3886static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3887{
3888	struct cnic_local *cp = dev->cnic_priv;
3889	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3890	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3891	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3892
3893	ctx->timestamp = jiffies;
3894	ctx->wait_cond = 1;
3895	wake_up(&ctx->waitq);
3896}
3897
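/* Dispatch one L4/L5 KCQE: FCoE terminate and PG offload/update
 * events are handled by their own helpers; everything else is mapped
 * to a cnic_sock and handled per opcode below.
 */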
3898static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3899{
3900	struct cnic_local *cp = dev->cnic_priv;
3901	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3902	u8 opcode = l4kcqe->op_code;
3903	u32 l5_cid;
3904	struct cnic_sock *csk;
3905
3906	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3907		cnic_process_fcoe_term_conn(dev, kcqe);
3908		return;
3909	}
3910	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3911	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3912		cnic_cm_process_offld_pg(dev, l4kcqe);
3913		return;
3914	}
3915
3916	l5_cid = l4kcqe->conn_id;
3917	if (opcode & 0x80)
3918		l5_cid = l4kcqe->cid;
3919	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3920		return;
3921
3922	csk = &cp->csk_tbl[l5_cid];
3923	csk_hold(csk);
3924
3925	if (!cnic_in_use(csk)) {
3926		csk_put(csk);
3927		return;
3928	}
3929
3930	switch (opcode) {
3931	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3932		if (l4kcqe->status != 0) {
3933			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3934			cnic_cm_upcall(cp, csk,
3935				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3936		}
3937		break;
3938	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3939		if (l4kcqe->status == 0)
3940			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3941		else if (l4kcqe->status ==
3942			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
3943			set_bit(SK_F_HW_ERR, &csk->flags);
3944
3945		smp_mb__before_clear_bit();
3946		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3947		cnic_cm_upcall(cp, csk, opcode);
3948		break;
3949
3950	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3951	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3952	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3953	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3954	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3955		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
3956			set_bit(SK_F_HW_ERR, &csk->flags);
3957
3958		cp->close_conn(csk, opcode);
3959		break;
3960
3961	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3962		/* after we already sent CLOSE_REQ */
3963		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
3964		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
3965		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3966			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
3967		else
3968			cnic_cm_upcall(cp, csk, opcode);
3969		break;
3970	}
3971	csk_put(csk);
3972}
3973
3974static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
3975{
3976	struct cnic_dev *dev = data;
3977	int i;
3978
3979	for (i = 0; i < num; i++)
3980		cnic_cm_process_kcqe(dev, kcqe[i]);
3981}
3982
3983static struct cnic_ulp_ops cm_ulp_ops = {
3984	.indicate_kcqes		= cnic_cm_indicate_kcqe,
3985};
3986
3987static void cnic_cm_free_mem(struct cnic_dev *dev)
3988{
3989	struct cnic_local *cp = dev->cnic_priv;
3990
3991	kfree(cp->csk_tbl);
3992	cp->csk_tbl = NULL;
3993	cnic_free_id_tbl(&cp->csk_port_tbl);
3994}
3995
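/* Allocate the socket table and seed the local port ID table at a
 * random starting offset within the port range.
 */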
3996static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3997{
3998	struct cnic_local *cp = dev->cnic_priv;
3999	u32 port_id;
4000
4001	cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
4002			      GFP_KERNEL);
4003	if (!cp->csk_tbl)
4004		return -ENOMEM;
4005
4006	port_id = random32();
4007	port_id %= CNIC_LOCAL_PORT_RANGE;
4008	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
4009			     CNIC_LOCAL_PORT_MIN, port_id)) {
4010		cnic_cm_free_mem(dev);
4011		return -ENOMEM;
4012	}
4013	return 0;
4014}
4015
4016static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4017{
4018	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4019		/* Unsolicited RESET_COMP or RESET_RECEIVED */
4020		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4021		csk->state = opcode;
4022	}
4023
4024	/* 1. If event opcode matches the expected event in csk->state
4025	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4026	 *    event
4027	 * 3. If the expected event is 0, meaning the connection was
4028	 *    never established, we accept the opcode from cm_abort.
4029	 */
4030	if (opcode == csk->state || csk->state == 0 ||
4031	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4032	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4033		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4034			if (csk->state == 0)
4035				csk->state = opcode;
4036			return 1;
4037		}
4038	}
4039	return 0;
4040}
4041
4042static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4043{
4044	struct cnic_dev *dev = csk->dev;
4045	struct cnic_local *cp = dev->cnic_priv;
4046
4047	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4048		cnic_cm_upcall(cp, csk, opcode);
4049		return;
4050	}
4051
4052	clear_bit(SK_F_CONNECT_START, &csk->flags);
4053	cnic_close_conn(csk);
4054	csk->state = opcode;
4055	cnic_cm_upcall(cp, csk, opcode);
4056}
4057
4058static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4059{
4060}
4061
4062static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4063{
4064	u32 seed;
4065
4066	seed = random32();
4067	cnic_ctx_wr(dev, 45, 0, seed);
4068	return 0;
4069}
4070
4071static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4072{
4073	struct cnic_dev *dev = csk->dev;
4074	struct cnic_local *cp = dev->cnic_priv;
4075	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4076	union l5cm_specific_data l5_data;
4077	u32 cmd = 0;
4078	int close_complete = 0;
4079
4080	switch (opcode) {
4081	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4082	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4083	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4084		if (cnic_ready_to_close(csk, opcode)) {
4085			if (test_bit(SK_F_HW_ERR, &csk->flags))
4086				close_complete = 1;
4087			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4088				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4089			else
4090				close_complete = 1;
4091		}
4092		break;
4093	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4094		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4095		break;
4096	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4097		close_complete = 1;
4098		break;
4099	}
4100	if (cmd) {
4101		memset(&l5_data, 0, sizeof(l5_data));
4102
4103		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4104				    &l5_data);
4105	} else if (close_complete) {
4106		ctx->timestamp = jiffies;
4107		cnic_close_conn(csk);
4108		cnic_cm_upcall(cp, csk, csk->state);
4109	}
4110}
4111
4112static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4113{
4114	struct cnic_local *cp = dev->cnic_priv;
4115
4116	if (!cp->ctx_tbl)
4117		return;
4118
4119	if (!netif_running(dev->netdev))
4120		return;
4121
4122	cnic_bnx2x_delete_wait(dev, 0);
4123
4124	cancel_delayed_work(&cp->delete_task);
4125	flush_workqueue(cnic_wq);
4126
4127	if (atomic_read(&cp->iscsi_conn) != 0)
4128		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4129			    atomic_read(&cp->iscsi_conn));
4130}
4131
4132static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4133{
4134	struct cnic_local *cp = dev->cnic_priv;
4135	u32 pfid = cp->pfid;
4136	u32 port = CNIC_PORT(cp);
4137
4138	cnic_init_bnx2x_mac(dev);
4139	cnic_bnx2x_set_tcp_timestamp(dev, 1);
4140
4141	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4142		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4143
4144	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4145		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4146	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4147		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4148		DEF_MAX_DA_COUNT);
4149
4150	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4151		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4152	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4153		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4154	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4155		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4156	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4157		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4158
4159	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4160		DEF_MAX_CWND);
4161	return 0;
4162}
4163
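/* Delayed work that reaps connections marked CTX_FL_DELETE_WAIT:
 * once a context is at least 2 seconds old, issue the destroy ramrod
 * and free its resources; reschedule in 10 ms while any context is
 * still too young.
 */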
4164static void cnic_delete_task(struct work_struct *work)
4165{
4166	struct cnic_local *cp;
4167	struct cnic_dev *dev;
4168	u32 i;
4169	int need_resched = 0;
4170
4171	cp = container_of(work, struct cnic_local, delete_task.work);
4172	dev = cp->dev;
4173
4174	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4175		struct drv_ctl_info info;
4176
4177		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4178
4179		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4180		cp->ethdev->drv_ctl(dev->netdev, &info);
4181	}
4182
4183	for (i = 0; i < cp->max_cid_space; i++) {
4184		struct cnic_context *ctx = &cp->ctx_tbl[i];
4185		int err;
4186
4187		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4188		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4189			continue;
4190
4191		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4192			need_resched = 1;
4193			continue;
4194		}
4195
4196		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4197			continue;
4198
4199		err = cnic_bnx2x_destroy_ramrod(dev, i);
4200
4201		cnic_free_bnx2x_conn_resc(dev, i);
4202		if (!err) {
4203			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4204				atomic_dec(&cp->iscsi_conn);
4205
4206			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4207		}
4208	}
4209
4210	if (need_resched)
4211		queue_delayed_work(cnic_wq, &cp->delete_task,
4212				   msecs_to_jiffies(10));
4213
4214}
4215
4216static int cnic_cm_open(struct cnic_dev *dev)
4217{
4218	struct cnic_local *cp = dev->cnic_priv;
4219	int err;
4220
4221	err = cnic_cm_alloc_mem(dev);
4222	if (err)
4223		return err;
4224
4225	err = cp->start_cm(dev);
4226
4227	if (err)
4228		goto err_out;
4229
4230	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4231
4232	dev->cm_create = cnic_cm_create;
4233	dev->cm_destroy = cnic_cm_destroy;
4234	dev->cm_connect = cnic_cm_connect;
4235	dev->cm_abort = cnic_cm_abort;
4236	dev->cm_close = cnic_cm_close;
4237	dev->cm_select_dev = cnic_cm_select_dev;
4238
4239	cp->ulp_handle[CNIC_ULP_L4] = dev;
4240	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4241	return 0;
4242
4243err_out:
4244	cnic_cm_free_mem(dev);
4245	return err;
4246}
4247
4248static int cnic_cm_shutdown(struct cnic_dev *dev)
4249{
4250	struct cnic_local *cp = dev->cnic_priv;
4251	int i;
4252
4253	cp->stop_cm(dev);
4254
4255	if (!cp->csk_tbl)
4256		return 0;
4257
4258	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4259		struct cnic_sock *csk = &cp->csk_tbl[i];
4260
4261		clear_bit(SK_F_INUSE, &csk->flags);
4262		cnic_cm_cleanup(csk);
4263	}
4264	cnic_cm_free_mem(dev);
4265
4266	return 0;
4267}
4268
4269static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4270{
4271	u32 cid_addr;
4272	int i;
4273
4274	cid_addr = GET_CID_ADDR(cid);
4275
4276	for (i = 0; i < CTX_SIZE; i += 4)
4277		cnic_ctx_wr(dev, cid_addr, i, 0);
4278}
4279
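/* On 5709 chips, program (or invalidate, when !valid) the host page
 * table entries for the context memory blocks, polling each write
 * until the hardware acknowledges it.
 */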
4280static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4281{
4282	struct cnic_local *cp = dev->cnic_priv;
4283	int ret = 0, i;
4284	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4285
4286	if (CHIP_NUM(cp) != CHIP_NUM_5709)
4287		return 0;
4288
4289	for (i = 0; i < cp->ctx_blks; i++) {
4290		int j;
4291		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4292		u32 val;
4293
4294		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
4295
4296		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4297			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4298		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4299			(u64) cp->ctx_arr[i].mapping >> 32);
4300		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4301			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4302		for (j = 0; j < 10; j++) {
4303
4304			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4305			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4306				break;
4307			udelay(5);
4308		}
4309		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4310			ret = -EBUSY;
4311			break;
4312		}
4313	}
4314	return ret;
4315}
4316
4317static void cnic_free_irq(struct cnic_dev *dev)
4318{
4319	struct cnic_local *cp = dev->cnic_priv;
4320	struct cnic_eth_dev *ethdev = cp->ethdev;
4321
4322	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4323		cp->disable_int_sync(dev);
4324		tasklet_kill(&cp->cnic_irq_task);
4325		free_irq(ethdev->irq_arr[0].vector, dev);
4326	}
4327}
4328
4329static int cnic_request_irq(struct cnic_dev *dev)
4330{
4331	struct cnic_local *cp = dev->cnic_priv;
4332	struct cnic_eth_dev *ethdev = cp->ethdev;
4333	int err;
4334
4335	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4336	if (err)
4337		tasklet_disable(&cp->cnic_irq_task);
4338
4339	return err;
4340}
4341
4342static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4343{
4344	struct cnic_local *cp = dev->cnic_priv;
4345	struct cnic_eth_dev *ethdev = cp->ethdev;
4346
4347	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4348		int err, i = 0;
4349		int sblk_num = cp->status_blk_num;
4350		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4351			   BNX2_HC_SB_CONFIG_1;
4352
4353		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4354
4355		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4356		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4357		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4358
4359		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4360		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4361			     (unsigned long) dev);
4362		err = cnic_request_irq(dev);
4363		if (err)
4364			return err;
4365
4366		while (cp->status_blk.bnx2->status_completion_producer_index &&
4367		       i < 10) {
4368			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4369				1 << (11 + sblk_num));
4370			udelay(10);
4371			i++;
4372			barrier();
4373		}
4374		if (cp->status_blk.bnx2->status_completion_producer_index) {
4375			cnic_free_irq(dev);
4376			goto failed;
4377		}
4378
4379	} else {
4380		struct status_block *sblk = cp->status_blk.gen;
4381		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4382		int i = 0;
4383
4384		while (sblk->status_completion_producer_index && i < 10) {
4385			CNIC_WR(dev, BNX2_HC_COMMAND,
4386				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4387			udelay(10);
4388			i++;
4389			barrier();
4390		}
4391		if (sblk->status_completion_producer_index)
4392			goto failed;
4393
4394	}
4395	return 0;
4396
4397failed:
4398	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4399	return -EBUSY;
4400}
4401
4402static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4403{
4404	struct cnic_local *cp = dev->cnic_priv;
4405	struct cnic_eth_dev *ethdev = cp->ethdev;
4406
4407	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4408		return;
4409
4410	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4411		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4412}
4413
4414static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4415{
4416	struct cnic_local *cp = dev->cnic_priv;
4417	struct cnic_eth_dev *ethdev = cp->ethdev;
4418
4419	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4420		return;
4421
4422	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4423		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4424	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4425	synchronize_irq(ethdev->irq_arr[0].vector);
4426}
4427
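/* Set up the bnx2 L2 TX ring used by the UIO interface: point each
 * BD at the shared DMA buffer and write the ring base address into
 * the TX context.
 */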
4428static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4429{
4430	struct cnic_local *cp = dev->cnic_priv;
4431	struct cnic_eth_dev *ethdev = cp->ethdev;
4432	struct cnic_uio_dev *udev = cp->udev;
4433	u32 cid_addr, tx_cid, sb_id;
4434	u32 val, offset0, offset1, offset2, offset3;
4435	int i;
4436	struct tx_bd *txbd;
4437	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4438	struct status_block *s_blk = cp->status_blk.gen;
4439
4440	sb_id = cp->status_blk_num;
4441	tx_cid = 20;
4442	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4443	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4444		struct status_block_msix *sblk = cp->status_blk.bnx2;
4445
4446		tx_cid = TX_TSS_CID + sb_id - 1;
4447		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4448			(TX_TSS_CID << 7));
4449		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4450	}
4451	cp->tx_cons = *cp->tx_cons_ptr;
4452
4453	cid_addr = GET_CID_ADDR(tx_cid);
4454	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
4455		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4456
4457		for (i = 0; i < PHY_CTX_SIZE; i += 4)
4458			cnic_ctx_wr(dev, cid_addr2, i, 0);
4459
4460		offset0 = BNX2_L2CTX_TYPE_XI;
4461		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4462		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4463		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4464	} else {
4465		cnic_init_context(dev, tx_cid);
4466		cnic_init_context(dev, tx_cid + 1);
4467
4468		offset0 = BNX2_L2CTX_TYPE;
4469		offset1 = BNX2_L2CTX_CMD_TYPE;
4470		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4471		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4472	}
4473	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4474	cnic_ctx_wr(dev, cid_addr, offset0, val);
4475
4476	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4477	cnic_ctx_wr(dev, cid_addr, offset1, val);
4478
4479	txbd = udev->l2_ring;
4480
4481	buf_map = udev->l2_buf_map;
4482	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
4483		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4484		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4485	}
4486	val = (u64) ring_map >> 32;
4487	cnic_ctx_wr(dev, cid_addr, offset2, val);
4488	txbd->tx_bd_haddr_hi = val;
4489
4490	val = (u64) ring_map & 0xffffffff;
4491	cnic_ctx_wr(dev, cid_addr, offset3, val);
4492	txbd->tx_bd_haddr_lo = val;
4493}
4494
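/* Set up the bnx2 L2 RX ring: kick the coalescence register until
 * the consumer index becomes non-zero, then populate each BD from
 * the shared UIO buffer pool and write the ring base into the RX
 * context.
 */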
4495static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4496{
4497	struct cnic_local *cp = dev->cnic_priv;
4498	struct cnic_eth_dev *ethdev = cp->ethdev;
4499	struct cnic_uio_dev *udev = cp->udev;
4500	u32 cid_addr, sb_id, val, coal_reg, coal_val;
4501	int i;
4502	struct rx_bd *rxbd;
4503	struct status_block *s_blk = cp->status_blk.gen;
4504	dma_addr_t ring_map = udev->l2_ring_map;
4505
4506	sb_id = cp->status_blk_num;
4507	cnic_init_context(dev, 2);
4508	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4509	coal_reg = BNX2_HC_COMMAND;
4510	coal_val = CNIC_RD(dev, coal_reg);
4511	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4512		struct status_block_msix *sblk = cp->status_blk.bnx2;
4513
4514		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4515		coal_reg = BNX2_HC_COALESCE_NOW;
4516		coal_val = 1 << (11 + sb_id);
4517	}
4518	i = 0;
4519	while (*cp->rx_cons_ptr == 0 && i < 10) {
4520		CNIC_WR(dev, coal_reg, coal_val);
4521		udelay(10);
4522		i++;
4523		barrier();
4524	}
4525	cp->rx_cons = *cp->rx_cons_ptr;
4526
4527	cid_addr = GET_CID_ADDR(2);
4528	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4529	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4530	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4531
4532	if (sb_id == 0)
4533		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4534	else
4535		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4536	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4537
4538	rxbd = udev->l2_ring + BCM_PAGE_SIZE;
4539	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
4540		dma_addr_t buf_map;
4541		int n = (i % cp->l2_rx_ring_size) + 1;
4542
4543		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4544		rxbd->rx_bd_len = cp->l2_single_buf_size;
4545		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4546		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4547		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4548	}
4549	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4550	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4551	rxbd->rx_bd_haddr_hi = val;
4552
4553	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4554	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4555	rxbd->rx_bd_haddr_lo = val;
4556
4557	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4558	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4559}
4560
4561static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4562{
4563	struct kwqe *wqes[1], l2kwqe;
4564
4565	memset(&l2kwqe, 0, sizeof(l2kwqe));
4566	wqes[0] = &l2kwqe;
4567	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4568			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
4569			       KWQE_OPCODE_SHIFT) | 2;
4570	dev->submit_kwqes(dev, wqes, 1);
4571}
4572
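/* Read the iSCSI MAC address from shared memory into dev->mac_addr
 * and program the EMAC perfect-match and RPM sort registers with it.
 */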
4573static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4574{
4575	struct cnic_local *cp = dev->cnic_priv;
4576	u32 val;
4577
4578	val = cp->func << 2;
4579
4580	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4581
4582	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4583			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4584	dev->mac_addr[0] = (u8) (val >> 8);
4585	dev->mac_addr[1] = (u8) val;
4586
4587	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4588
4589	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4590			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4591	dev->mac_addr[2] = (u8) (val >> 24);
4592	dev->mac_addr[3] = (u8) (val >> 16);
4593	dev->mac_addr[4] = (u8) (val >> 8);
4594	dev->mac_addr[5] = (u8) val;
4595
4596	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4597
4598	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4599	if (CHIP_NUM(cp) != CHIP_NUM_5709)
4600		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4601
4602	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4603	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4604	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4605}
4606
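/* One-time bnx2 bring-up: program the MQ and HC registers, build the
 * KWQ/KCQ contexts, ring the CP and COM doorbells, set up the L2
 * rings and hook up the IRQ.
 */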
4607static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4608{
4609	struct cnic_local *cp = dev->cnic_priv;
4610	struct cnic_eth_dev *ethdev = cp->ethdev;
4611	struct status_block *sblk = cp->status_blk.gen;
4612	u32 val, kcq_cid_addr, kwq_cid_addr;
4613	int err;
4614
4615	cnic_set_bnx2_mac(dev);
4616
4617	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4618	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4619	if (BCM_PAGE_BITS > 12)
4620		val |= (12 - 8)  << 4;
4621	else
4622		val |= (BCM_PAGE_BITS - 8)  << 4;
4623
4624	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4625
4626	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4627	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4628	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4629
4630	err = cnic_setup_5709_context(dev, 1);
4631	if (err)
4632		return err;
4633
4634	cnic_init_context(dev, KWQ_CID);
4635	cnic_init_context(dev, KCQ_CID);
4636
4637	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4638	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4639
4640	cp->max_kwq_idx = MAX_KWQ_IDX;
4641	cp->kwq_prod_idx = 0;
4642	cp->kwq_con_idx = 0;
4643	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4644
4645	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
4646		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4647	else
4648		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4649
4650	/* Initialize the kernel work queue context. */
4651	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4652	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4653	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4654
4655	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4656	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4657
4658	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4659	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4660
4661	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4662	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4663
4664	val = (u32) cp->kwq_info.pgtbl_map;
4665	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4666
4667	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4668	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4669
4670	cp->kcq1.sw_prod_idx = 0;
4671	cp->kcq1.hw_prod_idx_ptr =
4672		(u16 *) &sblk->status_completion_producer_index;
4673
4674	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
4675
4676	/* Initialize the kernel complete queue context. */
4677	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4678	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4679	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4680
4681	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4682	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4683
4684	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4685	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4686
4687	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4688	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4689
4690	val = (u32) cp->kcq1.dma.pgtbl_map;
4691	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4692
4693	cp->int_num = 0;
4694	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4695		struct status_block_msix *msblk = cp->status_blk.bnx2;
4696		u32 sb_id = cp->status_blk_num;
4697		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4698
4699		cp->kcq1.hw_prod_idx_ptr =
4700			(u16 *) &msblk->status_completion_producer_index;
4701		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
4702		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
4703		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4704		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4705		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4706	}
4707
4708	/* Enable Command Scheduler notification when we write to the
4709	 * host producer index of the kernel contexts. */
4710	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4711
4712	/* Enable Command Scheduler notification when we write to either
4713	 * the Send Queue or Receive Queue producer indexes of the kernel
4714	 * bypass contexts. */
4715	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4716	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4717
4718	/* Notify COM when the driver posts an application buffer. */
4719	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4720
4721	/* Set the CP and COM doorbells.  These two processors poll the
4722	 * doorbell for a non-zero value before running.  This must be done
4723	 * after setting up the kernel queue contexts. */
4724	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4725	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4726
4727	cnic_init_bnx2_tx_ring(dev);
4728	cnic_init_bnx2_rx_ring(dev);
4729
4730	err = cnic_init_bnx2_irq(dev);
4731	if (err) {
4732		netdev_err(dev->netdev, "cnic_init_irq failed\n");
4733		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4734		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4735		return err;
4736	}
4737
4738	return 0;
4739}
4740
4741static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4742{
4743	struct cnic_local *cp = dev->cnic_priv;
4744	struct cnic_eth_dev *ethdev = cp->ethdev;
4745	u32 start_offset = ethdev->ctx_tbl_offset;
4746	int i;
4747
4748	for (i = 0; i < cp->ctx_blks; i++) {
4749		struct cnic_ctx *ctx = &cp->ctx_arr[i];
4750		dma_addr_t map = ctx->mapping;
4751
4752		if (cp->ctx_align) {
4753			unsigned long mask = cp->ctx_align - 1;
4754
4755			map = (map + mask) & ~mask;
4756		}
4757
4758		cnic_ctx_tbl_wr(dev, start_offset + i, map);
4759	}
4760}
4761
4762static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4763{
4764	struct cnic_local *cp = dev->cnic_priv;
4765	struct cnic_eth_dev *ethdev = cp->ethdev;
4766	int err = 0;
4767
4768	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4769		     (unsigned long) dev);
4770	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4771		err = cnic_request_irq(dev);
4772
4773	return err;
4774}
4775
4776static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4777						u16 sb_id, u8 sb_index,
4778						u8 disable)
4779{
4780
4781	u32 addr = BAR_CSTRORM_INTMEM +
4782			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4783			offsetof(struct hc_status_block_data_e1x, index_data) +
4784			sizeof(struct hc_index_data)*sb_index +
4785			offsetof(struct hc_index_data, flags);
4786	u16 flags = CNIC_RD16(dev, addr);
4787	/* clear and set */
4788	flags &= ~HC_INDEX_DATA_HC_ENABLED;
4789	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4790		  HC_INDEX_DATA_HC_ENABLED);
4791	CNIC_WR16(dev, addr, flags);
4792}
4793
4794static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4795{
4796	struct cnic_local *cp = dev->cnic_priv;
4797	u8 sb_id = cp->status_blk_num;
4798
4799	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4800			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4801			offsetof(struct hc_status_block_data_e1x, index_data) +
4802			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4803			offsetof(struct hc_index_data, timeout), 64 / 4);
4804	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4805}
4806
4807static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4808{
4809}
4810
4811static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4812				    struct client_init_ramrod_data *data)
4813{
4814	struct cnic_local *cp = dev->cnic_priv;
4815	struct cnic_uio_dev *udev = cp->udev;
4816	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4817	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4818	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4819	int i;
4820	u32 cli = cp->ethdev->iscsi_l2_client_id;
4821	u32 val;
4822
4823	memset(txbd, 0, BCM_PAGE_SIZE);
4824
4825	buf_map = udev->l2_buf_map;
4826	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4827		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4828		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4829
4830		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4831		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4832		reg_bd->addr_hi = start_bd->addr_hi;
4833		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4834		start_bd->nbytes = cpu_to_le16(0x10);
4835		start_bd->nbd = cpu_to_le16(3);
4836		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4837		start_bd->general_data = (UNICAST_ADDRESS <<
4838			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
4839		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4840
4841	}
4842
4843	val = (u64) ring_map >> 32;
4844	txbd->next_bd.addr_hi = cpu_to_le32(val);
4845
4846	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4847
4848	val = (u64) ring_map & 0xffffffff;
4849	txbd->next_bd.addr_lo = cpu_to_le32(val);
4850
4851	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4852
4853	/* Other ramrod params */
4854	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4855	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4856
4857	/* reset xstorm per client statistics */
4858	if (cli < MAX_STAT_COUNTER_ID) {
4859		data->general.statistics_zero_flg = 1;
4860		data->general.statistics_en_flg = 1;
4861		data->general.statistics_counter_id = cli;
4862	}
4863
4864	cp->tx_cons_ptr =
4865		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
4866}
4867
4868static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4869				    struct client_init_ramrod_data *data)
4870{
4871	struct cnic_local *cp = dev->cnic_priv;
4872	struct cnic_uio_dev *udev = cp->udev;
4873	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4874				BCM_PAGE_SIZE);
4875	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4876				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
4877	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4878	int i;
4879	u32 cli = cp->ethdev->iscsi_l2_client_id;
4880	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4881	u32 val;
4882	dma_addr_t ring_map = udev->l2_ring_map;
4883
4884	/* General data */
4885	data->general.client_id = cli;
4886	data->general.activate_flg = 1;
4887	data->general.sp_client_id = cli;
4888	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4889	data->general.func_id = cp->pfid;
4890
4891	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
4892		dma_addr_t buf_map;
4893		int n = (i % cp->l2_rx_ring_size) + 1;
4894
4895		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4896		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4897		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4898	}
4899
4900	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4901	rxbd->addr_hi = cpu_to_le32(val);
4902	data->rx.bd_page_base.hi = cpu_to_le32(val);
4903
4904	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4905	rxbd->addr_lo = cpu_to_le32(val);
4906	data->rx.bd_page_base.lo = cpu_to_le32(val);
4907
4908	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
4909	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
4910	rxcqe->addr_hi = cpu_to_le32(val);
4911	data->rx.cqe_page_base.hi = cpu_to_le32(val);
4912
4913	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4914	rxcqe->addr_lo = cpu_to_le32(val);
4915	data->rx.cqe_page_base.lo = cpu_to_le32(val);
4916
4917	/* Other ramrod params */
4918	data->rx.client_qzone_id = cl_qzone_id;
4919	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
4920	data->rx.status_block_id = BNX2X_DEF_SB_ID;
4921
4922	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
4923
4924	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
4925	data->rx.outer_vlan_removal_enable_flg = 1;
4926	data->rx.silent_vlan_removal_flg = 1;
4927	data->rx.silent_vlan_value = 0;
4928	data->rx.silent_vlan_mask = 0xffff;
4929
4930	cp->rx_cons_ptr =
4931		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
4932	cp->rx_cons = *cp->rx_cons_ptr;
4933}
4934
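/* Bind kcq1 (the iSCSI EQ) and, on E2+ chips, kcq2 (the FCoE EQ) to
 * their producer and status index locations in the status block.
 */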
4935static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
4936{
4937	struct cnic_local *cp = dev->cnic_priv;
4938	u32 pfid = cp->pfid;
4939
4940	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4941			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
4942	cp->kcq1.sw_prod_idx = 0;
4943
4944	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4945		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4946
4947		cp->kcq1.hw_prod_idx_ptr =
4948			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4949		cp->kcq1.status_idx_ptr =
4950			&sb->sb.running_index[SM_RX_ID];
4951	} else {
4952		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
4953
4954		cp->kcq1.hw_prod_idx_ptr =
4955			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4956		cp->kcq1.status_idx_ptr =
4957			&sb->sb.running_index[SM_RX_ID];
4958	}
4959
4960	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4961		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4962
4963		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
4964					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
4965		cp->kcq2.sw_prod_idx = 0;
4966		cp->kcq2.hw_prod_idx_ptr =
4967			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
4968		cp->kcq2.status_idx_ptr =
4969			&sb->sb.running_index[SM_RX_ID];
4970	}
4971}
4972
4973static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4974{
4975	struct cnic_local *cp = dev->cnic_priv;
4976	struct cnic_eth_dev *ethdev = cp->ethdev;
4977	int func = CNIC_FUNC(cp), ret;
4978	u32 pfid;
4979
4980	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
4981	cp->port_mode = CHIP_PORT_MODE_NONE;
4982
4983	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4984		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
4985
4986		if (!(val & 1))
4987			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
4988		else
4989			val = (val >> 1) & 1;
4990
4991		if (val) {
4992			cp->port_mode = CHIP_4_PORT_MODE;
4993			cp->pfid = func >> 1;
4994		} else {
4995			cp->port_mode = CHIP_2_PORT_MODE;
4996			cp->pfid = func & 0x6;
4997		}
4998	} else {
4999		cp->pfid = func;
5000	}
5001	pfid = cp->pfid;
5002
5003	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
5004			       cp->iscsi_start_cid, 0);
5005
5006	if (ret)
5007		return -ENOMEM;
5008
5009	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
5010		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
5011					cp->fcoe_start_cid, 0);
5012
5013		if (ret)
5014			return -ENOMEM;
5015	}
5016
5017	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
5018
5019	cnic_init_bnx2x_kcq(dev);
5020
5021	/* Only 1 EQ */
5022	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
5023	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5024		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
5025	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5026		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
5027		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
5028	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5029		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
5030		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
5031	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5032		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
5033		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
5034	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5035		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
5036		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
5037	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5038		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
5039	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
5040		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
5041	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5042		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
5043		HC_INDEX_ISCSI_EQ_CONS);
5044
5045	CNIC_WR(dev, BAR_USTRORM_INTMEM +
5046		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
5047		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
5048	CNIC_WR(dev, BAR_USTRORM_INTMEM +
5049		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
5050		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
5051
5052	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5053		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
5054
5055	cnic_setup_bnx2x_context(dev);
5056
5057	ret = cnic_init_bnx2x_irq(dev);
5058	if (ret)
5059		return ret;
5060
5061	return 0;
5062}
5063
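/* Bring up the L2 rings.  On bnx2x this issues a CLIENT_SETUP ramrod
 * and polls for up to ~10 ms for the completion to clear
 * CNIC_LCL_FL_L2_WAIT before enabling the ring.
 */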
5064static void cnic_init_rings(struct cnic_dev *dev)
5065{
5066	struct cnic_local *cp = dev->cnic_priv;
5067	struct cnic_uio_dev *udev = cp->udev;
5068
5069	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5070		return;
5071
5072	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5073		cnic_init_bnx2_tx_ring(dev);
5074		cnic_init_bnx2_rx_ring(dev);
5075		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5076	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5077		u32 cli = cp->ethdev->iscsi_l2_client_id;
5078		u32 cid = cp->ethdev->iscsi_l2_cid;
5079		u32 cl_qzone_id;
5080		struct client_init_ramrod_data *data;
5081		union l5cm_specific_data l5_data;
5082		struct ustorm_eth_rx_producers rx_prods = {0};
5083		u32 off, i, *cid_ptr;
5084
5085		rx_prods.bd_prod = 0;
5086		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5087		barrier();
5088
5089		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
5090
5091		off = BAR_USTRORM_INTMEM +
5092			(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
5093			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
5094			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
5095
5096		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
5097			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
5098
5099		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5100
5101		data = udev->l2_buf;
5102		cid_ptr = udev->l2_buf + 12;
5103
5104		memset(data, 0, sizeof(*data));
5105
5106		cnic_init_bnx2x_tx_ring(dev, data);
5107		cnic_init_bnx2x_rx_ring(dev, data);
5108
5109		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5110		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
5111
5112		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5113
5114		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5115			cid, ETH_CONNECTION_TYPE, &l5_data);
5116
5117		i = 0;
5118		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5119		       ++i < 10)
5120			msleep(1);
5121
5122		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5123			netdev_err(dev->netdev,
5124				"iSCSI CLIENT_SETUP did not complete\n");
5125		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5126		cnic_ring_ctl(dev, cid, cli, 1);
5127		*cid_ptr = cid;
5128	}
5129}
5130
5131static void cnic_shutdown_rings(struct cnic_dev *dev)
5132{
5133	struct cnic_local *cp = dev->cnic_priv;
5134	struct cnic_uio_dev *udev = cp->udev;
5135	void *rx_ring;
5136
5137	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5138		return;
5139
5140	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5141		cnic_shutdown_bnx2_rx_ring(dev);
5142	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5143		u32 cli = cp->ethdev->iscsi_l2_client_id;
5144		u32 cid = cp->ethdev->iscsi_l2_cid;
5145		union l5cm_specific_data l5_data;
5146		int i;
5147
5148		cnic_ring_ctl(dev, cid, cli, 0);
5149
5150		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5151
5152		l5_data.phy_address.lo = cli;
5153		l5_data.phy_address.hi = 0;
5154		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5155			cid, ETH_CONNECTION_TYPE, &l5_data);
5156		i = 0;
5157		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5158		       ++i < 10)
5159			msleep(1);
5160
5161		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5162			netdev_err(dev->netdev,
5163				"iSCSI CLIENT_HALT did not complete\n");
5164		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5165
5166		memset(&l5_data, 0, sizeof(l5_data));
5167		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5168			cid, NONE_CONNECTION_TYPE, &l5_data);
5169		msleep(10);
5170	}
5171	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5172	rx_ring = udev->l2_ring + BCM_PAGE_SIZE;
5173	memset(rx_ring, 0, BCM_PAGE_SIZE);
5174}
5175
5176static int cnic_register_netdev(struct cnic_dev *dev)
5177{
5178	struct cnic_local *cp = dev->cnic_priv;
5179	struct cnic_eth_dev *ethdev = cp->ethdev;
5180	int err;
5181
5182	if (!ethdev)
5183		return -ENODEV;
5184
5185	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5186		return 0;
5187
5188	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5189	if (err)
5190		netdev_err(dev->netdev, "register_cnic failed\n");
5191
5192	return err;
5193}
5194
5195static void cnic_unregister_netdev(struct cnic_dev *dev)
5196{
5197	struct cnic_local *cp = dev->cnic_priv;
5198	struct cnic_eth_dev *ethdev = cp->ethdev;
5199
5200	if (!ethdev)
5201		return;
5202
5203	ethdev->drv_unregister_cnic(dev->netdev);
5204}
5205
5206static int cnic_start_hw(struct cnic_dev *dev)
5207{
5208	struct cnic_local *cp = dev->cnic_priv;
5209	struct cnic_eth_dev *ethdev = cp->ethdev;
5210	int err;
5211
5212	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5213		return -EALREADY;
5214
5215	dev->regview = ethdev->io_base;
5216	pci_dev_get(dev->pcidev);
5217	cp->func = PCI_FUNC(dev->pcidev->devfn);
5218	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5219	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5220
5221	err = cp->alloc_resc(dev);
5222	if (err) {
5223		netdev_err(dev->netdev, "allocate resource failure\n");
5224		goto err1;
5225	}
5226
5227	err = cp->start_hw(dev);
5228	if (err)
5229		goto err1;
5230
5231	err = cnic_cm_open(dev);
5232	if (err)
5233		goto err1;
5234
5235	set_bit(CNIC_F_CNIC_UP, &dev->flags);
5236
5237	cp->enable_int(dev);
5238
5239	return 0;
5240
5241err1:
5242	cp->free_resc(dev);
5243	pci_dev_put(dev->pcidev);
5244	return err;
5245}
5246
5247static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5248{
5249	cnic_disable_bnx2_int_sync(dev);
5250
5251	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5252	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5253
5254	cnic_init_context(dev, KWQ_CID);
5255	cnic_init_context(dev, KCQ_CID);
5256
5257	cnic_setup_5709_context(dev, 0);
5258	cnic_free_irq(dev);
5259
5260	cnic_free_resc(dev);
5261}
5262
5263
5264static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5265{
5266	struct cnic_local *cp = dev->cnic_priv;
5267
5268	cnic_free_irq(dev);
5269	*cp->kcq1.hw_prod_idx_ptr = 0;
5270	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5271		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
5272	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5273	cnic_free_resc(dev);
5274}
5275
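/* Tear down a running device: wait up to 1.5 s for the UIO consumer
 * to detach, shut down the rings, then stop the connection manager
 * and the chip-specific hardware.
 */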
5276static void cnic_stop_hw(struct cnic_dev *dev)
5277{
5278	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5279		struct cnic_local *cp = dev->cnic_priv;
5280		int i = 0;
5281
5282		/* Need to wait for the ring shutdown event to complete
5283		 * before clearing the CNIC_UP flag.
5284		 */
5285		while (cp->udev->uio_dev != -1 && i < 15) {
5286			msleep(100);
5287			i++;
5288		}
5289		cnic_shutdown_rings(dev);
5290		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5291		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
5292		synchronize_rcu();
5293		cnic_cm_shutdown(dev);
5294		cp->stop_hw(dev);
5295		pci_dev_put(dev->pcidev);
5296	}
5297}
5298
5299static void cnic_free_dev(struct cnic_dev *dev)
5300{
5301	int i = 0;
5302
5303	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5304		msleep(100);
5305		i++;
5306	}
5307	if (atomic_read(&dev->ref_count) != 0)
5308		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5309
5310	netdev_info(dev->netdev, "Removed CNIC device\n");
5311	dev_put(dev->netdev);
5312	kfree(dev);
5313}
5314
5315static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5316				       struct pci_dev *pdev)
5317{
5318	struct cnic_dev *cdev;
5319	struct cnic_local *cp;
5320	int alloc_size;
5321
5322	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5323
5324	cdev = kzalloc(alloc_size, GFP_KERNEL);
5325	if (cdev == NULL) {
5326		netdev_err(dev, "allocate dev struct failure\n");
5327		return NULL;
5328	}
5329
5330	cdev->netdev = dev;
5331	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5332	cdev->register_device = cnic_register_device;
5333	cdev->unregister_device = cnic_unregister_device;
5334	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5335
5336	cp = cdev->cnic_priv;
5337	cp->dev = cdev;
5338	cp->l2_single_buf_size = 0x400;
5339	cp->l2_rx_ring_size = 3;
5340
5341	spin_lock_init(&cp->cnic_ulp_lock);
5342
5343	netdev_info(dev, "Added CNIC device\n");
5344
5345	return cdev;
5346}
5347
5348static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5349{
5350	struct pci_dev *pdev;
5351	struct cnic_dev *cdev;
5352	struct cnic_local *cp;
5353	struct cnic_eth_dev *ethdev = NULL;
5354	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5355
5356	probe = symbol_get(bnx2_cnic_probe);
5357	if (probe) {
5358		ethdev = (*probe)(dev);
5359		symbol_put(bnx2_cnic_probe);
5360	}
5361	if (!ethdev)
5362		return NULL;
5363
5364	pdev = ethdev->pdev;
5365	if (!pdev)
5366		return NULL;
5367
5368	dev_hold(dev);
5369	pci_dev_get(pdev);
5370	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5371	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5372	    (pdev->revision < 0x10)) {
5373		pci_dev_put(pdev);
5374		goto cnic_err;
5375	}
5376	pci_dev_put(pdev);
5377
5378	cdev = cnic_alloc_dev(dev, pdev);
5379	if (cdev == NULL)
5380		goto cnic_err;
5381
5382	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5383	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5384
5385	cp = cdev->cnic_priv;
5386	cp->ethdev = ethdev;
5387	cdev->pcidev = pdev;
5388	cp->chip_id = ethdev->chip_id;
5389
5390	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5391
5392	cp->cnic_ops = &cnic_bnx2_ops;
5393	cp->start_hw = cnic_start_bnx2_hw;
5394	cp->stop_hw = cnic_stop_bnx2_hw;
5395	cp->setup_pgtbl = cnic_setup_page_tbl;
5396	cp->alloc_resc = cnic_alloc_bnx2_resc;
5397	cp->free_resc = cnic_free_resc;
5398	cp->start_cm = cnic_cm_init_bnx2_hw;
5399	cp->stop_cm = cnic_cm_stop_bnx2_hw;
5400	cp->enable_int = cnic_enable_bnx2_int;
5401	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5402	cp->close_conn = cnic_close_bnx2_conn;
5403	return cdev;
5404
5405cnic_err:
5406	dev_put(dev);
5407	return NULL;
5408}
5409
5410static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5411{
5412	struct pci_dev *pdev;
5413	struct cnic_dev *cdev;
5414	struct cnic_local *cp;
5415	struct cnic_eth_dev *ethdev = NULL;
5416	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5417
5418	probe = symbol_get(bnx2x_cnic_probe);
5419	if (probe) {
5420		ethdev = (*probe)(dev);
5421		symbol_put(bnx2x_cnic_probe);
5422	}
5423	if (!ethdev)
5424		return NULL;
5425
5426	pdev = ethdev->pdev;
5427	if (!pdev)
5428		return NULL;
5429
5430	dev_hold(dev);
5431	cdev = cnic_alloc_dev(dev, pdev);
5432	if (cdev == NULL) {
5433		dev_put(dev);
5434		return NULL;
5435	}
5436
	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;

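	/* Honor only what bnx2x advertises: iSCSI unless explicitly
	 * disabled, FCoE only on E2 and later chips, and never more FCoE
	 * connections than BNX2X_FCOE_NUM_CONNECTIONS.
	 */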
	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
	    !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;

	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
	else
		cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	return cdev;
}

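/* Decide from the ethtool driver name whether this netdev belongs to
 * bnx2 or bnx2x, and if so allocate a cnic_dev for it and put it on
 * the global device list.
 */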
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

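/* Fan a netdev event out to every ULP (bnx2i, bnx2fc) that installed
 * an indicate_netevent callback.  The ulp_ops table is RCU-protected,
 * so a read-side critical section is all that is needed here.
 */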
static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
			      u16 vlan_id)
{
	int if_type;

	rcu_read_lock();
	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;
		void *ctx;

		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
		if (!ulp_ops || !ulp_ops->indicate_netevent)
			continue;

		ctx = cp->ulp_handle[if_type];

		ulp_ops->indicate_netevent(ctx, event, vlan_id);
	}
	rcu_read_unlock();
}

/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct cnic_dev *dev;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		cnic_rcv_netevent(cp, event, 0);

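		/* Teardown mirrors bring-up in reverse: ULPs first, then
		 * the hardware, then the netdev registration.
		 */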
		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	} else {
		struct net_device *realdev;
		u16 vid;

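		/* The event was for a VLAN device: find the cnic_dev of
		 * the real device underneath and pass the VLAN id along.
		 */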
		vid = cnic_get_vlan(netdev, &realdev);
		if (realdev) {
			dev = cnic_from_netdev(realdev);
			if (dev) {
				vid |= VLAN_CFI_MASK;	/* make vlan_id non-zero */
				cnic_rcv_netevent(dev->cnic_priv, event, vid);
				cnic_put(dev);
			}
		}
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

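/* Module unload path: stop and free every cnic_dev and UIO device
 * still on the global lists.
 */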
static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

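/* register_netdevice_notifier() replays NETDEV_REGISTER (and NETDEV_UP
 * for devices that are already up), so bnx2/bnx2x netdevs that exist
 * at module load time are discovered through cnic_netdev_event() too.
 */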
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

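/* Unregister the notifier first so no new devices can appear while
 * cnic_release() tears down the existing ones; the workqueue goes
 * last, after all users are gone.
 */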
static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);