   1/* cnic.c: QLogic CNIC core network driver.
   2 *
   3 * Copyright (c) 2006-2014 Broadcom Corporation
   4 * Copyright (c) 2014-2015 QLogic Corporation
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 *
  10 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
  11 * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
  12 * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
  13 */
  14
  15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  16
  17#include <linux/module.h>
  18
  19#include <linux/kernel.h>
  20#include <linux/errno.h>
  21#include <linux/list.h>
  22#include <linux/slab.h>
  23#include <linux/pci.h>
  24#include <linux/init.h>
  25#include <linux/netdevice.h>
  26#include <linux/uio_driver.h>
  27#include <linux/in.h>
  28#include <linux/dma-mapping.h>
  29#include <linux/delay.h>
  30#include <linux/ethtool.h>
  31#include <linux/if_vlan.h>
  32#include <linux/prefetch.h>
  33#include <linux/random.h>
  34#if IS_ENABLED(CONFIG_VLAN_8021Q)
  35#define BCM_VLAN 1
  36#endif
  37#include <net/ip.h>
  38#include <net/tcp.h>
  39#include <net/route.h>
  40#include <net/ipv6.h>
  41#include <net/ip6_route.h>
  42#include <net/ip6_checksum.h>
  43#include <scsi/iscsi_if.h>
  44
  45#define BCM_CNIC	1
  46#include "cnic_if.h"
  47#include "bnx2.h"
  48#include "bnx2x/bnx2x.h"
  49#include "bnx2x/bnx2x_reg.h"
  50#include "bnx2x/bnx2x_fw_defs.h"
  51#include "bnx2x/bnx2x_hsi.h"
  52#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
  53#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
  54#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
  55#include "cnic.h"
  56#include "cnic_defs.h"
  57
  58#define CNIC_MODULE_NAME	"cnic"
  59
  60static char version[] =
  61	"QLogic " CNIC_MODULE_NAME " Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
  62
  63MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
  64	      "Chen (zongxi@broadcom.com)");
  65MODULE_DESCRIPTION("QLogic cnic Driver");
  66MODULE_LICENSE("GPL");
  67MODULE_VERSION(CNIC_MODULE_VERSION);
  68
  69/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
  70static LIST_HEAD(cnic_dev_list);
  71static LIST_HEAD(cnic_udev_list);
  72static DEFINE_RWLOCK(cnic_dev_lock);
  73static DEFINE_MUTEX(cnic_lock);
  74
  75static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
  76
  77/* helper function, assuming cnic_lock is held */
  78static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
  79{
  80	return rcu_dereference_protected(cnic_ulp_tbl[type],
  81					 lockdep_is_held(&cnic_lock));
  82}
  83
  84static int cnic_service_bnx2(void *, void *);
  85static int cnic_service_bnx2x(void *, void *);
  86static int cnic_ctl(void *, struct cnic_ctl_info *);
  87
  88static struct cnic_ops cnic_bnx2_ops = {
  89	.cnic_owner	= THIS_MODULE,
  90	.cnic_handler	= cnic_service_bnx2,
  91	.cnic_ctl	= cnic_ctl,
  92};
  93
  94static struct cnic_ops cnic_bnx2x_ops = {
  95	.cnic_owner	= THIS_MODULE,
  96	.cnic_handler	= cnic_service_bnx2x,
  97	.cnic_ctl	= cnic_ctl,
  98};
  99
 100static struct workqueue_struct *cnic_wq;
 101
 102static void cnic_shutdown_rings(struct cnic_dev *);
 103static void cnic_init_rings(struct cnic_dev *);
 104static int cnic_cm_set_pg(struct cnic_sock *);
 105
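/* UIO open handler: only a CAP_NET_ADMIN caller may open the device,
 * only one opener is allowed at a time (udev->uio_dev records the
 * minor), and the rings are re-initialized so that the userspace
 * client always starts from a clean state.
 */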
 106static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
 107{
 108	struct cnic_uio_dev *udev = uinfo->priv;
 109	struct cnic_dev *dev;
 110
 111	if (!capable(CAP_NET_ADMIN))
 112		return -EPERM;
 113
 114	if (udev->uio_dev != -1)
 115		return -EBUSY;
 116
 117	rtnl_lock();
 118	dev = udev->dev;
 119
 120	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
 121		rtnl_unlock();
 122		return -ENODEV;
 123	}
 124
 125	udev->uio_dev = iminor(inode);
 126
 127	cnic_shutdown_rings(dev);
 128	cnic_init_rings(dev);
 129	rtnl_unlock();
 130
 131	return 0;
 132}
 133
 134static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
 135{
 136	struct cnic_uio_dev *udev = uinfo->priv;
 137
 138	udev->uio_dev = -1;
 139	return 0;
 140}
 141
 142static inline void cnic_hold(struct cnic_dev *dev)
 143{
 144	atomic_inc(&dev->ref_count);
 145}
 146
 147static inline void cnic_put(struct cnic_dev *dev)
 148{
 149	atomic_dec(&dev->ref_count);
 150}
 151
 152static inline void csk_hold(struct cnic_sock *csk)
 153{
 154	atomic_inc(&csk->ref_count);
 155}
 156
 157static inline void csk_put(struct cnic_sock *csk)
 158{
 159	atomic_dec(&csk->ref_count);
 160}
 161
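/* Look up the cnic device bound to @netdev.  On success a reference
 * is taken with cnic_hold(); the caller must release it via cnic_put().
 */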
 162static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
 163{
 164	struct cnic_dev *cdev;
 165
 166	read_lock(&cnic_dev_lock);
 167	list_for_each_entry(cdev, &cnic_dev_list, list) {
 168		if (netdev == cdev->netdev) {
 169			cnic_hold(cdev);
 170			read_unlock(&cnic_dev_lock);
 171			return cdev;
 172		}
 173	}
 174	read_unlock(&cnic_dev_lock);
 175	return NULL;
 176}
 177
 178static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
 179{
 180	atomic_inc(&ulp_ops->ref_count);
 181}
 182
 183static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
 184{
 185	atomic_dec(&ulp_ops->ref_count);
 186}
 187
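/* The helpers below never touch the hardware directly: context,
 * register and L2 ring operations are all funneled through the parent
 * ethernet driver's drv_ctl() callback, since bnx2/bnx2x own the device.
 */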
 188static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 189{
 190	struct cnic_local *cp = dev->cnic_priv;
 191	struct cnic_eth_dev *ethdev = cp->ethdev;
 192	struct drv_ctl_info info;
 193	struct drv_ctl_io *io = &info.data.io;
 194
 195	memset(&info, 0, sizeof(struct drv_ctl_info));
 196	info.cmd = DRV_CTL_CTX_WR_CMD;
 197	io->cid_addr = cid_addr;
 198	io->offset = off;
 199	io->data = val;
 200	ethdev->drv_ctl(dev->netdev, &info);
 201}
 202
 203static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
 204{
 205	struct cnic_local *cp = dev->cnic_priv;
 206	struct cnic_eth_dev *ethdev = cp->ethdev;
 207	struct drv_ctl_info info;
 208	struct drv_ctl_io *io = &info.data.io;
 209
 210	memset(&info, 0, sizeof(struct drv_ctl_info));
 211	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
 212	io->offset = off;
 213	io->dma_addr = addr;
 214	ethdev->drv_ctl(dev->netdev, &info);
 215}
 216
 217static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
 218{
 219	struct cnic_local *cp = dev->cnic_priv;
 220	struct cnic_eth_dev *ethdev = cp->ethdev;
 221	struct drv_ctl_info info;
 222	struct drv_ctl_l2_ring *ring = &info.data.ring;
 223
 224	memset(&info, 0, sizeof(struct drv_ctl_info));
 225	if (start)
 226		info.cmd = DRV_CTL_START_L2_CMD;
 227	else
 228		info.cmd = DRV_CTL_STOP_L2_CMD;
 229
 230	ring->cid = cid;
 231	ring->client_id = cl_id;
 232	ethdev->drv_ctl(dev->netdev, &info);
 233}
 234
 235static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
 236{
 237	struct cnic_local *cp = dev->cnic_priv;
 238	struct cnic_eth_dev *ethdev = cp->ethdev;
 239	struct drv_ctl_info info;
 240	struct drv_ctl_io *io = &info.data.io;
 241
 242	memset(&info, 0, sizeof(struct drv_ctl_info));
 243	info.cmd = DRV_CTL_IO_WR_CMD;
 244	io->offset = off;
 245	io->data = val;
 246	ethdev->drv_ctl(dev->netdev, &info);
 247}
 248
 249static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
 250{
 251	struct cnic_local *cp = dev->cnic_priv;
 252	struct cnic_eth_dev *ethdev = cp->ethdev;
 253	struct drv_ctl_info info;
 254	struct drv_ctl_io *io = &info.data.io;
 255
 256	memset(&info, 0, sizeof(struct drv_ctl_info));
 257	info.cmd = DRV_CTL_IO_RD_CMD;
 258	io->offset = off;
 259	ethdev->drv_ctl(dev->netdev, &info);
 260	return io->data;
 261}
 262
 263static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
 264{
 265	struct cnic_local *cp = dev->cnic_priv;
 266	struct cnic_eth_dev *ethdev = cp->ethdev;
 267	struct drv_ctl_info info;
 268	struct fcoe_capabilities *fcoe_cap =
 269		&info.data.register_data.fcoe_features;
 270
 271	memset(&info, 0, sizeof(struct drv_ctl_info));
 272	if (reg) {
 273		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
 274		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
 275			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
 276	} else {
 277		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
 278	}
 279
 280	info.data.ulp_type = ulp_type;
 281	info.drv_state = state;
 282	ethdev->drv_ctl(dev->netdev, &info);
 283}
 284
 285static int cnic_in_use(struct cnic_sock *csk)
 286{
 287	return test_bit(SK_F_INUSE, &csk->flags);
 288}
 289
 290static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
 291{
 292	struct cnic_local *cp = dev->cnic_priv;
 293	struct cnic_eth_dev *ethdev = cp->ethdev;
 294	struct drv_ctl_info info;
 295
 296	memset(&info, 0, sizeof(struct drv_ctl_info));
 297	info.cmd = cmd;
 298	info.data.credit.credit_count = count;
 299	ethdev->drv_ctl(dev->netdev, &info);
 300}
 301
 302static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
 303{
 304	u32 i;
 305
 306	if (!cp->ctx_tbl)
 307		return -EINVAL;
 308
 309	for (i = 0; i < cp->max_cid_space; i++) {
 310		if (cp->ctx_tbl[i].cid == cid) {
 311			*l5_cid = i;
 312			return 0;
 313		}
 314	}
 315	return -EINVAL;
 316}
 317
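/* Send a message to the userspace iSCSI agent through the registered
 * iSCSI ULP's iscsi_nl_send_msg() callback.  For ISCSI_KEVENT_PATH_REQ
 * the destination address, VLAN and MTU of @csk are packed into an
 * iscsi_path request, and delivery is retried a few times because the
 * agent may not be ready yet.
 */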
 318static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 319			   struct cnic_sock *csk)
 320{
 321	struct iscsi_path path_req;
 322	char *buf = NULL;
 323	u16 len = 0;
 324	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
 325	struct cnic_ulp_ops *ulp_ops;
 326	struct cnic_uio_dev *udev = cp->udev;
 327	int rc = 0, retry = 0;
 328
 329	if (!udev || udev->uio_dev == -1)
 330		return -ENODEV;
 331
 332	if (csk) {
 333		len = sizeof(path_req);
 334		buf = (char *) &path_req;
 335		memset(&path_req, 0, len);
 336
 337		msg_type = ISCSI_KEVENT_PATH_REQ;
 338		path_req.handle = (u64) csk->l5_cid;
 339		if (test_bit(SK_F_IPV6, &csk->flags)) {
 340			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
 341			       sizeof(struct in6_addr));
 342			path_req.ip_addr_len = 16;
 343		} else {
 344			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
 345			       sizeof(struct in_addr));
 346			path_req.ip_addr_len = 4;
 347		}
 348		path_req.vlan_id = csk->vlan_id;
 349		path_req.pmtu = csk->mtu;
 350	}
 351
 352	while (retry < 3) {
 353		rc = 0;
 354		rcu_read_lock();
 355		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
 356		if (ulp_ops)
 357			rc = ulp_ops->iscsi_nl_send_msg(
 358				cp->ulp_handle[CNIC_ULP_ISCSI],
 359				msg_type, buf, len);
 360		rcu_read_unlock();
 361		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
 362			break;
 363
 364		msleep(100);
 365		retry++;
 366	}
 367	return rc;
 368}
 369
 370static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
 371
 372static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 373				  char *buf, u16 len)
 374{
 375	int rc = -EINVAL;
 376
 377	switch (msg_type) {
 378	case ISCSI_UEVENT_PATH_UPDATE: {
 379		struct cnic_local *cp;
 380		u32 l5_cid;
 381		struct cnic_sock *csk;
 382		struct iscsi_path *path_resp;
 383
 384		if (len < sizeof(*path_resp))
 385			break;
 386
 387		path_resp = (struct iscsi_path *) buf;
 388		cp = dev->cnic_priv;
 389		l5_cid = (u32) path_resp->handle;
 390		if (l5_cid >= MAX_CM_SK_TBL_SZ)
 391			break;
 392
 393		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
 394			rc = -ENODEV;
 395			break;
 396		}
 397		csk = &cp->csk_tbl[l5_cid];
 398		csk_hold(csk);
 399		if (cnic_in_use(csk) &&
 400		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
 401
 402			csk->vlan_id = path_resp->vlan_id;
 403
 404			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
 405			if (test_bit(SK_F_IPV6, &csk->flags))
 406				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
 407				       sizeof(struct in6_addr));
 408			else
 409				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
 410				       sizeof(struct in_addr));
 411
 412			if (is_valid_ether_addr(csk->ha)) {
 413				cnic_cm_set_pg(csk);
 414			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
 415				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 416
 417				cnic_cm_upcall(cp, csk,
 418					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
 419				clear_bit(SK_F_CONNECT_START, &csk->flags);
 420			}
 421		}
 422		csk_put(csk);
 423		rc = 0;
 424	}
 425	}
 426
 427	return rc;
 428}
 429
 430static int cnic_offld_prep(struct cnic_sock *csk)
 431{
 432	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 433		return 0;
 434
 435	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
 436		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
 437		return 0;
 438	}
 439
 440	return 1;
 441}
 442
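/* SK_F_OFFLD_SCHED doubles as a busy bit: close and abort spin until
 * any in-flight offload has finished before tearing the connection down.
 */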
 443static int cnic_close_prep(struct cnic_sock *csk)
 444{
 445	clear_bit(SK_F_CONNECT_START, &csk->flags);
 446	smp_mb__after_atomic();
 447
 448	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 449		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 450			msleep(1);
 451
 452		return 1;
 453	}
 454	return 0;
 455}
 456
 457static int cnic_abort_prep(struct cnic_sock *csk)
 458{
 459	clear_bit(SK_F_CONNECT_START, &csk->flags);
 460	smp_mb__after_atomic();
 461
 462	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 463		msleep(1);
 464
 465	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 466		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
 467		return 1;
 468	}
 469
 470	return 0;
 471}
 472
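/* Registration entry point for upper-layer protocol (ULP) drivers such
 * as bnx2i and bnx2fc.  A typical caller (illustrative sketch only, not
 * code taken from any ULP) looks like:
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.cnic_init	= my_init,
 *		.cnic_exit	= my_exit,
 *	};
 *	cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 *
 * Once the table slot is claimed, cnic_init() is invoked for every
 * cnic device already present in the system.
 */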
 473int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 474{
 475	struct cnic_dev *dev;
 476
 477	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 478		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 479		return -EINVAL;
 480	}
 481	mutex_lock(&cnic_lock);
 482	if (cnic_ulp_tbl_prot(ulp_type)) {
 483		pr_err("%s: Type %d has already been registered\n",
 484		       __func__, ulp_type);
 485		mutex_unlock(&cnic_lock);
 486		return -EBUSY;
 487	}
 488
 489	read_lock(&cnic_dev_lock);
 490	list_for_each_entry(dev, &cnic_dev_list, list) {
 491		struct cnic_local *cp = dev->cnic_priv;
 492
 493		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
 494	}
 495	read_unlock(&cnic_dev_lock);
 496
 497	atomic_set(&ulp_ops->ref_count, 0);
 498	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
 499	mutex_unlock(&cnic_lock);
 500
 501	/* Prevent race conditions with netdev_event */
 502	rtnl_lock();
 503	list_for_each_entry(dev, &cnic_dev_list, list) {
 504		struct cnic_local *cp = dev->cnic_priv;
 505
 506		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
 507			ulp_ops->cnic_init(dev);
 508	}
 509	rtnl_unlock();
 510
 511	return 0;
 512}
 513
 514int cnic_unregister_driver(int ulp_type)
 515{
 516	struct cnic_dev *dev;
 517	struct cnic_ulp_ops *ulp_ops;
 518	int i = 0;
 519
 520	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 521		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 522		return -EINVAL;
 523	}
 524	mutex_lock(&cnic_lock);
 525	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 526	if (!ulp_ops) {
 527		pr_err("%s: Type %d has not been registered\n",
 528		       __func__, ulp_type);
 529		goto out_unlock;
 530	}
 531	read_lock(&cnic_dev_lock);
 532	list_for_each_entry(dev, &cnic_dev_list, list) {
 533		struct cnic_local *cp = dev->cnic_priv;
 534
 535		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
 536			pr_err("%s: Type %d still has devices registered\n",
 537			       __func__, ulp_type);
 538			read_unlock(&cnic_dev_lock);
 539			goto out_unlock;
 540		}
 541	}
 542	read_unlock(&cnic_dev_lock);
 543
 544	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
 545
 546	mutex_unlock(&cnic_lock);
 547	synchronize_rcu();
 548	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
 549		msleep(100);
 550		i++;
 551	}
 552
 553	if (atomic_read(&ulp_ops->ref_count) != 0)
 554		pr_warn("%s: Failed waiting for ref count to go to zero\n",
 555			__func__);
 556	return 0;
 557
 558out_unlock:
 559	mutex_unlock(&cnic_lock);
 560	return -EINVAL;
 561}
 562
 563static int cnic_start_hw(struct cnic_dev *);
 564static void cnic_stop_hw(struct cnic_dev *);
 565
 566static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 567				void *ulp_ctx)
 568{
 569	struct cnic_local *cp = dev->cnic_priv;
 570	struct cnic_ulp_ops *ulp_ops;
 571
 572	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 573		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 574		return -EINVAL;
 575	}
 576	mutex_lock(&cnic_lock);
 577	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
 578		pr_err("%s: Driver with type %d has not been registered\n",
 579		       __func__, ulp_type);
 580		mutex_unlock(&cnic_lock);
 581		return -EAGAIN;
 582	}
 583	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
 584		pr_err("%s: Type %d has already been registered to this device\n",
 585		       __func__, ulp_type);
 586		mutex_unlock(&cnic_lock);
 587		return -EBUSY;
 588	}
 589
 590	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
 591	cp->ulp_handle[ulp_type] = ulp_ctx;
 592	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 593	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
 594	cnic_hold(dev);
 595
 596	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
 597		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
 598			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
 599
 600	mutex_unlock(&cnic_lock);
 601
 602	cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
 603
 604	return 0;
 606}
 607EXPORT_SYMBOL(cnic_register_driver);
 608
 609static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 610{
 611	struct cnic_local *cp = dev->cnic_priv;
 612	int i = 0;
 613
 614	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 615		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 616		return -EINVAL;
 617	}
 618
 619	if (ulp_type == CNIC_ULP_ISCSI)
 620		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 621
 622	mutex_lock(&cnic_lock);
 623	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
 624		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
 625		cnic_put(dev);
 626	} else {
 627		pr_err("%s: device not registered to this ulp type %d\n",
 628		       __func__, ulp_type);
 629		mutex_unlock(&cnic_lock);
 630		return -EINVAL;
 631	}
 632	mutex_unlock(&cnic_lock);
 633
 634	if (ulp_type == CNIC_ULP_FCOE)
 635		dev->fcoe_cap = NULL;
 636
 637	synchronize_rcu();
 638
 639	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
 640	       i < 20) {
 641		msleep(100);
 642		i++;
 643	}
 644	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
 645		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 646
 647	if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
 648		cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
 649	else
 650		cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
 651
 652	return 0;
 653}
 654EXPORT_SYMBOL(cnic_unregister_driver);
 655
 656static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
 657			    u32 next)
 658{
 659	id_tbl->start = start_id;
 660	id_tbl->max = size;
 661	id_tbl->next = next;
 662	spin_lock_init(&id_tbl->lock);
 663	id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
 664	if (!id_tbl->table)
 665		return -ENOMEM;
 666
 667	return 0;
 668}
 669
 670static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
 671{
 672	bitmap_free(id_tbl->table);
 673	id_tbl->table = NULL;
 674}
 675
 676static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
 677{
 678	int ret = -1;
 679
 680	id -= id_tbl->start;
 681	if (id >= id_tbl->max)
 682		return ret;
 683
 684	spin_lock(&id_tbl->lock);
 685	if (!test_bit(id, id_tbl->table)) {
 686		set_bit(id, id_tbl->table);
 687		ret = 0;
 688	}
 689	spin_unlock(&id_tbl->lock);
 690	return ret;
 691}
 692
 693/* Returns -1 (as u32) if not successful; the wrap-around mask below
 694 * assumes id_tbl->max is a power of two */
 694static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
 695{
 696	u32 id;
 697
 698	spin_lock(&id_tbl->lock);
 699	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
 700	if (id >= id_tbl->max) {
 701		id = -1;
 702		if (id_tbl->next != 0) {
 703			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
 704			if (id >= id_tbl->next)
 705				id = -1;
 706		}
 707	}
 708
 709	if (id < id_tbl->max) {
 710		set_bit(id, id_tbl->table);
 711		id_tbl->next = (id + 1) & (id_tbl->max - 1);
 712		id += id_tbl->start;
 713	}
 714
 715	spin_unlock(&id_tbl->lock);
 716
 717	return id;
 718}
 719
 720static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
 721{
 722	if (id == -1)
 723		return;
 724
 725	id -= id_tbl->start;
 726	if (id >= id_tbl->max)
 727		return;
 728
 729	clear_bit(id, id_tbl->table);
 730}
 731
 732static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 733{
 734	int i;
 735
 736	if (!dma->pg_arr)
 737		return;
 738
 739	for (i = 0; i < dma->num_pages; i++) {
 740		if (dma->pg_arr[i]) {
 741			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
 742					  dma->pg_arr[i], dma->pg_map_arr[i]);
 743			dma->pg_arr[i] = NULL;
 744		}
 745	}
 746	if (dma->pgtbl) {
 747		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 748				  dma->pgtbl, dma->pgtbl_map);
 749		dma->pgtbl = NULL;
 750	}
 751	kfree(dma->pg_arr);
 752	dma->pg_arr = NULL;
 753	dma->num_pages = 0;
 754}
 755
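/* Build the chip page table for a cnic_dma block.  The two variants
 * below differ only in dword order (high word first vs low word first);
 * the chip-appropriate one is installed in cp->setup_pgtbl at init time.
 */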
 756static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 757{
 758	int i;
 759	__le32 *page_table = (__le32 *) dma->pgtbl;
 760
 761	for (i = 0; i < dma->num_pages; i++) {
 762		/* Each 64-bit DMA address is stored high dword first (big-endian word order). */
 763		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 764		page_table++;
 765		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 766		page_table++;
 767	}
 768}
 769
 770static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
 771{
 772	int i;
 773	__le32 *page_table = (__le32 *) dma->pgtbl;
 774
 775	for (i = 0; i < dma->num_pages; i++) {
 776		/* Each 64-bit DMA address is stored low dword first (little-endian word order). */
 777		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 778		page_table++;
 779		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 780		page_table++;
 781	}
 782}
 783
 784static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 785			  int pages, int use_pg_tbl)
 786{
 787	int i, size;
 788	struct cnic_local *cp = dev->cnic_priv;
 789
 790	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
 791	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
 792	if (dma->pg_arr == NULL)
 793		return -ENOMEM;
 794
 795	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
 796	dma->num_pages = pages;
 797
 798	for (i = 0; i < pages; i++) {
 799		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
 800						    CNIC_PAGE_SIZE,
 801						    &dma->pg_map_arr[i],
 802						    GFP_ATOMIC);
 803		if (dma->pg_arr[i] == NULL)
 804			goto error;
 805	}
 806	if (!use_pg_tbl)
 807		return 0;
 808
 809	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
 810			  ~(CNIC_PAGE_SIZE - 1);
 811	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 812					&dma->pgtbl_map, GFP_ATOMIC);
 813	if (dma->pgtbl == NULL)
 814		goto error;
 815
 816	cp->setup_pgtbl(dev, dma);
 817
 818	return 0;
 819
 820error:
 821	cnic_free_dma(dev, dma);
 822	return -ENOMEM;
 823}
 824
 825static void cnic_free_context(struct cnic_dev *dev)
 826{
 827	struct cnic_local *cp = dev->cnic_priv;
 828	int i;
 829
 830	for (i = 0; i < cp->ctx_blks; i++) {
 831		if (cp->ctx_arr[i].ctx) {
 832			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
 833					  cp->ctx_arr[i].ctx,
 834					  cp->ctx_arr[i].mapping);
 835			cp->ctx_arr[i].ctx = NULL;
 836		}
 837	}
 838}
 839
 840static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
 841{
 842	if (udev->l2_buf) {
 843		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
 844				  udev->l2_buf, udev->l2_buf_map);
 845		udev->l2_buf = NULL;
 846	}
 847
 848	if (udev->l2_ring) {
 849		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
 850				  udev->l2_ring, udev->l2_ring_map);
 851		udev->l2_ring = NULL;
 852	}
 853
 854}
 855
 856static void __cnic_free_uio(struct cnic_uio_dev *udev)
 857{
 858	uio_unregister_device(&udev->cnic_uinfo);
 859
 860	__cnic_free_uio_rings(udev);
 861
 862	pci_dev_put(udev->pdev);
 863	kfree(udev);
 864}
 865
 866static void cnic_free_uio(struct cnic_uio_dev *udev)
 867{
 868	if (!udev)
 869		return;
 870
 871	write_lock(&cnic_dev_lock);
 872	list_del_init(&udev->list);
 873	write_unlock(&cnic_dev_lock);
 874	__cnic_free_uio(udev);
 875}
 876
 877static void cnic_free_resc(struct cnic_dev *dev)
 878{
 879	struct cnic_local *cp = dev->cnic_priv;
 880	struct cnic_uio_dev *udev = cp->udev;
 881
 882	if (udev) {
 883		udev->dev = NULL;
 884		cp->udev = NULL;
 885		if (udev->uio_dev == -1)
 886			__cnic_free_uio_rings(udev);
 887	}
 888
 889	cnic_free_context(dev);
 890	kfree(cp->ctx_arr);
 891	cp->ctx_arr = NULL;
 892	cp->ctx_blks = 0;
 893
 894	cnic_free_dma(dev, &cp->gbl_buf_info);
 895	cnic_free_dma(dev, &cp->kwq_info);
 896	cnic_free_dma(dev, &cp->kwq_16_data_info);
 897	cnic_free_dma(dev, &cp->kcq2.dma);
 898	cnic_free_dma(dev, &cp->kcq1.dma);
 899	kfree(cp->iscsi_tbl);
 900	cp->iscsi_tbl = NULL;
 901	kfree(cp->ctx_tbl);
 902	cp->ctx_tbl = NULL;
 903
 904	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
 905	cnic_free_id_tbl(&cp->cid_tbl);
 906}
 907
 908static int cnic_alloc_context(struct cnic_dev *dev)
 909{
 910	struct cnic_local *cp = dev->cnic_priv;
 911
 912	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
 913		int i, k, arr_size;
 914
 915		cp->ctx_blk_size = CNIC_PAGE_SIZE;
 916		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
 917		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
 918			   sizeof(struct cnic_ctx);
 919		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
 920		if (cp->ctx_arr == NULL)
 921			return -ENOMEM;
 922
 923		k = 0;
 924		for (i = 0; i < 2; i++) {
 925			u32 j, reg, off, lo, hi;
 926
 927			if (i == 0)
 928				off = BNX2_PG_CTX_MAP;
 929			else
 930				off = BNX2_ISCSI_CTX_MAP;
 931
 932			reg = cnic_reg_rd_ind(dev, off);
 933			lo = reg >> 16;
 934			hi = reg & 0xffff;
 935			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
 936				cp->ctx_arr[k].cid = j;
 937		}
 938
 939		cp->ctx_blks = k;
 940		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
 941			cp->ctx_blks = 0;
 942			return -ENOMEM;
 943		}
 944
 945		for (i = 0; i < cp->ctx_blks; i++) {
 946			cp->ctx_arr[i].ctx =
 947				dma_alloc_coherent(&dev->pcidev->dev,
 948						   CNIC_PAGE_SIZE,
 949						   &cp->ctx_arr[i].mapping,
 950						   GFP_KERNEL);
 951			if (cp->ctx_arr[i].ctx == NULL)
 952				return -ENOMEM;
 953		}
 954	}
 955	return 0;
 956}
 957
 958static u16 cnic_bnx2_next_idx(u16 idx)
 959{
 960	return idx + 1;
 961}
 962
 963static u16 cnic_bnx2_hw_idx(u16 idx)
 964{
 965	return idx;
 966}
 967
 968static u16 cnic_bnx2x_next_idx(u16 idx)
 969{
 970	idx++;
 971	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
 972		idx++;
 973
 974	return idx;
 975}
 976
 977static u16 cnic_bnx2x_hw_idx(u16 idx)
 978{
 979	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
 980		idx++;
 981	return idx;
 982}
 983
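/* Allocate a kernel completion queue (KCQ).  On bnx2x the last KCQE of
 * each page is reserved for a bd_chain_next pointer to the following
 * page, which is why the bnx2x index helpers above skip that slot.
 */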
 984static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
 985			  bool use_pg_tbl)
 986{
 987	int err, i, use_page_tbl = 0;
 988	struct kcqe **kcq;
 989
 990	if (use_pg_tbl)
 991		use_page_tbl = 1;
 992
 993	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
 994	if (err)
 995		return err;
 996
 997	kcq = (struct kcqe **) info->dma.pg_arr;
 998	info->kcq = kcq;
 999
1000	info->next_idx = cnic_bnx2_next_idx;
1001	info->hw_idx = cnic_bnx2_hw_idx;
1002	if (use_pg_tbl)
1003		return 0;
1004
1005	info->next_idx = cnic_bnx2x_next_idx;
1006	info->hw_idx = cnic_bnx2x_hw_idx;
1007
1008	for (i = 0; i < KCQ_PAGE_CNT; i++) {
1009		struct bnx2x_bd_chain_next *next =
1010			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
1011		int j = i + 1;
1012
1013		if (j >= KCQ_PAGE_CNT)
1014			j = 0;
1015		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
1016		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
1017	}
1018	return 0;
1019}
1020
1021static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
1022{
1023	struct cnic_local *cp = udev->dev->cnic_priv;
1024
1025	if (udev->l2_ring)
1026		return 0;
1027
1028	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
1029	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1030					   &udev->l2_ring_map, GFP_KERNEL);
1031	if (!udev->l2_ring)
1032		return -ENOMEM;
1033
1034	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
1035	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
1036	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1037					  &udev->l2_buf_map, GFP_KERNEL);
1038	if (!udev->l2_buf) {
1039		__cnic_free_uio_rings(udev);
1040		return -ENOMEM;
1041	}
1042
1043	return 0;
1045}
1046
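/* Find or create the cnic_uio_dev for this PCI function.  The udev and
 * its DMA rings outlive CNIC_UP/DOWN cycles so that a userspace client
 * holding the UIO device keeps working across restarts.
 */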
1047static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1048{
1049	struct cnic_local *cp = dev->cnic_priv;
1050	struct cnic_uio_dev *udev;
1051
1052	list_for_each_entry(udev, &cnic_udev_list, list) {
1053		if (udev->pdev == dev->pcidev) {
1054			udev->dev = dev;
1055			if (__cnic_alloc_uio_rings(udev, pages)) {
1056				udev->dev = NULL;
1057				return -ENOMEM;
1058			}
1059			cp->udev = udev;
1060			return 0;
1061		}
1062	}
1063
1064	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1065	if (!udev)
1066		return -ENOMEM;
1067
1068	udev->uio_dev = -1;
1069
1070	udev->dev = dev;
1071	udev->pdev = dev->pcidev;
1072
1073	if (__cnic_alloc_uio_rings(udev, pages))
1074		goto err_udev;
1075
1076	list_add(&udev->list, &cnic_udev_list);
1077
1078	pci_dev_get(udev->pdev);
1079
1080	cp->udev = udev;
1081
1082	return 0;
1083
1084 err_udev:
1085	kfree(udev);
1086	return -ENOMEM;
1087}
1088
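/* Export the device to userspace through UIO.  Four memory regions are
 * mapped: the BAR0 registers, the status block, the L2 ring and the L2
 * buffer; bnx2 and bnx2x differ only in region sizes and device name.
 */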
1089static int cnic_init_uio(struct cnic_dev *dev)
1090{
1091	struct cnic_local *cp = dev->cnic_priv;
1092	struct cnic_uio_dev *udev = cp->udev;
1093	struct uio_info *uinfo;
1094	int ret = 0;
1095
1096	if (!udev)
1097		return -ENOMEM;
1098
1099	uinfo = &udev->cnic_uinfo;
1100
1101	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
1102	uinfo->mem[0].internal_addr = dev->regview;
1103	uinfo->mem[0].memtype = UIO_MEM_PHYS;
1104
1105	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1106		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
1107						     TX_MAX_TSS_RINGS + 1);
1108		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
1109					CNIC_PAGE_MASK;
1110		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
1111			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
1112		else
1113			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
1114
1115		uinfo->name = "bnx2_cnic";
1116	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1117		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
1118
1119		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
1120			CNIC_PAGE_MASK;
1121		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
1122
1123		uinfo->name = "bnx2x_cnic";
1124	}
1125
1126	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
1127
1128	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
1129	uinfo->mem[2].size = udev->l2_ring_size;
1130	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
1131
1132	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
1133	uinfo->mem[3].size = udev->l2_buf_size;
1134	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
1135
1136	uinfo->version = CNIC_MODULE_VERSION;
1137	uinfo->irq = UIO_IRQ_CUSTOM;
1138
1139	uinfo->open = cnic_uio_open;
1140	uinfo->release = cnic_uio_close;
1141
1142	if (udev->uio_dev == -1) {
1143		if (!uinfo->priv) {
1144			uinfo->priv = udev;
1145
1146			ret = uio_register_device(&udev->pdev->dev, uinfo);
1147		}
1148	} else {
1149		cnic_init_rings(dev);
1150	}
1151
1152	return ret;
1153}
1154
1155static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
1156{
1157	struct cnic_local *cp = dev->cnic_priv;
1158	int ret;
1159
1160	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
1161	if (ret)
1162		goto error;
1163	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
1164
1165	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
1166	if (ret)
1167		goto error;
1168
1169	ret = cnic_alloc_context(dev);
1170	if (ret)
1171		goto error;
1172
1173	ret = cnic_alloc_uio_rings(dev, 2);
1174	if (ret)
1175		goto error;
1176
1177	ret = cnic_init_uio(dev);
1178	if (ret)
1179		goto error;
1180
1181	return 0;
1182
1183error:
1184	cnic_free_resc(dev);
1185	return ret;
1186}
1187
1188static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1189{
1190	struct cnic_local *cp = dev->cnic_priv;
1191	struct bnx2x *bp = netdev_priv(dev->netdev);
1192	int ctx_blk_size = cp->ethdev->ctx_blk_size;
1193	int total_mem, blks, i;
1194
1195	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
1196	blks = total_mem / ctx_blk_size;
1197	if (total_mem % ctx_blk_size)
1198		blks++;
1199
1200	if (blks > cp->ethdev->ctx_tbl_len)
1201		return -ENOMEM;
1202
1203	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
1204	if (cp->ctx_arr == NULL)
1205		return -ENOMEM;
1206
1207	cp->ctx_blks = blks;
1208	cp->ctx_blk_size = ctx_blk_size;
1209	if (!CHIP_IS_E1(bp))
1210		cp->ctx_align = 0;
1211	else
1212		cp->ctx_align = ctx_blk_size;
1213
1214	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
1215
1216	for (i = 0; i < blks; i++) {
1217		cp->ctx_arr[i].ctx =
1218			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
1219					   &cp->ctx_arr[i].mapping,
1220					   GFP_KERNEL);
1221		if (cp->ctx_arr[i].ctx == NULL)
1222			return -ENOMEM;
1223
1224		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
1225			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
1226				cnic_free_context(dev);
1227				cp->ctx_blk_size += cp->ctx_align;
1228				i = -1;
1229				continue;
1230			}
1231		}
1232	}
1233	return 0;
1234}
1235
1236static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1237{
1238	struct cnic_local *cp = dev->cnic_priv;
1239	struct bnx2x *bp = netdev_priv(dev->netdev);
1240	struct cnic_eth_dev *ethdev = cp->ethdev;
1241	u32 start_cid = ethdev->starting_cid;
1242	int i, j, n, ret, pages;
1243	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1244
1245	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1246	cp->iscsi_start_cid = start_cid;
1247	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
1248
1249	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
1250		cp->max_cid_space += dev->max_fcoe_conn;
1251		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
1252		if (!cp->fcoe_init_cid)
1253			cp->fcoe_init_cid = 0x10;
1254	}
1255
1256	cp->iscsi_tbl = kcalloc(MAX_ISCSI_TBL_SZ, sizeof(struct cnic_iscsi),
1257				GFP_KERNEL);
1258	if (!cp->iscsi_tbl)
1259		goto error;
1260
1261	cp->ctx_tbl = kcalloc(cp->max_cid_space, sizeof(struct cnic_context),
1262			      GFP_KERNEL);
1263	if (!cp->ctx_tbl)
1264		goto error;
1265
1266	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1267		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
1268		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1269	}
1270
1271	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
1272		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
1273
1274	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1275		CNIC_PAGE_SIZE;
1276
1277	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1278	if (ret)
1279		goto error;
1280
1281	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1282	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
1283		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1284
1285		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
1286		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
1287						   off;
1288
1289		if ((i % n) == (n - 1))
1290			j++;
1291	}
1292
1293	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
1294	if (ret)
1295		goto error;
1296
1297	if (CNIC_SUPPORTS_FCOE(bp)) {
1298		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1299		if (ret)
1300			goto error;
1301	}
1302
1303	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
1304	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1305	if (ret)
1306		goto error;
1307
1308	ret = cnic_alloc_bnx2x_context(dev);
1309	if (ret)
1310		goto error;
1311
1312	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
1313		return 0;
1314
1315	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1316
1317	cp->l2_rx_ring_size = 15;
1318
1319	ret = cnic_alloc_uio_rings(dev, 4);
1320	if (ret)
1321		goto error;
1322
1323	ret = cnic_init_uio(dev);
1324	if (ret)
1325		goto error;
1326
1327	return 0;
1328
1329error:
1330	cnic_free_resc(dev);
1331	return -ENOMEM;
1332}
1333
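/* Number of free slots in the kernel work queue ring; max_kwq_idx is
 * used as a power-of-two-minus-one ring mask, so the subtraction stays
 * correct across producer index wrap-around.
 */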
1334static inline u32 cnic_kwq_avail(struct cnic_local *cp)
1335{
1336	return cp->max_kwq_idx -
1337		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
1338}
1339
1340static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1341				  u32 num_wqes)
1342{
1343	struct cnic_local *cp = dev->cnic_priv;
1344	struct kwqe *prod_qe;
1345	u16 prod, sw_prod, i;
1346
1347	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1348		return -EAGAIN;		/* bnx2 is down */
1349
1350	spin_lock_bh(&cp->cnic_ulp_lock);
1351	if (num_wqes > cnic_kwq_avail(cp) &&
1352	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
1353		spin_unlock_bh(&cp->cnic_ulp_lock);
1354		return -EAGAIN;
1355	}
1356
1357	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
1358
1359	prod = cp->kwq_prod_idx;
1360	sw_prod = prod & MAX_KWQ_IDX;
1361	for (i = 0; i < num_wqes; i++) {
1362		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
1363		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
1364		prod++;
1365		sw_prod = prod & MAX_KWQ_IDX;
1366	}
1367	cp->kwq_prod_idx = prod;
1368
1369	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
1370
1371	spin_unlock_bh(&cp->cnic_ulp_lock);
1372	return 0;
1373}
1374
1375static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
1376				   union l5cm_specific_data *l5_data)
1377{
1378	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1379	dma_addr_t map;
1380
1381	map = ctx->kwqe_data_mapping;
1382	l5_data->phy_address.lo = (u64) map & 0xffffffff;
1383	l5_data->phy_address.hi = (u64) map >> 32;
1384	return ctx->kwqe_data;
1385}
1386
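/* Submit a single 16-byte work queue entry (a bnx2x slow-path SPE).
 * drv_submit_kwqes_16() returns the number of entries it consumed, so
 * a return value of 1 means success here.
 */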
1387static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1388				u32 type, union l5cm_specific_data *l5_data)
1389{
1390	struct cnic_local *cp = dev->cnic_priv;
1391	struct bnx2x *bp = netdev_priv(dev->netdev);
1392	struct l5cm_spe kwqe;
1393	struct kwqe_16 *kwq[1];
1394	u16 type_16;
1395	int ret;
1396
1397	kwqe.hdr.conn_and_cmd_data =
1398		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1399			     BNX2X_HW_CID(bp, cid)));
1400
1401	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
1402	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1403		   SPE_HDR_FUNCTION_ID;
1404
1405	kwqe.hdr.type = cpu_to_le16(type_16);
1406	kwqe.hdr.reserved1 = 0;
1407	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1408	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1409
1410	kwq[0] = (struct kwqe_16 *) &kwqe;
1411
1412	spin_lock_bh(&cp->cnic_ulp_lock);
1413	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1414	spin_unlock_bh(&cp->cnic_ulp_lock);
1415
1416	if (ret == 1)
1417		return 0;
1418
1419	return ret;
1420}
1421
1422static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1423				   struct kcqe *cqes[], u32 num_cqes)
1424{
1425	struct cnic_local *cp = dev->cnic_priv;
1426	struct cnic_ulp_ops *ulp_ops;
1427
1428	rcu_read_lock();
1429	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1430	if (likely(ulp_ops)) {
1431		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
1432					  cqes, num_cqes);
1433	}
1434	rcu_read_unlock();
1435}
1436
1437static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
1438				       int en_tcp_dack)
1439{
1440	struct bnx2x *bp = netdev_priv(dev->netdev);
1441	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
1442	u16 tstorm_flags = 0;
1443
1444	if (time_stamps) {
1445		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1446		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1447	}
1448	if (en_tcp_dack)
1449		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
1450
1451	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1452		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
1453
1454	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1455		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
1456}
1457
1458static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1459{
1460	struct cnic_local *cp = dev->cnic_priv;
1461	struct bnx2x *bp = netdev_priv(dev->netdev);
1462	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1463	int hq_bds, pages;
1464	u32 pfid = bp->pfid;
1465
1466	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1467	cp->num_ccells = req1->num_ccells_per_conn;
1468	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
1469			      cp->num_iscsi_tasks;
1470	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1471			BNX2X_ISCSI_R2TQE_SIZE;
1472	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1473	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1474	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1475	cp->num_cqs = req1->num_cqs;
1476
1477	if (!dev->max_iscsi_conn)
1478		return 0;
1479
1480	/* init Tstorm RAM */
1481	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1482		  req1->rq_num_wqes);
1483	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1484		  CNIC_PAGE_SIZE);
1485	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1486		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1487	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1488		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1489		  req1->num_tasks_per_conn);
1490
1491	/* init Ustorm RAM */
1492	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1493		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
1494		  req1->rq_buffer_size);
1495	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1496		  CNIC_PAGE_SIZE);
1497	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1498		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1499	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1500		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1501		  req1->num_tasks_per_conn);
1502	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1503		  req1->rq_num_wqes);
1504	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1505		  req1->cq_num_wqes);
1506	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1507		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1508
1509	/* init Xstorm RAM */
1510	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1511		  CNIC_PAGE_SIZE);
1512	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1513		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1514	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1515		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1516		  req1->num_tasks_per_conn);
1517	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1518		  hq_bds);
1519	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1520		  req1->num_tasks_per_conn);
1521	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1522		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1523
1524	/* init Cstorm RAM */
1525	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1526		  CNIC_PAGE_SIZE);
1527	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1528		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1529	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1530		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1531		  req1->num_tasks_per_conn);
1532	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1533		  req1->cq_num_wqes);
1534	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1535		  hq_bds);
1536
1537	cnic_bnx2x_set_tcp_options(dev,
1538			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
1539			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
1540
1541	return 0;
1542}
1543
1544static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1545{
1546	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1547	struct bnx2x *bp = netdev_priv(dev->netdev);
1548	u32 pfid = bp->pfid;
1549	struct iscsi_kcqe kcqe;
1550	struct kcqe *cqes[1];
1551
1552	memset(&kcqe, 0, sizeof(kcqe));
1553	if (!dev->max_iscsi_conn) {
1554		kcqe.completion_status =
1555			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1556		goto done;
1557	}
1558
1559	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1560		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1561	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1562		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1563		req2->error_bit_map[1]);
1564
1565	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1566		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1567	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1568		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1569	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1570		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1571		req2->error_bit_map[1]);
1572
1573	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1574		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1575
1576	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1577
1578done:
1579	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1580	cqes[0] = (struct kcqe *) &kcqe;
1581	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1582
1583	return 0;
1584}
1585
1586static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1587{
1588	struct cnic_local *cp = dev->cnic_priv;
1589	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1590
1591	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1592		struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1593
1594		cnic_free_dma(dev, &iscsi->hq_info);
1595		cnic_free_dma(dev, &iscsi->r2tq_info);
1596		cnic_free_dma(dev, &iscsi->task_array_info);
1597		cnic_free_id(&cp->cid_tbl, ctx->cid);
1598	} else {
1599		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
1600	}
1601
1602	ctx->cid = 0;
1603}
1604
1605static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1606{
1607	u32 cid;
1608	int ret, pages;
1609	struct cnic_local *cp = dev->cnic_priv;
1610	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1611	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1612
1613	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1614		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1615		if (cid == -1) {
1616			ret = -ENOMEM;
1617			goto error;
1618		}
1619		ctx->cid = cid;
1620		return 0;
1621	}
1622
1623	cid = cnic_alloc_new_id(&cp->cid_tbl);
1624	if (cid == -1) {
1625		ret = -ENOMEM;
1626		goto error;
1627	}
1628
1629	ctx->cid = cid;
1630	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
1631
1632	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1633	if (ret)
1634		goto error;
1635
1636	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
1637	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1638	if (ret)
1639		goto error;
1640
1641	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1642	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1643	if (ret)
1644		goto error;
1645
1646	return 0;
1647
1648error:
1649	cnic_free_bnx2x_conn_resc(dev, l5_cid);
1650	return ret;
1651}
1652
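/* Map a context id to its host context memory and DMA address.  On
 * chips that require aligned context blocks (cp->ctx_align != 0) the
 * block may have been over-allocated, so the offset to the first
 * aligned byte is recomputed here.
 */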
1653static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1654				struct regpair *ctx_addr)
1655{
1656	struct cnic_local *cp = dev->cnic_priv;
1657	struct cnic_eth_dev *ethdev = cp->ethdev;
1658	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1659	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1660	unsigned long align_off = 0;
1661	dma_addr_t ctx_map;
1662	void *ctx;
1663
1664	if (cp->ctx_align) {
1665		unsigned long mask = cp->ctx_align - 1;
1666
1667		if (cp->ctx_arr[blk].mapping & mask)
1668			align_off = cp->ctx_align -
1669				    (cp->ctx_arr[blk].mapping & mask);
1670	}
1671	ctx_map = cp->ctx_arr[blk].mapping + align_off +
1672		(off * BNX2X_CONTEXT_MEM_SIZE);
1673	ctx = cp->ctx_arr[blk].ctx + align_off +
1674	      (off * BNX2X_CONTEXT_MEM_SIZE);
1675	if (init)
1676		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1677
1678	ctx_addr->lo = ctx_map & 0xffffffff;
1679	ctx_addr->hi = (u64) ctx_map >> 32;
1680	return ctx;
1681}
1682
1683static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1684				u32 num)
1685{
1686	struct cnic_local *cp = dev->cnic_priv;
1687	struct bnx2x *bp = netdev_priv(dev->netdev);
1688	struct iscsi_kwqe_conn_offload1 *req1 =
1689			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
1690	struct iscsi_kwqe_conn_offload2 *req2 =
1691			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
1692	struct iscsi_kwqe_conn_offload3 *req3;
1693	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1694	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1695	u32 cid = ctx->cid;
1696	u32 hw_cid = BNX2X_HW_CID(bp, cid);
1697	struct iscsi_context *ictx;
1698	struct regpair context_addr;
1699	int i, j, n = 2, n_max;
1700	u8 port = BP_PORT(bp);
1701
1702	ctx->ctx_flags = 0;
1703	if (!req2->num_additional_wqes)
1704		return -EINVAL;
1705
1706	n_max = req2->num_additional_wqes + 2;
1707
1708	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1709	if (ictx == NULL)
1710		return -ENOMEM;
1711
1712	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1713
1714	ictx->xstorm_ag_context.hq_prod = 1;
1715
1716	ictx->xstorm_st_context.iscsi.first_burst_length =
1717		ISCSI_DEF_FIRST_BURST_LEN;
1718	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1719		ISCSI_DEF_MAX_RECV_SEG_LEN;
1720	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1721		req1->sq_page_table_addr_lo;
1722	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1723		req1->sq_page_table_addr_hi;
1724	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1725	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1726	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1727		iscsi->hq_info.pgtbl_map & 0xffffffff;
1728	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1729		(u64) iscsi->hq_info.pgtbl_map >> 32;
1730	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1731		iscsi->hq_info.pgtbl[0];
1732	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1733		iscsi->hq_info.pgtbl[1];
1734	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1735		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1736	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1737		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1738	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1739		iscsi->r2tq_info.pgtbl[0];
1740	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1741		iscsi->r2tq_info.pgtbl[1];
1742	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1743		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1744	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1745		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1746	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1747		BNX2X_ISCSI_PBL_NOT_CACHED;
1748	ictx->xstorm_st_context.iscsi.flags.flags |=
1749		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1750	ictx->xstorm_st_context.iscsi.flags.flags |=
1751		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
1752	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1753		ETH_P_8021Q;
1754	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
1755	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
1756
1757		port = 0;
1758	}
1759	ictx->xstorm_st_context.common.flags =
1760		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
1761	ictx->xstorm_st_context.common.flags =
1762		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
1763
1764	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
 1765	/* TSTORM requires the base address of the RQ DB, not the PTE address */
1766	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1767		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
1768	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1769		req2->rq_page_table_addr_hi;
1770	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1771	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1772	ictx->tstorm_st_context.tcp.flags2 |=
1773		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1774	ictx->tstorm_st_context.tcp.ooo_support_mode =
1775		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
1776
1777	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1778
1779	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1780		req2->rq_page_table_addr_lo;
1781	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
1782		req2->rq_page_table_addr_hi;
1783	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1784	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1785	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1786		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1787	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1788		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1789	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1790		iscsi->r2tq_info.pgtbl[0];
1791	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1792		iscsi->r2tq_info.pgtbl[1];
1793	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1794		req1->cq_page_table_addr_lo;
1795	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1796		req1->cq_page_table_addr_hi;
1797	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1798	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1799	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1800	ictx->ustorm_st_context.task_pbe_cache_index =
1801		BNX2X_ISCSI_PBL_NOT_CACHED;
1802	ictx->ustorm_st_context.task_pdu_cache_index =
1803		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1804
1805	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1806		if (j == 3) {
1807			if (n >= n_max)
1808				break;
1809			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1810			j = 0;
1811		}
1812		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1813		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1814			req3->qp_first_pte[j].hi;
1815		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1816			req3->qp_first_pte[j].lo;
1817	}
1818
1819	ictx->ustorm_st_context.task_pbl_base.lo =
1820		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1821	ictx->ustorm_st_context.task_pbl_base.hi =
1822		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1823	ictx->ustorm_st_context.tce_phy_addr.lo =
1824		iscsi->task_array_info.pgtbl[0];
1825	ictx->ustorm_st_context.tce_phy_addr.hi =
1826		iscsi->task_array_info.pgtbl[1];
1827	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1828	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1829	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1830	ictx->ustorm_st_context.negotiated_rx_and_flags |=
1831		ISCSI_DEF_MAX_BURST_LEN;
1832	ictx->ustorm_st_context.negotiated_rx |=
1833		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1834		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1835
1836	ictx->cstorm_st_context.hq_pbl_base.lo =
1837		iscsi->hq_info.pgtbl_map & 0xffffffff;
1838	ictx->cstorm_st_context.hq_pbl_base.hi =
1839		(u64) iscsi->hq_info.pgtbl_map >> 32;
1840	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1841	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1842	ictx->cstorm_st_context.task_pbl_base.lo =
1843		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1844	ictx->cstorm_st_context.task_pbl_base.hi =
1845		(u64) iscsi->task_array_info.pgtbl_map >> 32;
 1846	/* CSTORM and USTORM initialization differ: CSTORM requires the
 1847	 * CQ DB base address, not the PTE address */
1848	ictx->cstorm_st_context.cq_db_base.lo =
1849		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
1850	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1851	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1852	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1853	for (i = 0; i < cp->num_cqs; i++) {
1854		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1855			ISCSI_INITIAL_SN;
1856		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1857			ISCSI_INITIAL_SN;
1858	}
1859
1860	ictx->xstorm_ag_context.cdu_reserved =
1861		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1862				       ISCSI_CONNECTION_TYPE);
1863	ictx->ustorm_ag_context.cdu_usage =
1864		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1865				       ISCSI_CONNECTION_TYPE);
1866	return 0;
1868}
1869
1870static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1871				   u32 num, int *work)
1872{
1873	struct iscsi_kwqe_conn_offload1 *req1;
1874	struct iscsi_kwqe_conn_offload2 *req2;
1875	struct cnic_local *cp = dev->cnic_priv;
1876	struct bnx2x *bp = netdev_priv(dev->netdev);
1877	struct cnic_context *ctx;
1878	struct iscsi_kcqe kcqe;
1879	struct kcqe *cqes[1];
1880	u32 l5_cid;
1881	int ret = 0;
1882
1883	if (num < 2) {
1884		*work = num;
1885		return -EINVAL;
1886	}
1887
1888	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1889	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1890	if ((num - 2) < req2->num_additional_wqes) {
1891		*work = num;
1892		return -EINVAL;
1893	}
1894	*work = 2 + req2->num_additional_wqes;
1895
1896	l5_cid = req1->iscsi_conn_id;
1897	if (l5_cid >= MAX_ISCSI_TBL_SZ)
1898		return -EINVAL;
1899
1900	memset(&kcqe, 0, sizeof(kcqe));
1901	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1902	kcqe.iscsi_conn_id = l5_cid;
1903	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1904
1905	ctx = &cp->ctx_tbl[l5_cid];
1906	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1907		kcqe.completion_status =
1908			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1909		goto done;
1910	}
1911
1912	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1913		atomic_dec(&cp->iscsi_conn);
1914		goto done;
1915	}
1916	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1917	if (ret) {
1918		atomic_dec(&cp->iscsi_conn);
1919		goto done;
1920	}
1921	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1922	if (ret < 0) {
1923		cnic_free_bnx2x_conn_resc(dev, l5_cid);
1924		atomic_dec(&cp->iscsi_conn);
1925		goto done;
1926	}
1927
1928	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1929	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
1930
1931done:
1932	cqes[0] = (struct kcqe *) &kcqe;
1933	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1934	return 0;
1935}
1936
1938static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1939{
1940	struct cnic_local *cp = dev->cnic_priv;
1941	struct iscsi_kwqe_conn_update *req =
1942		(struct iscsi_kwqe_conn_update *) kwqe;
1943	void *data;
1944	union l5cm_specific_data l5_data;
1945	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1946	int ret;
1947
1948	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1949		return -EINVAL;
1950
1951	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1952	if (!data)
1953		return -ENOMEM;
1954
1955	memcpy(data, kwqe, sizeof(struct kwqe));
1956
1957	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1958			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1959	return ret;
1960}
1961
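/* Issue a CFC delete ramrod for the context and wait up to
 * CNIC_RAMROD_TMO for its completion; returns -EBUSY if the firmware
 * reported a CID error for the delete.
 */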
1962static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1963{
1964	struct cnic_local *cp = dev->cnic_priv;
1965	struct bnx2x *bp = netdev_priv(dev->netdev);
1966	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1967	union l5cm_specific_data l5_data;
1968	int ret;
1969	u32 hw_cid;
1970
1971	init_waitqueue_head(&ctx->waitq);
1972	ctx->wait_cond = 0;
1973	memset(&l5_data, 0, sizeof(l5_data));
1974	hw_cid = BNX2X_HW_CID(bp, ctx->cid);
1975
1976	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1977				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
1978
1979	if (ret == 0) {
1980		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
1981		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
1982			return -EBUSY;
1983	}
1984
1985	return 0;
1986}
1987
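/* Handle a DESTROY_CONN request.  If the connection was offloaded less
 * than two seconds ago, the CFC delete is deferred to the delete_task
 * worker; otherwise the delete ramrod is issued now.  A DESTROY_CONN
 * KCQE is returned to the ULP in either case.
 */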
1988static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1989{
1990	struct cnic_local *cp = dev->cnic_priv;
1991	struct iscsi_kwqe_conn_destroy *req =
1992		(struct iscsi_kwqe_conn_destroy *) kwqe;
1993	u32 l5_cid = req->reserved0;
1994	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1995	int ret = 0;
1996	struct iscsi_kcqe kcqe;
1997	struct kcqe *cqes[1];
1998
1999	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2000		goto skip_cfc_delete;
2001
2002	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
2003		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
2004
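		/* jiffies may have passed the deadline between the
		 * time_after() check above and this subtraction; the
		 * unsigned result would then be huge, so clamp the
		 * delay to zero and run the delete immediately.
		 */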
2005		if (delta > (2 * HZ))
2006			delta = 0;
2007
2008		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2009		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
2010		goto destroy_reply;
2011	}
2012
2013	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
2014
2015skip_cfc_delete:
2016	cnic_free_bnx2x_conn_resc(dev, l5_cid);
2017
2018	if (!ret) {
2019		atomic_dec(&cp->iscsi_conn);
2020		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2021	}
2022
2023destroy_reply:
2024	memset(&kcqe, 0, sizeof(kcqe));
2025	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
2026	kcqe.iscsi_conn_id = l5_cid;
2027	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
2028	kcqe.iscsi_conn_context_id = req->context_id;
2029
2030	cqes[0] = (struct kcqe *) &kcqe;
2031	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
2032
2033	return 0;
2034}
2035
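/* Fill the XSTORM and TSTORM connection buffers for a TCP connect:
 * context address, MSS, receive buffer, Nagle flag and pseudo-header
 * checksum seed for the XSTORM; keep-alive parameters and the maximum
 * retransmit time for the TSTORM.
 */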
2036static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2037				      struct l4_kwq_connect_req1 *kwqe1,
2038				      struct l4_kwq_connect_req3 *kwqe3,
2039				      struct l5cm_active_conn_buffer *conn_buf)
2040{
2041	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
2042	struct l5cm_xstorm_conn_buffer *xstorm_buf =
2043		&conn_buf->xstorm_conn_buffer;
2044	struct l5cm_tstorm_conn_buffer *tstorm_buf =
2045		&conn_buf->tstorm_conn_buffer;
2046	struct regpair context_addr;
2047	u32 cid = BNX2X_SW_CID(kwqe1->cid);
2048	struct in6_addr src_ip, dst_ip;
2049	int i;
2050	u32 *addrp;
2051
2052	addrp = (u32 *) &conn_addr->local_ip_addr;
2053	for (i = 0; i < 4; i++, addrp++)
2054		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2055
2056	addrp = (u32 *) &conn_addr->remote_ip_addr;
2057	for (i = 0; i < 4; i++, addrp++)
2058		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2059
2060	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
2061
2062	xstorm_buf->context_addr.hi = context_addr.hi;
2063	xstorm_buf->context_addr.lo = context_addr.lo;
2064	xstorm_buf->mss = 0xffff;
2065	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
2066	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
2067		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
2068	xstorm_buf->pseudo_header_checksum =
2069		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
2070
2071	if (kwqe3->ka_timeout) {
2072		tstorm_buf->ka_enable = 1;
2073		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
2074		tstorm_buf->ka_interval = kwqe3->ka_interval;
2075		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
2076	}
2077	tstorm_buf->max_rt_time = 0xffffffff;
2078}
2079
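/* Program the host MAC address into the XSTORM and TSTORM iSCSI areas
 * of internal memory; the TSTORM copy is stored byte-reversed.
 */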
2080static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2081{
2082	struct bnx2x *bp = netdev_priv(dev->netdev);
2083	u32 pfid = bp->pfid;
2084	u8 *mac = dev->mac_addr;
2085
2086	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2087		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
2088	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2089		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
2090	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2091		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
2092	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2093		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
2094	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2095		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
2096	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2097		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
2098
2099	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2100		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
2101	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2102		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2103		 mac[4]);
2104	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2105		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
2106	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2107		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2108		 mac[2]);
2109	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2110		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
2111	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2112		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2113		 mac[0]);
2114}
2115
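/* Handle an L4 CONNECT1 request: collect the CONNECT1/2/3 KWQEs (the
 * CONNECT2 WQE is present only for IPv6), build the active connection
 * buffer in the kwqe-16 data area, program the iSCSI VLAN, and submit
 * a TCP_CONNECT ramrod.
 */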
2116static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2117			      u32 num, int *work)
2118{
2119	struct cnic_local *cp = dev->cnic_priv;
2120	struct bnx2x *bp = netdev_priv(dev->netdev);
2121	struct l4_kwq_connect_req1 *kwqe1 =
2122		(struct l4_kwq_connect_req1 *) wqes[0];
2123	struct l4_kwq_connect_req3 *kwqe3;
2124	struct l5cm_active_conn_buffer *conn_buf;
2125	struct l5cm_conn_addr_params *conn_addr;
2126	union l5cm_specific_data l5_data;
2127	u32 l5_cid = kwqe1->pg_cid;
2128	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
2129	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2130	int ret;
2131
2132	if (num < 2) {
2133		*work = num;
2134		return -EINVAL;
2135	}
2136
2137	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
2138		*work = 3;
2139	else
2140		*work = 2;
2141
2142	if (num < *work) {
2143		*work = num;
2144		return -EINVAL;
2145	}
2146
2147	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
2148		netdev_err(dev->netdev, "conn_buf size too big\n");
2149		return -ENOMEM;
2150	}
2151	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2152	if (!conn_buf)
2153		return -ENOMEM;
2154
2155	memset(conn_buf, 0, sizeof(*conn_buf));
2156
2157	conn_addr = &conn_buf->conn_addr_buf;
2158	conn_addr->remote_addr_0 = csk->ha[0];
2159	conn_addr->remote_addr_1 = csk->ha[1];
2160	conn_addr->remote_addr_2 = csk->ha[2];
2161	conn_addr->remote_addr_3 = csk->ha[3];
2162	conn_addr->remote_addr_4 = csk->ha[4];
2163	conn_addr->remote_addr_5 = csk->ha[5];
2164
2165	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
2166		struct l4_kwq_connect_req2 *kwqe2 =
2167			(struct l4_kwq_connect_req2 *) wqes[1];
2168
2169		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
2170		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
2171		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
2172
2173		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
2174		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
2175		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
2176		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
2177	}
2178	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
2179
2180	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
2181	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
2182	conn_addr->local_tcp_port = kwqe1->src_port;
2183	conn_addr->remote_tcp_port = kwqe1->dst_port;
2184
2185	conn_addr->pmtu = kwqe3->pmtu;
2186	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2187
2188	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
2189		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
2190
2191	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2192			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2193	if (!ret)
2194		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2195
2196	return ret;
2197}
2198
2199static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2200{
2201	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2202	union l5cm_specific_data l5_data;
2203	int ret;
2204
2205	memset(&l5_data, 0, sizeof(l5_data));
2206	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2207			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2208	return ret;
2209}
2210
2211static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2212{
2213	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2214	union l5cm_specific_data l5_data;
2215	int ret;
2216
2217	memset(&l5_data, 0, sizeof(l5_data));
2218	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2219			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2220	return ret;
2221}
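
/* OFFLOAD_PG is completed locally on bnx2x: no ramrod is submitted;
 * host_opaque is echoed back as the PG CID in an OFFLOAD_PG KCQE.
 */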
2222static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2223{
2224	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2225	struct l4_kcq kcqe;
2226	struct kcqe *cqes[1];
2227
2228	memset(&kcqe, 0, sizeof(kcqe));
2229	kcqe.pg_host_opaque = req->host_opaque;
2230	kcqe.pg_cid = req->host_opaque;
2231	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2232	cqes[0] = (struct kcqe *) &kcqe;
2233	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2234	return 0;
2235}
2236
2237static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2238{
2239	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2240	struct l4_kcq kcqe;
2241	struct kcqe *cqes[1];
2242
2243	memset(&kcqe, 0, sizeof(kcqe));
2244	kcqe.pg_host_opaque = req->pg_host_opaque;
2245	kcqe.pg_cid = req->pg_cid;
2246	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2247	cqes[0] = (struct kcqe *) &kcqe;
2248	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2249	return 0;
2250}
2251
2252static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2253{
2254	struct fcoe_kwqe_stat *req;
2255	struct fcoe_stat_ramrod_params *fcoe_stat;
2256	union l5cm_specific_data l5_data;
2257	struct cnic_local *cp = dev->cnic_priv;
2258	struct bnx2x *bp = netdev_priv(dev->netdev);
2259	int ret;
2260	u32 cid;
2261
2262	req = (struct fcoe_kwqe_stat *) kwqe;
2263	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2264
2265	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2266	if (!fcoe_stat)
2267		return -ENOMEM;
2268
2269	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2270	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2271
2272	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
2273				  FCOE_CONNECTION_TYPE, &l5_data);
2274	return ret;
2275}
2276
2277static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2278				 u32 num, int *work)
2279{
2280	int ret;
2281	struct cnic_local *cp = dev->cnic_priv;
2282	struct bnx2x *bp = netdev_priv(dev->netdev);
2283	u32 cid;
2284	struct fcoe_init_ramrod_params *fcoe_init;
2285	struct fcoe_kwqe_init1 *req1;
2286	struct fcoe_kwqe_init2 *req2;
2287	struct fcoe_kwqe_init3 *req3;
2288	union l5cm_specific_data l5_data;
2289
2290	if (num < 3) {
2291		*work = num;
2292		return -EINVAL;
2293	}
2294	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2295	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2296	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2297	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2298		*work = 1;
2299		return -EINVAL;
2300	}
2301	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2302		*work = 2;
2303		return -EINVAL;
2304	}
2305
2306	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2307		netdev_err(dev->netdev, "fcoe_init size too big\n");
2308		return -ENOMEM;
2309	}
2310	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2311	if (!fcoe_init)
2312		return -ENOMEM;
2313
2314	memset(fcoe_init, 0, sizeof(*fcoe_init));
2315	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2316	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2317	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
2318	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2319	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2320	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
2321
2322	fcoe_init->sb_num = cp->status_blk_num;
2323	fcoe_init->eq_prod = MAX_KCQ_IDX;
2324	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2325	cp->kcq2.sw_prod_idx = 0;
2326
2327	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2328	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2329				  FCOE_CONNECTION_TYPE, &l5_data);
2330	*work = 3;
2331	return ret;
2332}
2333
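/* Handle an FCoE OFFLOAD_CONN1 request: consume the four offload
 * KWQEs, allocate connection resources, initialize the chip context,
 * and submit an OFFLOAD_CONN ramrod.  On any failure an OFFLOAD_CONN
 * KCQE with CTX_ALLOC_FAILURE status is returned to the FCoE ULP.
 */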
2334static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2335				 u32 num, int *work)
2336{
2337	int ret = 0;
2338	u32 cid = -1, l5_cid;
2339	struct cnic_local *cp = dev->cnic_priv;
2340	struct bnx2x *bp = netdev_priv(dev->netdev);
2341	struct fcoe_kwqe_conn_offload1 *req1;
2342	struct fcoe_kwqe_conn_offload2 *req2;
2343	struct fcoe_kwqe_conn_offload3 *req3;
2344	struct fcoe_kwqe_conn_offload4 *req4;
2345	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2346	struct cnic_context *ctx;
2347	struct fcoe_context *fctx;
2348	struct regpair ctx_addr;
2349	union l5cm_specific_data l5_data;
2350	struct fcoe_kcqe kcqe;
2351	struct kcqe *cqes[1];
2352
2353	if (num < 4) {
2354		*work = num;
2355		return -EINVAL;
2356	}
2357	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2358	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2359	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2360	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2361
2362	*work = 4;
2363
2364	l5_cid = req1->fcoe_conn_id;
2365	if (l5_cid >= dev->max_fcoe_conn)
2366		goto err_reply;
2367
2368	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2369
2370	ctx = &cp->ctx_tbl[l5_cid];
2371	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2372		goto err_reply;
2373
2374	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2375	if (ret) {
2376		ret = 0;
2377		goto err_reply;
2378	}
2379	cid = ctx->cid;
2380
2381	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2382	if (fctx) {
2383		u32 hw_cid = BNX2X_HW_CID(bp, cid);
2384		u32 val;
2385
2386		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2387					     FCOE_CONNECTION_TYPE);
2388		fctx->xstorm_ag_context.cdu_reserved = val;
2389		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2390					     FCOE_CONNECTION_TYPE);
2391		fctx->ustorm_ag_context.cdu_usage = val;
2392	}
2393	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2394		netdev_err(dev->netdev, "fcoe_offload size too big\n");
2395		goto err_reply;
2396	}
2397	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2398	if (!fcoe_offload)
2399		goto err_reply;
2400
2401	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2402	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2403	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2404	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2405	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2406
2407	cid = BNX2X_HW_CID(bp, cid);
2408	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2409				  FCOE_CONNECTION_TYPE, &l5_data);
2410	if (!ret)
2411		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2412
2413	return ret;
2414
2415err_reply:
2416	if (cid != -1)
2417		cnic_free_bnx2x_conn_resc(dev, l5_cid);
2418
2419	memset(&kcqe, 0, sizeof(kcqe));
2420	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2421	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2422	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2423
2424	cqes[0] = (struct kcqe *) &kcqe;
2425	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2426	return ret;
2427}
2428
2429static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2430{
2431	struct fcoe_kwqe_conn_enable_disable *req;
2432	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2433	union l5cm_specific_data l5_data;
2434	int ret;
2435	u32 cid, l5_cid;
2436	struct cnic_local *cp = dev->cnic_priv;
2437
2438	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2439	cid = req->context_id;
2440	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2441
2442	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2443		netdev_err(dev->netdev, "fcoe_enable size too big\n");
2444		return -ENOMEM;
2445	}
2446	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2447	if (!fcoe_enable)
2448		return -ENOMEM;
2449
2450	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2451	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2452	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2453				  FCOE_CONNECTION_TYPE, &l5_data);
2454	return ret;
2455}
2456
2457static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2458{
2459	struct fcoe_kwqe_conn_enable_disable *req;
2460	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2461	union l5cm_specific_data l5_data;
2462	int ret;
2463	u32 cid, l5_cid;
2464	struct cnic_local *cp = dev->cnic_priv;
2465
2466	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2467	cid = req->context_id;
2468	l5_cid = req->conn_id;
2469	if (l5_cid >= dev->max_fcoe_conn)
2470		return -EINVAL;
2471
2472	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2473
2474	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2475		netdev_err(dev->netdev, "fcoe_disable size too big\n");
2476		return -ENOMEM;
2477	}
2478	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2479	if (!fcoe_disable)
2480		return -ENOMEM;
2481
2482	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2483	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2484	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2485				  FCOE_CONNECTION_TYPE, &l5_data);
2486	return ret;
2487}
2488
2489static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2490{
2491	struct fcoe_kwqe_conn_destroy *req;
2492	union l5cm_specific_data l5_data;
2493	int ret;
2494	u32 cid, l5_cid;
2495	struct cnic_local *cp = dev->cnic_priv;
2496	struct cnic_context *ctx;
2497	struct fcoe_kcqe kcqe;
2498	struct kcqe *cqes[1];
2499
2500	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2501	cid = req->context_id;
2502	l5_cid = req->conn_id;
2503	if (l5_cid >= dev->max_fcoe_conn)
2504		return -EINVAL;
2505
2506	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2507
2508	ctx = &cp->ctx_tbl[l5_cid];
2509
2510	init_waitqueue_head(&ctx->waitq);
2511	ctx->wait_cond = 0;
2512
2513	memset(&kcqe, 0, sizeof(kcqe));
2514	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2515	memset(&l5_data, 0, sizeof(l5_data));
2516	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2517				  FCOE_CONNECTION_TYPE, &l5_data);
2518	if (ret == 0) {
2519		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2520		if (ctx->wait_cond)
2521			kcqe.completion_status = 0;
2522	}
2523
2524	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2525	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2526
2527	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2528	kcqe.fcoe_conn_id = req->conn_id;
2529	kcqe.fcoe_conn_context_id = cid;
2530
2531	cqes[0] = (struct kcqe *) &kcqe;
2532	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2533	return ret;
2534}
2535
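/* Block until every context from start_cid onward has finished its
 * pending delete work, polling briefly for the offload flag to clear;
 * warn about any CID that remains offloaded.
 */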
2536static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2537{
2538	struct cnic_local *cp = dev->cnic_priv;
2539	u32 i;
2540
2541	for (i = start_cid; i < cp->max_cid_space; i++) {
2542		struct cnic_context *ctx = &cp->ctx_tbl[i];
2543		int j;
2544
2545		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2546			msleep(10);
2547
2548		for (j = 0; j < 5; j++) {
2549			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2550				break;
2551			msleep(20);
2552		}
2553
2554		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2555			netdev_warn(dev->netdev, "CID %x not deleted\n",
2556				    ctx->cid);
2557	}
2558}
2559
2560static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2561{
2562	union l5cm_specific_data l5_data;
2563	struct cnic_local *cp = dev->cnic_priv;
2564	struct bnx2x *bp = netdev_priv(dev->netdev);
2565	int ret;
2566	u32 cid;
2567
2568	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2569
2570	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2571
2572	memset(&l5_data, 0, sizeof(l5_data));
2573	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2574				  FCOE_CONNECTION_TYPE, &l5_data);
2575	return ret;
2576}
2577
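/* Construct a KCQE carrying a PARITY_ERROR completion status for a
 * failed KWQE and deliver it to the owning ULP so that cleanup and
 * reset recovery can proceed without waiting on the hardware.
 */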
2578static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2579{
2580	struct cnic_local *cp = dev->cnic_priv;
2581	struct kcqe kcqe;
2582	struct kcqe *cqes[1];
2583	u32 cid;
2584	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2585	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
2586	u32 kcqe_op;
2587	int ulp_type;
2588
2589	cid = kwqe->kwqe_info0;
2590	memset(&kcqe, 0, sizeof(kcqe));
2591
2592	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2593		u32 l5_cid = 0;
2594
2595		ulp_type = CNIC_ULP_FCOE;
2596		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2597			struct fcoe_kwqe_conn_enable_disable *req;
2598
2599			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2600			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2601			cid = req->context_id;
2602			l5_cid = req->conn_id;
2603		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2604			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2605		} else {
2606			return;
2607		}
2608		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2609		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
2610		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2611		kcqe.kcqe_info2 = cid;
2612		kcqe.kcqe_info0 = l5_cid;
2613
2614	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
2615		ulp_type = CNIC_ULP_ISCSI;
2616		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2617			cid = kwqe->kwqe_info1;
2618
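		/* derive the matching KCQE opcode: KWQE opcode + 0x10 */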
2619		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2620		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
2621		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
2622		kcqe.kcqe_info2 = cid;
2623		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2624
2625	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2626		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
2627
2628		ulp_type = CNIC_ULP_L4;
2629		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2630			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2631		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2632			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2633		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2634			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2635		else
2636			return;
2637
2638		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2639				    KCQE_FLAGS_LAYER_MASK_L4;
2640		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2641		l4kcqe->cid = cid;
2642		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2643	} else {
2644		return;
2645	}
2646
2647	cqes[0] = &kcqe;
2648	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2649}
2650
2651static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2652					 struct kwqe *wqes[], u32 num_wqes)
2653{
2654	int i, work, ret;
2655	u32 opcode;
2656	struct kwqe *kwqe;
2657
2658	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2659		return -EAGAIN;		/* bnx2x is down */
2660
2661	for (i = 0; i < num_wqes; ) {
2662		kwqe = wqes[i];
2663		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2664		work = 1;
2665
2666		switch (opcode) {
2667		case ISCSI_KWQE_OPCODE_INIT1:
2668			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2669			break;
2670		case ISCSI_KWQE_OPCODE_INIT2:
2671			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2672			break;
2673		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2674			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2675						     num_wqes - i, &work);
2676			break;
2677		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2678			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2679			break;
2680		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2681			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2682			break;
2683		case L4_KWQE_OPCODE_VALUE_CONNECT1:
2684			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2685						 &work);
2686			break;
2687		case L4_KWQE_OPCODE_VALUE_CLOSE:
2688			ret = cnic_bnx2x_close(dev, kwqe);
2689			break;
2690		case L4_KWQE_OPCODE_VALUE_RESET:
2691			ret = cnic_bnx2x_reset(dev, kwqe);
2692			break;
2693		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2694			ret = cnic_bnx2x_offload_pg(dev, kwqe);
2695			break;
2696		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2697			ret = cnic_bnx2x_update_pg(dev, kwqe);
2698			break;
2699		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2700			ret = 0;
2701			break;
2702		default:
2703			ret = 0;
2704			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2705				   opcode);
2706			break;
2707		}
2708		if (ret < 0) {
2709			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2710				   opcode);
2711
2712			/* Possibly a bnx2x parity error; send a completion
2713			 * to the ULP drivers with an error code to speed up
2714			 * cleanup and reset recovery.
2715			 */
2716			if (ret == -EIO || ret == -EAGAIN)
2717				cnic_bnx2x_kwqe_err(dev, kwqe);
2718		}
2719		i += work;
2720	}
2721	return 0;
2722}
2723
2724static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2725					struct kwqe *wqes[], u32 num_wqes)
2726{
2727	struct bnx2x *bp = netdev_priv(dev->netdev);
2728	int i, work, ret;
2729	u32 opcode;
2730	struct kwqe *kwqe;
2731
2732	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2733		return -EAGAIN;		/* bnx2x is down */
2734
2735	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
2736		return -EINVAL;
2737
2738	for (i = 0; i < num_wqes; ) {
2739		kwqe = wqes[i];
2740		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2741		work = 1;
2742
2743		switch (opcode) {
2744		case FCOE_KWQE_OPCODE_INIT1:
2745			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2746						    num_wqes - i, &work);
2747			break;
2748		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2749			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2750						    num_wqes - i, &work);
2751			break;
2752		case FCOE_KWQE_OPCODE_ENABLE_CONN:
2753			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2754			break;
2755		case FCOE_KWQE_OPCODE_DISABLE_CONN:
2756			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2757			break;
2758		case FCOE_KWQE_OPCODE_DESTROY_CONN:
2759			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2760			break;
2761		case FCOE_KWQE_OPCODE_DESTROY:
2762			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2763			break;
2764		case FCOE_KWQE_OPCODE_STAT:
2765			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2766			break;
2767		default:
2768			ret = 0;
2769			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2770				   opcode);
2771			break;
2772		}
2773		if (ret < 0) {
2774			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2775				   opcode);
2776
2777			/* Possibly a bnx2x parity error; send a completion
2778			 * to the ULP drivers with an error code to speed up
2779			 * cleanup and reset recovery.
2780			 */
2781			if (ret == -EIO || ret == -EAGAIN)
2782				cnic_bnx2x_kwqe_err(dev, kwqe);
2783		}
2784		i += work;
2785	}
2786	return 0;
2787}
2788
2789static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2790				   u32 num_wqes)
2791{
2792	int ret = -EINVAL;
2793	u32 layer_code;
2794
2795	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2796		return -EAGAIN;		/* bnx2x is down */
2797
2798	if (!num_wqes)
2799		return 0;
2800
2801	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2802	switch (layer_code) {
2803	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2804	case KWQE_FLAGS_LAYER_MASK_L4:
2805	case KWQE_FLAGS_LAYER_MASK_L2:
2806		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2807		break;
2808
2809	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2810		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2811		break;
2812	}
2813	return ret;
2814}
2815
2816static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2817{
2818	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2819		return KCQE_FLAGS_LAYER_MASK_L4;
2820
2821	return opflag & KCQE_FLAGS_LAYER_MASK;
2822}
2823
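/* Dispatch completed KCQEs to their ULP drivers, batching consecutive
 * entries that belong to the same protocol layer into one
 * indicate_kcqes() call; L2 entries are skipped.  Any ramrod
 * completions consumed are returned as SPQ credits at the end.
 */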
2824static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2825{
2826	struct cnic_local *cp = dev->cnic_priv;
2827	int i, j, comp = 0;
2828
2829	i = 0;
2830	j = 1;
2831	while (num_cqes) {
2832		struct cnic_ulp_ops *ulp_ops;
2833		int ulp_type;
2834		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2835		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2836
2837		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2838			comp++;
2839
2840		while (j < num_cqes) {
2841			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2842
2843			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2844				break;
2845
2846			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2847				comp++;
2848			j++;
2849		}
2850
2851		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2852			ulp_type = CNIC_ULP_RDMA;
2853		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2854			ulp_type = CNIC_ULP_ISCSI;
2855		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2856			ulp_type = CNIC_ULP_FCOE;
2857		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2858			ulp_type = CNIC_ULP_L4;
2859		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2860			goto end;
2861		else {
2862			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2863				   kcqe_op_flag);
2864			goto end;
2865		}
2866
2867		rcu_read_lock();
2868		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2869		if (likely(ulp_ops)) {
2870			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2871						  cp->completed_kcq + i, j);
2872		}
2873		rcu_read_unlock();
2874end:
2875		num_cqes -= j;
2876		i += j;
2877		j = 1;
2878	}
2879	if (unlikely(comp))
2880		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2881}
2882
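/* Gather new KCQEs, up to MAX_COMPLETED_KCQE, between the software and
 * hardware producer indices into cp->completed_kcq[].  Entries flagged
 * KCQE_FLAGS_NEXT are grouped with their successors, so sw_prod_idx is
 * only advanced past the last complete group; returns the count.
 */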
2883static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2884{
2885	struct cnic_local *cp = dev->cnic_priv;
2886	u16 i, ri, hw_prod, last;
2887	struct kcqe *kcqe;
2888	int kcqe_cnt = 0, last_cnt = 0;
2889
2890	i = ri = last = info->sw_prod_idx;
2891	ri &= MAX_KCQ_IDX;
2892	hw_prod = *info->hw_prod_idx_ptr;
2893	hw_prod = info->hw_idx(hw_prod);
2894
2895	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2896		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2897		cp->completed_kcq[kcqe_cnt++] = kcqe;
2898		i = info->next_idx(i);
2899		ri = i & MAX_KCQ_IDX;
2900		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2901			last_cnt = kcqe_cnt;
2902			last = i;
2903		}
2904	}
2905
2906	info->sw_prod_idx = last;
2907	return last_cnt;
2908}
2909
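/* Scan the bnx2x L2 receive completion ring for ramrod completions of
 * CLIENT_SETUP or HALT commands; returns the number found (always 0
 * for non-bnx2x devices).
 */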
2910static int cnic_l2_completion(struct cnic_local *cp)
2911{
2912	u16 hw_cons, sw_cons;
2913	struct cnic_uio_dev *udev = cp->udev;
2914	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2915					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
2916	u32 cmd;
2917	int comp = 0;
2918
2919	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2920		return 0;
2921
2922	hw_cons = *cp->rx_cons_ptr;
2923	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2924		hw_cons++;
2925
2926	sw_cons = cp->rx_cons;
2927	while (sw_cons != hw_cons) {
2928		u8 cqe_fp_flags;
2929
2930		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2931		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2932		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2933			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2934			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2935			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2936			    cmd == RAMROD_CMD_ID_ETH_HALT)
2937				comp++;
2938		}
2939		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2940	}
2941	return comp;
2942}
2943
2944static void cnic_chk_pkt_rings(struct cnic_local *cp)
2945{
2946	u16 rx_cons, tx_cons;
2947	int comp = 0;
2948
2949	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2950		return;
2951
2952	rx_cons = *cp->rx_cons_ptr;
2953	tx_cons = *cp->tx_cons_ptr;
2954	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2955		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2956			comp = cnic_l2_completion(cp);
2957
2958		cp->tx_cons = tx_cons;
2959		cp->rx_cons = rx_cons;
2960
2961		if (cp->udev)
2962			uio_event_notify(&cp->udev->cnic_uinfo);
2963	}
2964	if (comp)
2965		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2966}
2967
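/* Drain KCQ1 for a bnx2 device, servicing completions until the queue
 * is empty, then write back the software producer index and check the
 * L2 packet rings; returns the latest status block index.
 */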
2968static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2969{
2970	struct cnic_local *cp = dev->cnic_priv;
2971	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2972	int kcqe_cnt;
2973
2974	/* status block index must be read before reading other fields */
2975	rmb();
2976	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2977
2978	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2979
2980		service_kcqes(dev, kcqe_cnt);
2981
2982		/* Tell compiler that status_blk fields can change. */
2983		barrier();
2984		status_idx = (u16) *cp->kcq1.status_idx_ptr;
2985		/* status block index must be read first */
2986		rmb();
2987		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2988	}
2989
2990	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2991
2992	cnic_chk_pkt_rings(cp);
2993
2994	return status_idx;
2995}
2996
2997static int cnic_service_bnx2(void *data, void *status_blk)
2998{
2999	struct cnic_dev *dev = data;
3000
3001	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3002		struct status_block *sblk = status_blk;
3003
3004		return sblk->status_idx;
3005	}
3006
3007	return cnic_service_bnx2_queues(dev);
3008}
3009
3010static void cnic_service_bnx2_msix(struct tasklet_struct *t)
3011{
3012	struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
3013	struct cnic_dev *dev = cp->dev;
3014
3015	cp->last_status_idx = cnic_service_bnx2_queues(dev);
3016
3017	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3018		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3019}
3020
3021static void cnic_doirq(struct cnic_dev *dev)
3022{
3023	struct cnic_local *cp = dev->cnic_priv;
3024
3025	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3026		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
3027
3028		prefetch(cp->status_blk.gen);
3029		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
3030
3031		tasklet_schedule(&cp->cnic_irq_task);
3032	}
3033}
3034
3035static irqreturn_t cnic_irq(int irq, void *dev_instance)
3036{
3037	struct cnic_dev *dev = dev_instance;
3038	struct cnic_local *cp = dev->cnic_priv;
3039
3040	if (cp->ack_int)
3041		cp->ack_int(dev);
3042
3043	cnic_doirq(dev);
3044
3045	return IRQ_HANDLED;
3046}
3047
3048static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3049				      u16 index, u8 op, u8 update)
3050{
3051	struct bnx2x *bp = netdev_priv(dev->netdev);
3052	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
3053		       COMMAND_REG_INT_ACK);
3054	struct igu_ack_register igu_ack;
3055
3056	igu_ack.status_block_index = index;
3057	igu_ack.sb_id_and_flags =
3058			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3059			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3060			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3061			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3062
3063	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3064}
3065
3066static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3067			    u16 index, u8 op, u8 update)
3068{
3069	struct igu_regular cmd_data;
3070	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3071
3072	cmd_data.sb_id_and_flags =
3073		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
3074		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3075		(update << IGU_REGULAR_BUPDATE_SHIFT) |
3076		(op << IGU_REGULAR_ENABLE_INT_SHIFT);
3077
3079	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3080}
3081
3082static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3083{
3084	struct cnic_local *cp = dev->cnic_priv;
3085
3086	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3087			   IGU_INT_DISABLE, 0);
3088}
3089
3090static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3091{
3092	struct cnic_local *cp = dev->cnic_priv;
3093
3094	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3095			IGU_INT_DISABLE, 0);
3096}
3097
3098static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3099{
3100	struct cnic_local *cp = dev->cnic_priv;
3101
3102	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3103			   IGU_INT_ENABLE, 1);
3104}
3105
3106static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3107{
3108	struct cnic_local *cp = dev->cnic_priv;
3109
3110	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3111			IGU_INT_ENABLE, 1);
3112}
3113
3114static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3115{
3116	u32 last_status = *info->status_idx_ptr;
3117	int kcqe_cnt;
3118
3119	/* status block index must be read before reading the KCQ */
3120	rmb();
3121	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3122
3123		service_kcqes(dev, kcqe_cnt);
3124
3125		/* Tell compiler that sblk fields can change. */
3126		barrier();
3127
3128		last_status = *info->status_idx_ptr;
3129		/* status block index must be read before reading the KCQ */
3130		rmb();
3131	}
3132	return last_status;
3133}
3134
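/* Bottom-half tasklet for bnx2x: drain KCQ1 (and KCQ2 on FCoE-capable
 * chips), looping until both queues are quiesced against the same
 * status index, then update the producer indices and re-arm the IGU
 * interrupt.
 */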
3135static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
3136{
3137	struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
3138	struct cnic_dev *dev = cp->dev;
3139	struct bnx2x *bp = netdev_priv(dev->netdev);
3140	u32 status_idx, new_status_idx;
3141
3142	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3143		return;
3144
3145	while (1) {
3146		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3147
3148		CNIC_WR16(dev, cp->kcq1.io_addr,
3149			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3150
3151		if (!CNIC_SUPPORTS_FCOE(bp)) {
3152			cp->arm_int(dev, status_idx);
3153			break;
3154		}
3155
3156		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3157
3158		if (new_status_idx != status_idx)
3159			continue;
3160
3161		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3162			  MAX_KCQ_IDX);
3163
3164		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3165				status_idx, IGU_INT_ENABLE, 1);
3166
3167		break;
3168	}
3169}
3170
3171static int cnic_service_bnx2x(void *data, void *status_blk)
3172{
3173	struct cnic_dev *dev = data;
3174	struct cnic_local *cp = dev->cnic_priv;
3175
3176	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3177		cnic_doirq(dev);
3178
3179	cnic_chk_pkt_rings(cp);
3180
3181	return 0;
3182}
3183
3184static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3185{
3186	struct cnic_ulp_ops *ulp_ops;
3187
3188	if (if_type == CNIC_ULP_ISCSI)
3189		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3190
3191	mutex_lock(&cnic_lock);
3192	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3193					    lockdep_is_held(&cnic_lock));
3194	if (!ulp_ops) {
3195		mutex_unlock(&cnic_lock);
3196		return;
3197	}
3198	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3199	mutex_unlock(&cnic_lock);
3200
3201	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3202		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3203
3204	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3205}
3206
3207static void cnic_ulp_stop(struct cnic_dev *dev)
3208{
3209	struct cnic_local *cp = dev->cnic_priv;
3210	int if_type;
3211
3212	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3213		cnic_ulp_stop_one(cp, if_type);
3214}
3215
3216static void cnic_ulp_start(struct cnic_dev *dev)
3217{
3218	struct cnic_local *cp = dev->cnic_priv;
3219	int if_type;
3220
3221	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3222		struct cnic_ulp_ops *ulp_ops;
3223
3224		mutex_lock(&cnic_lock);
3225		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3226						    lockdep_is_held(&cnic_lock));
3227		if (!ulp_ops || !ulp_ops->cnic_start) {
3228			mutex_unlock(&cnic_lock);
3229			continue;
3230		}
3231		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3232		mutex_unlock(&cnic_lock);
3233
3234		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3235			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3236
3237		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3238	}
3239}
3240
3241static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3242{
3243	struct cnic_local *cp = dev->cnic_priv;
3244	struct cnic_ulp_ops *ulp_ops;
3245	int rc;
3246
3247	mutex_lock(&cnic_lock);
3248	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
3249					    lockdep_is_held(&cnic_lock));
3250	if (ulp_ops && ulp_ops->cnic_get_stats)
3251		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3252	else
3253		rc = -ENODEV;
3254	mutex_unlock(&cnic_lock);
3255	return rc;
3256}
3257
3258static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3259{
3260	struct cnic_dev *dev = data;
3261	int ulp_type = CNIC_ULP_ISCSI;
3262
3263	switch (info->cmd) {
3264	case CNIC_CTL_STOP_CMD:
3265		cnic_hold(dev);
3266
3267		cnic_ulp_stop(dev);
3268		cnic_stop_hw(dev);
3269
3270		cnic_put(dev);
3271		break;
3272	case CNIC_CTL_START_CMD:
3273		cnic_hold(dev);
3274
3275		if (!cnic_start_hw(dev))
3276			cnic_ulp_start(dev);
3277
3278		cnic_put(dev);
3279		break;
3280	case CNIC_CTL_STOP_ISCSI_CMD: {
3281		struct cnic_local *cp = dev->cnic_priv;
3282		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3283		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3284		break;
3285	}
3286	case CNIC_CTL_COMPLETION_CMD: {
3287		struct cnic_ctl_completion *comp = &info->data.comp;
3288		u32 cid = BNX2X_SW_CID(comp->cid);
3289		u32 l5_cid;
3290		struct cnic_local *cp = dev->cnic_priv;
3291
3292		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3293			break;
3294
3295		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3296			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3297
3298			if (unlikely(comp->error)) {
3299				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3300				netdev_err(dev->netdev,
3301					   "CID %x CFC delete comp error %x\n",
3302					   cid, comp->error);
3303			}
3304
3305			ctx->wait_cond = 1;
3306			wake_up(&ctx->waitq);
3307		}
3308		break;
3309	}
3310	case CNIC_CTL_FCOE_STATS_GET_CMD:
3311		ulp_type = CNIC_ULP_FCOE;
3312		fallthrough;
3313	case CNIC_CTL_ISCSI_STATS_GET_CMD:
3314		cnic_hold(dev);
3315		cnic_copy_ulp_stats(dev, ulp_type);
3316		cnic_put(dev);
3317		break;
3318
3319	default:
3320		return -EINVAL;
3321	}
3322	return 0;
3323}
3324
3325static void cnic_ulp_init(struct cnic_dev *dev)
3326{
3327	int i;
3328	struct cnic_local *cp = dev->cnic_priv;
3329
3330	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3331		struct cnic_ulp_ops *ulp_ops;
3332
3333		mutex_lock(&cnic_lock);
3334		ulp_ops = cnic_ulp_tbl_prot(i);
3335		if (!ulp_ops || !ulp_ops->cnic_init) {
3336			mutex_unlock(&cnic_lock);
3337			continue;
3338		}
3339		ulp_get(ulp_ops);
3340		mutex_unlock(&cnic_lock);
3341
3342		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3343			ulp_ops->cnic_init(dev);
3344
3345		ulp_put(ulp_ops);
3346	}
3347}
3348
3349static void cnic_ulp_exit(struct cnic_dev *dev)
3350{
3351	int i;
3352	struct cnic_local *cp = dev->cnic_priv;
3353
3354	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3355		struct cnic_ulp_ops *ulp_ops;
3356
3357		mutex_lock(&cnic_lock);
3358		ulp_ops = cnic_ulp_tbl_prot(i);
3359		if (!ulp_ops || !ulp_ops->cnic_exit) {
3360			mutex_unlock(&cnic_lock);
3361			continue;
3362		}
3363		ulp_get(ulp_ops);
3364		mutex_unlock(&cnic_lock);
3365
3366		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3367			ulp_ops->cnic_exit(dev);
3368
3369		ulp_put(ulp_ops);
3370	}
3371}
3372
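/* Build and submit an OFFLOAD_PG KWQE carrying the L2 header for the
 * connection: destination and source MAC addresses, EtherType, and an
 * optional VLAN tag (which adds 4 bytes to l2hdr_nbytes).
 */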
3373static int cnic_cm_offload_pg(struct cnic_sock *csk)
3374{
3375	struct cnic_dev *dev = csk->dev;
3376	struct l4_kwq_offload_pg *l4kwqe;
3377	struct kwqe *wqes[1];
3378
3379	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3380	memset(l4kwqe, 0, sizeof(*l4kwqe));
3381	wqes[0] = (struct kwqe *) l4kwqe;
3382
3383	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3384	l4kwqe->flags =
3385		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3386	l4kwqe->l2hdr_nbytes = ETH_HLEN;
3387
3388	l4kwqe->da0 = csk->ha[0];
3389	l4kwqe->da1 = csk->ha[1];
3390	l4kwqe->da2 = csk->ha[2];
3391	l4kwqe->da3 = csk->ha[3];
3392	l4kwqe->da4 = csk->ha[4];
3393	l4kwqe->da5 = csk->ha[5];
3394
3395	l4kwqe->sa0 = dev->mac_addr[0];
3396	l4kwqe->sa1 = dev->mac_addr[1];
3397	l4kwqe->sa2 = dev->mac_addr[2];
3398	l4kwqe->sa3 = dev->mac_addr[3];
3399	l4kwqe->sa4 = dev->mac_addr[4];
3400	l4kwqe->sa5 = dev->mac_addr[5];
3401
3402	l4kwqe->etype = ETH_P_IP;
3403	l4kwqe->ipid_start = DEF_IPID_START;
3404	l4kwqe->host_opaque = csk->l5_cid;
3405
3406	if (csk->vlan_id) {
3407		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3408		l4kwqe->vlan_tag = csk->vlan_id;
3409		l4kwqe->l2hdr_nbytes += 4;
3410	}
3411
3412	return dev->submit_kwqes(dev, wqes, 1);
3413}
3414
3415static int cnic_cm_update_pg(struct cnic_sock *csk)
3416{
3417	struct cnic_dev *dev = csk->dev;
3418	struct l4_kwq_update_pg *l4kwqe;
3419	struct kwqe *wqes[1];
3420
3421	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3422	memset(l4kwqe, 0, sizeof(*l4kwqe));
3423	wqes[0] = (struct kwqe *) l4kwqe;
3424
3425	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3426	l4kwqe->flags =
3427		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3428	l4kwqe->pg_cid = csk->pg_cid;
3429
3430	l4kwqe->da0 = csk->ha[0];
3431	l4kwqe->da1 = csk->ha[1];
3432	l4kwqe->da2 = csk->ha[2];
3433	l4kwqe->da3 = csk->ha[3];
3434	l4kwqe->da4 = csk->ha[4];
3435	l4kwqe->da5 = csk->ha[5];
3436
3437	l4kwqe->pg_host_opaque = csk->l5_cid;
3438	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3439
3440	return dev->submit_kwqes(dev, wqes, 1);
3441}
3442
3443static int cnic_cm_upload_pg(struct cnic_sock *csk)
3444{
3445	struct cnic_dev *dev = csk->dev;
3446	struct l4_kwq_upload *l4kwqe;
3447	struct kwqe *wqes[1];
3448
3449	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3450	memset(l4kwqe, 0, sizeof(*l4kwqe));
3451	wqes[0] = (struct kwqe *) l4kwqe;
3452
3453	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3454	l4kwqe->flags =
3455		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3456	l4kwqe->cid = csk->pg_cid;
3457
3458	return dev->submit_kwqes(dev, wqes, 1);
3459}
3460
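/* Build and submit the CONNECT1[/2]/3 KWQE chain for a socket.  The
 * CONNECT2 WQE carries the upper IPv6 address words and is included
 * only for IPv6 connections; the MSS is derived from the path MTU
 * minus the IP and TCP header sizes.
 */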
3461static int cnic_cm_conn_req(struct cnic_sock *csk)
3462{
3463	struct cnic_dev *dev = csk->dev;
3464	struct l4_kwq_connect_req1 *l4kwqe1;
3465	struct l4_kwq_connect_req2 *l4kwqe2;
3466	struct l4_kwq_connect_req3 *l4kwqe3;
3467	struct kwqe *wqes[3];
3468	u8 tcp_flags = 0;
3469	int num_wqes = 2;
3470
3471	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3472	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3473	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3474	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3475	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3476	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3477
3478	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3479	l4kwqe3->flags =
3480		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3481	l4kwqe3->ka_timeout = csk->ka_timeout;
3482	l4kwqe3->ka_interval = csk->ka_interval;
3483	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3484	l4kwqe3->tos = csk->tos;
3485	l4kwqe3->ttl = csk->ttl;
3486	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3487	l4kwqe3->pmtu = csk->mtu;
3488	l4kwqe3->rcv_buf = csk->rcv_buf;
3489	l4kwqe3->snd_buf = csk->snd_buf;
3490	l4kwqe3->seed = csk->seed;
3491
3492	wqes[0] = (struct kwqe *) l4kwqe1;
3493	if (test_bit(SK_F_IPV6, &csk->flags)) {
3494		wqes[1] = (struct kwqe *) l4kwqe2;
3495		wqes[2] = (struct kwqe *) l4kwqe3;
3496		num_wqes = 3;
3497
3498		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3499		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3500		l4kwqe2->flags =
3501			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3502			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3503		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3504		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3505		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3506		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3507		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3508		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3509		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3510			       sizeof(struct tcphdr);
3511	} else {
3512		wqes[1] = (struct kwqe *) l4kwqe3;
3513		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3514			       sizeof(struct tcphdr);
3515	}
3516
3517	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3518	l4kwqe1->flags =
3519		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3520		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3521	l4kwqe1->cid = csk->cid;
3522	l4kwqe1->pg_cid = csk->pg_cid;
3523	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3524	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3525	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3526	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3527	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3528		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3529	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3530		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3531	if (csk->tcp_flags & SK_TCP_NAGLE)
3532		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3533	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3534		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3535	if (csk->tcp_flags & SK_TCP_SACK)
3536		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3537	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3538		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3539
3540	l4kwqe1->tcp_flags = tcp_flags;
3541
3542	return dev->submit_kwqes(dev, wqes, num_wqes);
3543}
3544
3545static int cnic_cm_close_req(struct cnic_sock *csk)
3546{
3547	struct cnic_dev *dev = csk->dev;
3548	struct l4_kwq_close_req *l4kwqe;
3549	struct kwqe *wqes[1];
3550
3551	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3552	memset(l4kwqe, 0, sizeof(*l4kwqe));
3553	wqes[0] = (struct kwqe *) l4kwqe;
3554
3555	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3556	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3557	l4kwqe->cid = csk->cid;
3558
3559	return dev->submit_kwqes(dev, wqes, 1);
3560}
3561
3562static int cnic_cm_abort_req(struct cnic_sock *csk)
3563{
3564	struct cnic_dev *dev = csk->dev;
3565	struct l4_kwq_reset_req *l4kwqe;
3566	struct kwqe *wqes[1];
3567
3568	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3569	memset(l4kwqe, 0, sizeof(*l4kwqe));
3570	wqes[0] = (struct kwqe *) l4kwqe;
3571
3572	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3573	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3574	l4kwqe->cid = csk->cid;
3575
3576	return dev->submit_kwqes(dev, wqes, 1);
3577}
3578
3579static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3580			  u32 l5_cid, struct cnic_sock **csk, void *context)
3581{
3582	struct cnic_local *cp = dev->cnic_priv;
3583	struct cnic_sock *csk1;
3584
3585	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3586		return -EINVAL;
3587
3588	if (cp->ctx_tbl) {
3589		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3590
3591		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3592			return -EAGAIN;
3593	}
3594
3595	csk1 = &cp->csk_tbl[l5_cid];
3596	if (atomic_read(&csk1->ref_count))
3597		return -EAGAIN;
3598
3599	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3600		return -EBUSY;
3601
3602	csk1->dev = dev;
3603	csk1->cid = cid;
3604	csk1->l5_cid = l5_cid;
3605	csk1->ulp_type = ulp_type;
3606	csk1->context = context;
3607
3608	csk1->ka_timeout = DEF_KA_TIMEOUT;
3609	csk1->ka_interval = DEF_KA_INTERVAL;
3610	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3611	csk1->tos = DEF_TOS;
3612	csk1->ttl = DEF_TTL;
3613	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3614	csk1->rcv_buf = DEF_RCV_BUF;
3615	csk1->snd_buf = DEF_SND_BUF;
3616	csk1->seed = DEF_SEED;
3617	csk1->tcp_flags = 0;
3618
3619	*csk = csk1;
3620	return 0;
3621}
3622
3623static void cnic_cm_cleanup(struct cnic_sock *csk)
3624{
3625	if (csk->src_port) {
3626		struct cnic_dev *dev = csk->dev;
3627		struct cnic_local *cp = dev->cnic_priv;
3628
3629		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3630		csk->src_port = 0;
3631	}
3632}
3633
3634static void cnic_close_conn(struct cnic_sock *csk)
3635{
3636	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3637		cnic_cm_upload_pg(csk);
3638		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3639	}
3640	cnic_cm_cleanup(csk);
3641}
3642
3643static int cnic_cm_destroy(struct cnic_sock *csk)
3644{
3645	if (!cnic_in_use(csk))
3646		return -EINVAL;
3647
3648	csk_hold(csk);
3649	clear_bit(SK_F_INUSE, &csk->flags);
3650	smp_mb__after_atomic();
3651	while (atomic_read(&csk->ref_count) != 1)
3652		msleep(1);
3653	cnic_cm_cleanup(csk);
3654
3655	csk->flags = 0;
3656	csk_put(csk);
3657	return 0;
3658}
3659
3660static inline u16 cnic_get_vlan(struct net_device *dev,
3661				struct net_device **vlan_dev)
3662{
3663	if (is_vlan_dev(dev)) {
3664		*vlan_dev = vlan_dev_real_dev(dev);
3665		return vlan_dev_vlan_id(dev);
3666	}
3667	*vlan_dev = dev;
3668	return 0;
3669}
3670
3671static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3672			     struct dst_entry **dst)
3673{
3674#if defined(CONFIG_INET)
3675	struct rtable *rt;
3676
3677	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3678	if (!IS_ERR(rt)) {
3679		*dst = &rt->dst;
3680		return 0;
3681	}
3682	return PTR_ERR(rt);
3683#else
3684	return -ENETUNREACH;
3685#endif
3686}
3687
3688static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3689			     struct dst_entry **dst)
3690{
3691#if IS_ENABLED(CONFIG_IPV6)
3692	struct flowi6 fl6;
3693
3694	memset(&fl6, 0, sizeof(fl6));
3695	fl6.daddr = dst_addr->sin6_addr;
3696	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3697		fl6.flowi6_oif = dst_addr->sin6_scope_id;
3698
3699	*dst = ip6_route_output(&init_net, NULL, &fl6);
3700	if ((*dst)->error) {
3701		dst_release(*dst);
3702		*dst = NULL;
3703		return -ENETUNREACH;
3704	} else
3705		return 0;
3706#endif
3707
3708	return -ENETUNREACH;
3709}
3710
3711static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3712					   int ulp_type)
3713{
3714	struct cnic_dev *dev = NULL;
3715	struct dst_entry *dst;
3716	struct net_device *netdev = NULL;
3717	int err = -ENETUNREACH;
3718
3719	if (dst_addr->sin_family == AF_INET)
3720		err = cnic_get_v4_route(dst_addr, &dst);
3721	else if (dst_addr->sin_family == AF_INET6) {
3722		struct sockaddr_in6 *dst_addr6 =
3723			(struct sockaddr_in6 *) dst_addr;
3724
3725		err = cnic_get_v6_route(dst_addr6, &dst);
3726	} else
3727		return NULL;
3728
3729	if (err)
3730		return NULL;
3731
3732	if (!dst->dev)
3733		goto done;
3734
3735	cnic_get_vlan(dst->dev, &netdev);
3736
3737	dev = cnic_from_netdev(netdev);
3738
3739done:
3740	dst_release(dst);
3741	if (dev)
3742		cnic_put(dev);
3743	return dev;
3744}
3745
3746static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3747{
3748	struct cnic_dev *dev = csk->dev;
3749	struct cnic_local *cp = dev->cnic_priv;
3750
3751	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3752}
3753
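/* Resolve the route to the destination, record the destination address
 * and port, pick up the VLAN ID and MTU when the route egresses our
 * netdev, and reserve a local TCP port: the caller-supplied port is
 * used if it lies within the CNIC range and is free, otherwise a new
 * port ID is allocated from the table.
 */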
3754static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3755{
3756	struct cnic_dev *dev = csk->dev;
3757	struct cnic_local *cp = dev->cnic_priv;
3758	int is_v6, rc = 0;
3759	struct dst_entry *dst = NULL;
3760	struct net_device *realdev;
3761	__be16 local_port;
3762	u32 port_id;
3763
3764	if (saddr->local.v6.sin6_family == AF_INET6 &&
3765	    saddr->remote.v6.sin6_family == AF_INET6)
3766		is_v6 = 1;
3767	else if (saddr->local.v4.sin_family == AF_INET &&
3768		 saddr->remote.v4.sin_family == AF_INET)
3769		is_v6 = 0;
3770	else
3771		return -EINVAL;
3772
3773	clear_bit(SK_F_IPV6, &csk->flags);
3774
3775	if (is_v6) {
3776		set_bit(SK_F_IPV6, &csk->flags);
3777		cnic_get_v6_route(&saddr->remote.v6, &dst);
3778
3779		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3780		       sizeof(struct in6_addr));
3781		csk->dst_port = saddr->remote.v6.sin6_port;
3782		local_port = saddr->local.v6.sin6_port;
3783
3784	} else {
3785		cnic_get_v4_route(&saddr->remote.v4, &dst);
3786
3787		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3788		csk->dst_port = saddr->remote.v4.sin_port;
3789		local_port = saddr->local.v4.sin_port;
3790	}
3791
3792	csk->vlan_id = 0;
3793	csk->mtu = dev->netdev->mtu;
3794	if (dst && dst->dev) {
3795		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3796		if (realdev == dev->netdev) {
3797			csk->vlan_id = vlan;
3798			csk->mtu = dst_mtu(dst);
3799		}
3800	}
3801
3802	port_id = be16_to_cpu(local_port);
3803	if (port_id >= CNIC_LOCAL_PORT_MIN &&
3804	    port_id < CNIC_LOCAL_PORT_MAX) {
3805		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3806			port_id = 0;
3807	} else
3808		port_id = 0;
3809
3810	if (!port_id) {
3811		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3812		if (port_id == -1) {
3813			rc = -ENOMEM;
3814			goto err_out;
3815		}
3816		local_port = cpu_to_be16(port_id);
3817	}
3818	csk->src_port = local_port;
3819
3820err_out:
3821	dst_release(dst);
3822	return rc;
3823}
3824
3825static void cnic_init_csk_state(struct cnic_sock *csk)
3826{
3827	csk->state = 0;
3828	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3829	clear_bit(SK_F_CLOSING, &csk->flags);
3830}
3831
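/* Begin an offloaded connection: resolve the route and source port,
 * then send ISCSI_KEVENT_PATH_REQ so userspace can supply the path.
 * The offload continues asynchronously once the path is resolved.
 */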
3832static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3833{
3834	struct cnic_local *cp = csk->dev->cnic_priv;
3835	int err = 0;
3836
3837	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3838		return -EOPNOTSUPP;
3839
3840	if (!cnic_in_use(csk))
3841		return -EINVAL;
3842
3843	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3844		return -EINVAL;
3845
3846	cnic_init_csk_state(csk);
3847
3848	err = cnic_get_route(csk, saddr);
3849	if (err)
3850		goto err_out;
3851
3852	err = cnic_resolve_addr(csk, saddr);
3853	if (!err)
3854		return 0;
3855
3856err_out:
3857	clear_bit(SK_F_CONNECT_START, &csk->flags);
3858	return err;
3859}
3860
3861static int cnic_cm_abort(struct cnic_sock *csk)
3862{
3863	struct cnic_local *cp = csk->dev->cnic_priv;
3864	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3865
3866	if (!cnic_in_use(csk))
3867		return -EINVAL;
3868
3869	if (cnic_abort_prep(csk))
3870		return cnic_cm_abort_req(csk);
3871
3872	/* Getting here means that we haven't started the connect, the
3873	 * connect was unsuccessful, or it has been reset by the target.
3874	 */
3875
3876	cp->close_conn(csk, opcode);
3877	if (csk->state != opcode) {
3878		/* Wait for remote reset sequence to complete */
3879		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3880			msleep(1);
3881
3882		return -EALREADY;
3883	}
3884
3885	return 0;
3886}
3887
3888static int cnic_cm_close(struct cnic_sock *csk)
3889{
3890	if (!cnic_in_use(csk))
3891		return -EINVAL;
3892
3893	if (cnic_close_prep(csk)) {
3894		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3895		return cnic_cm_close_req(csk);
3896	} else {
3897		/* Wait for remote reset sequence to complete */
3898		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3899			msleep(1);
3900
3901		return -EALREADY;
3902	}
3903	return 0;
3904}
3905
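/* Deliver a connection event to the matching ULP callback under RCU. */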
3906static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3907			   u8 opcode)
3908{
3909	struct cnic_ulp_ops *ulp_ops;
3910	int ulp_type = csk->ulp_type;
3911
3912	rcu_read_lock();
3913	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3914	if (ulp_ops) {
3915		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3916			ulp_ops->cm_connect_complete(csk);
3917		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3918			ulp_ops->cm_close_complete(csk);
3919		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3920			ulp_ops->cm_remote_abort(csk);
3921		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3922			ulp_ops->cm_abort_complete(csk);
3923		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3924			ulp_ops->cm_remote_close(csk);
3925	}
3926	rcu_read_unlock();
3927}
3928
3929static int cnic_cm_set_pg(struct cnic_sock *csk)
3930{
3931	if (cnic_offld_prep(csk)) {
3932		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3933			cnic_cm_update_pg(csk);
3934		else
3935			cnic_cm_offload_pg(csk);
3936	}
3937	return 0;
3938}
3939
3940static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3941{
3942	struct cnic_local *cp = dev->cnic_priv;
3943	u32 l5_cid = kcqe->pg_host_opaque;
3944	u8 opcode = kcqe->op_code;
3945	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3946
3947	csk_hold(csk);
3948	if (!cnic_in_use(csk))
3949		goto done;
3950
3951	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3952		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3953		goto done;
3954	}
3955	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3956	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3957		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3958		cnic_cm_upcall(cp, csk,
3959			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3960		goto done;
3961	}
3962
3963	csk->pg_cid = kcqe->pg_cid;
3964	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3965	cnic_cm_conn_req(csk);
3966
3967done:
3968	csk_put(csk);
3969}
3970
3971static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3972{
3973	struct cnic_local *cp = dev->cnic_priv;
3974	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3975	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3976	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3977
3978	ctx->timestamp = jiffies;
3979	ctx->wait_cond = 1;
3980	wake_up(&ctx->waitq);
3981}
3982
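/* Demultiplex one L4/L5 KCQE.  PG and FCoE-terminate completions are
 * handled separately; for everything else, opcodes with bit 7 set
 * carry the socket index in the cid field rather than conn_id.
 */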
3983static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3984{
3985	struct cnic_local *cp = dev->cnic_priv;
3986	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3987	u8 opcode = l4kcqe->op_code;
3988	u32 l5_cid;
3989	struct cnic_sock *csk;
3990
3991	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3992		cnic_process_fcoe_term_conn(dev, kcqe);
3993		return;
3994	}
3995	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3996	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3997		cnic_cm_process_offld_pg(dev, l4kcqe);
3998		return;
3999	}
4000
4001	l5_cid = l4kcqe->conn_id;
4002	if (opcode & 0x80)
4003		l5_cid = l4kcqe->cid;
4004	if (l5_cid >= MAX_CM_SK_TBL_SZ)
4005		return;
4006
4007	csk = &cp->csk_tbl[l5_cid];
4008	csk_hold(csk);
4009
4010	if (!cnic_in_use(csk)) {
4011		csk_put(csk);
4012		return;
4013	}
4014
4015	switch (opcode) {
4016	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
4017		if (l4kcqe->status != 0) {
4018			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4019			cnic_cm_upcall(cp, csk,
4020				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
4021		}
4022		break;
4023	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
4024		if (l4kcqe->status == 0)
4025			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
4026		else if (l4kcqe->status ==
4027			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4028			set_bit(SK_F_HW_ERR, &csk->flags);
4029
4030		smp_mb__before_atomic();
4031		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4032		cnic_cm_upcall(cp, csk, opcode);
4033		break;
4034
4035	case L5CM_RAMROD_CMD_ID_CLOSE: {
4036		struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
4037
4038		if (l4kcqe->status == 0 && l5kcqe->completion_status == 0)
4039			break;
4040
4041		netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4042			    l4kcqe->status, l5kcqe->completion_status);
4043		opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4044	}
4045		fallthrough;
4046	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4047	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4048	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4049	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4050	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4051		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4052			set_bit(SK_F_HW_ERR, &csk->flags);
4053
4054		cp->close_conn(csk, opcode);
4055		break;
4056
4057	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
4058		/* after we already sent CLOSE_REQ */
4059		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4060		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
4061		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
4062			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
4063		else
4064			cnic_cm_upcall(cp, csk, opcode);
4065		break;
4066	}
4067	csk_put(csk);
4068}
4069
4070static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
4071{
4072	struct cnic_dev *dev = data;
4073	int i;
4074
4075	for (i = 0; i < num; i++)
4076		cnic_cm_process_kcqe(dev, kcqe[i]);
4077}
4078
4079static struct cnic_ulp_ops cm_ulp_ops = {
4080	.indicate_kcqes		= cnic_cm_indicate_kcqe,
4081};
4082
4083static void cnic_cm_free_mem(struct cnic_dev *dev)
4084{
4085	struct cnic_local *cp = dev->cnic_priv;
4086
4087	kvfree(cp->csk_tbl);
4088	cp->csk_tbl = NULL;
4089	cnic_free_id_tbl(&cp->csk_port_tbl);
4090}
4091
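/* Allocate the socket table and the local source-port id table.  The
 * port allocator is seeded at a random offset within
 * CNIC_LOCAL_PORT_RANGE.
 */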
4092static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4093{
4094	struct cnic_local *cp = dev->cnic_priv;
4095	u32 port_id;
4096	int i;
4097
4098	cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
4099			       GFP_KERNEL);
4100	if (!cp->csk_tbl)
4101		return -ENOMEM;
4102
4103	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
4104		atomic_set(&cp->csk_tbl[i].ref_count, 0);
4105
4106	port_id = get_random_u32_below(CNIC_LOCAL_PORT_RANGE);
4107	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
4108			     CNIC_LOCAL_PORT_MIN, port_id)) {
4109		cnic_cm_free_mem(dev);
4110		return -ENOMEM;
4111	}
4112	return 0;
4113}
4114
4115static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4116{
4117	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4118		/* Unsolicited RESET_COMP or RESET_RECEIVED */
4119		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4120		csk->state = opcode;
4121	}
4122
4123	/* 1. If event opcode matches the expected event in csk->state
4124	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4125	 *    event
4126	 * 3. If the expected event is 0, meaning the connection was never
4127	 *    established, we accept the opcode from cm_abort.
4128	 */
4129	if (opcode == csk->state || csk->state == 0 ||
4130	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4131	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4132		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4133			if (csk->state == 0)
4134				csk->state = opcode;
4135			return 1;
4136		}
4137	}
4138	return 0;
4139}
4140
4141static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4142{
4143	struct cnic_dev *dev = csk->dev;
4144	struct cnic_local *cp = dev->cnic_priv;
4145
4146	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4147		cnic_cm_upcall(cp, csk, opcode);
4148		return;
4149	}
4150
4151	clear_bit(SK_F_CONNECT_START, &csk->flags);
4152	cnic_close_conn(csk);
4153	csk->state = opcode;
4154	cnic_cm_upcall(cp, csk, opcode);
4155}
4156
4157static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4158{
4159}
4160
4161static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4162{
4163	u32 seed;
4164
4165	seed = get_random_u32();
4166	cnic_ctx_wr(dev, 45, 0, seed);
4167	return 0;
4168}
4169
4170static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4171{
4172	struct cnic_dev *dev = csk->dev;
4173	struct cnic_local *cp = dev->cnic_priv;
4174	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4175	union l5cm_specific_data l5_data;
4176	u32 cmd = 0;
4177	int close_complete = 0;
4178
4179	switch (opcode) {
4180	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4181	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4182	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4183		if (cnic_ready_to_close(csk, opcode)) {
4184			if (test_bit(SK_F_HW_ERR, &csk->flags))
4185				close_complete = 1;
4186			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4187				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4188			else
4189				close_complete = 1;
4190		}
4191		break;
4192	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4193		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4194		break;
4195	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4196		close_complete = 1;
4197		break;
4198	}
4199	if (cmd) {
4200		memset(&l5_data, 0, sizeof(l5_data));
4201
4202		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4203				    &l5_data);
4204	} else if (close_complete) {
4205		ctx->timestamp = jiffies;
4206		cnic_close_conn(csk);
4207		cnic_cm_upcall(cp, csk, csk->state);
4208	}
4209}
4210
4211static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4212{
4213	struct cnic_local *cp = dev->cnic_priv;
4214
4215	if (!cp->ctx_tbl)
4216		return;
4217
4218	if (!netif_running(dev->netdev))
4219		return;
4220
4221	cnic_bnx2x_delete_wait(dev, 0);
4222
4223	cancel_delayed_work(&cp->delete_task);
4224	flush_workqueue(cnic_wq);
4225
4226	if (atomic_read(&cp->iscsi_conn) != 0)
4227		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4228			    atomic_read(&cp->iscsi_conn));
4229}
4230
4231static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4232{
4233	struct bnx2x *bp = netdev_priv(dev->netdev);
4234	u32 pfid = bp->pfid;
4235	u32 port = BP_PORT(bp);
4236
4237	cnic_init_bnx2x_mac(dev);
4238	cnic_bnx2x_set_tcp_options(dev, 0, 1);
4239
4240	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4241		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4242
4243	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4244		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4245	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4246		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4247		DEF_MAX_DA_COUNT);
4248
4249	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4250		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4251	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4252		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4253	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4254		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4255	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4256		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4257
4258	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4259		DEF_MAX_CWND);
4260	return 0;
4261}
4262
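/* Delayed-work handler that retires offloaded connections.  Contexts
 * flagged CTX_FL_DELETE_WAIT are destroyed only after a 2 second quiet
 * period; if any are still too young the work requeues itself every
 * 10 ms until the table drains.
 */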
4263static void cnic_delete_task(struct work_struct *work)
4264{
4265	struct cnic_local *cp;
4266	struct cnic_dev *dev;
4267	u32 i;
4268	int need_resched = 0;
4269
4270	cp = container_of(work, struct cnic_local, delete_task.work);
4271	dev = cp->dev;
4272
4273	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4274		struct drv_ctl_info info;
4275
4276		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4277
4278		memset(&info, 0, sizeof(struct drv_ctl_info));
4279		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4280		cp->ethdev->drv_ctl(dev->netdev, &info);
4281	}
4282
4283	for (i = 0; i < cp->max_cid_space; i++) {
4284		struct cnic_context *ctx = &cp->ctx_tbl[i];
4285		int err;
4286
4287		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4288		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4289			continue;
4290
4291		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4292			need_resched = 1;
4293			continue;
4294		}
4295
4296		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4297			continue;
4298
4299		err = cnic_bnx2x_destroy_ramrod(dev, i);
4300
4301		cnic_free_bnx2x_conn_resc(dev, i);
4302		if (!err) {
4303			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4304				atomic_dec(&cp->iscsi_conn);
4305
4306			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4307		}
4308	}
4309
4310	if (need_resched)
4311		queue_delayed_work(cnic_wq, &cp->delete_task,
4312				   msecs_to_jiffies(10));
4313
4314}
4315
4316static int cnic_cm_open(struct cnic_dev *dev)
4317{
4318	struct cnic_local *cp = dev->cnic_priv;
4319	int err;
4320
4321	err = cnic_cm_alloc_mem(dev);
4322	if (err)
4323		return err;
4324
4325	err = cp->start_cm(dev);
4326
4327	if (err)
4328		goto err_out;
4329
4330	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4331
4332	dev->cm_create = cnic_cm_create;
4333	dev->cm_destroy = cnic_cm_destroy;
4334	dev->cm_connect = cnic_cm_connect;
4335	dev->cm_abort = cnic_cm_abort;
4336	dev->cm_close = cnic_cm_close;
4337	dev->cm_select_dev = cnic_cm_select_dev;
4338
4339	cp->ulp_handle[CNIC_ULP_L4] = dev;
4340	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4341	return 0;
4342
4343err_out:
4344	cnic_cm_free_mem(dev);
4345	return err;
4346}
4347
4348static int cnic_cm_shutdown(struct cnic_dev *dev)
4349{
4350	struct cnic_local *cp = dev->cnic_priv;
4351	int i;
4352
4353	if (!cp->csk_tbl)
4354		return 0;
4355
4356	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4357		struct cnic_sock *csk = &cp->csk_tbl[i];
4358
4359		clear_bit(SK_F_INUSE, &csk->flags);
4360		cnic_cm_cleanup(csk);
4361	}
4362	cnic_cm_free_mem(dev);
4363
4364	return 0;
4365}
4366
4367static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4368{
4369	u32 cid_addr;
4370	int i;
4371
4372	cid_addr = GET_CID_ADDR(cid);
4373
4374	for (i = 0; i < CTX_SIZE; i += 4)
4375		cnic_ctx_wr(dev, cid_addr, i, 0);
4376}
4377
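/* Program (or, if !valid, invalidate) the 5709 host page-table entries
 * mapping the context blocks.  Each entry write is polled up to ten
 * times at 5 us intervals for WRITE_REQ to clear; -EBUSY means the
 * chip never acknowledged the update.
 */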
4378static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4379{
4380	struct cnic_local *cp = dev->cnic_priv;
4381	int ret = 0, i;
4382	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4383
4384	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4385		return 0;
4386
4387	for (i = 0; i < cp->ctx_blks; i++) {
4388		int j;
4389		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4390		u32 val;
4391
4392		memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
4393
4394		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4395			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4396		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4397			(u64) cp->ctx_arr[i].mapping >> 32);
4398		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4399			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4400		for (j = 0; j < 10; j++) {
4401
4402			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4403			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4404				break;
4405			udelay(5);
4406		}
4407		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4408			ret = -EBUSY;
4409			break;
4410		}
4411	}
4412	return ret;
4413}
4414
4415static void cnic_free_irq(struct cnic_dev *dev)
4416{
4417	struct cnic_local *cp = dev->cnic_priv;
4418	struct cnic_eth_dev *ethdev = cp->ethdev;
4419
4420	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4421		cp->disable_int_sync(dev);
4422		tasklet_kill(&cp->cnic_irq_task);
4423		free_irq(ethdev->irq_arr[0].vector, dev);
4424	}
4425}
4426
4427static int cnic_request_irq(struct cnic_dev *dev)
4428{
4429	struct cnic_local *cp = dev->cnic_priv;
4430	struct cnic_eth_dev *ethdev = cp->ethdev;
4431	int err;
4432
4433	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4434	if (err)
4435		tasklet_disable(&cp->cnic_irq_task);
4436
4437	return err;
4438}
4439
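/* Set up the bnx2 KCQ interrupt path.  With MSI-X we program a
 * one-shot status block and service it from a tasklet; in either mode
 * the KCQ completion producer index must drain to zero (forced by
 * coalesce-now writes) before the queue is considered reset.
 */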
4440static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4441{
4442	struct cnic_local *cp = dev->cnic_priv;
4443	struct cnic_eth_dev *ethdev = cp->ethdev;
4444
4445	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4446		int err, i = 0;
4447		int sblk_num = cp->status_blk_num;
4448		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4449			   BNX2_HC_SB_CONFIG_1;
4450
4451		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4452
4453		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4454		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4455		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4456
4457		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4458		tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
4459		err = cnic_request_irq(dev);
4460		if (err)
4461			return err;
4462
4463		while (cp->status_blk.bnx2->status_completion_producer_index &&
4464		       i < 10) {
4465			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4466				1 << (11 + sblk_num));
4467			udelay(10);
4468			i++;
4469			barrier();
4470		}
4471		if (cp->status_blk.bnx2->status_completion_producer_index) {
4472			cnic_free_irq(dev);
4473			goto failed;
4474		}
4475
4476	} else {
4477		struct status_block *sblk = cp->status_blk.gen;
4478		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4479		int i = 0;
4480
4481		while (sblk->status_completion_producer_index && i < 10) {
4482			CNIC_WR(dev, BNX2_HC_COMMAND,
4483				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4484			udelay(10);
4485			i++;
4486			barrier();
4487		}
4488		if (sblk->status_completion_producer_index)
4489			goto failed;
4490
4491	}
4492	return 0;
4493
4494failed:
4495	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4496	return -EBUSY;
4497}
4498
4499static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4500{
4501	struct cnic_local *cp = dev->cnic_priv;
4502	struct cnic_eth_dev *ethdev = cp->ethdev;
4503
4504	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4505		return;
4506
4507	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4508		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4509}
4510
4511static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4512{
4513	struct cnic_local *cp = dev->cnic_priv;
4514	struct cnic_eth_dev *ethdev = cp->ethdev;
4515
4516	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4517		return;
4518
4519	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4520		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4521	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4522	synchronize_irq(ethdev->irq_arr[0].vector);
4523}
4524
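/* Build the L2 TX ring backing the UIO interface.  The ring uses the
 * default cid 20, or a TSS cid derived from the status block number
 * when MSI-X is in use.
 */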
4525static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4526{
4527	struct cnic_local *cp = dev->cnic_priv;
4528	struct cnic_eth_dev *ethdev = cp->ethdev;
4529	struct cnic_uio_dev *udev = cp->udev;
4530	u32 cid_addr, tx_cid, sb_id;
4531	u32 val, offset0, offset1, offset2, offset3;
4532	int i;
4533	struct bnx2_tx_bd *txbd;
4534	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4535	struct status_block *s_blk = cp->status_blk.gen;
4536
4537	sb_id = cp->status_blk_num;
4538	tx_cid = 20;
4539	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4540	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4541		struct status_block_msix *sblk = cp->status_blk.bnx2;
4542
4543		tx_cid = TX_TSS_CID + sb_id - 1;
4544		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4545			(TX_TSS_CID << 7));
4546		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4547	}
4548	cp->tx_cons = *cp->tx_cons_ptr;
4549
4550	cid_addr = GET_CID_ADDR(tx_cid);
4551	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
4552		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4553
4554		for (i = 0; i < PHY_CTX_SIZE; i += 4)
4555			cnic_ctx_wr(dev, cid_addr2, i, 0);
4556
4557		offset0 = BNX2_L2CTX_TYPE_XI;
4558		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4559		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4560		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4561	} else {
4562		cnic_init_context(dev, tx_cid);
4563		cnic_init_context(dev, tx_cid + 1);
4564
4565		offset0 = BNX2_L2CTX_TYPE;
4566		offset1 = BNX2_L2CTX_CMD_TYPE;
4567		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4568		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4569	}
4570	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4571	cnic_ctx_wr(dev, cid_addr, offset0, val);
4572
4573	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4574	cnic_ctx_wr(dev, cid_addr, offset1, val);
4575
4576	txbd = udev->l2_ring;
4577
4578	buf_map = udev->l2_buf_map;
4579	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
4580		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4581		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4582	}
4583	val = (u64) ring_map >> 32;
4584	cnic_ctx_wr(dev, cid_addr, offset2, val);
4585	txbd->tx_bd_haddr_hi = val;
4586
4587	val = (u64) ring_map & 0xffffffff;
4588	cnic_ctx_wr(dev, cid_addr, offset3, val);
4589	txbd->tx_bd_haddr_lo = val;
4590}
4591
4592static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4593{
4594	struct cnic_local *cp = dev->cnic_priv;
4595	struct cnic_eth_dev *ethdev = cp->ethdev;
4596	struct cnic_uio_dev *udev = cp->udev;
4597	u32 cid_addr, sb_id, val, coal_reg, coal_val;
4598	int i;
4599	struct bnx2_rx_bd *rxbd;
4600	struct status_block *s_blk = cp->status_blk.gen;
4601	dma_addr_t ring_map = udev->l2_ring_map;
4602
4603	sb_id = cp->status_blk_num;
4604	cnic_init_context(dev, 2);
4605	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4606	coal_reg = BNX2_HC_COMMAND;
4607	coal_val = CNIC_RD(dev, coal_reg);
4608	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4609		struct status_block_msix *sblk = cp->status_blk.bnx2;
4610
4611		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4612		coal_reg = BNX2_HC_COALESCE_NOW;
4613		coal_val = 1 << (11 + sb_id);
4614	}
4615	i = 0;
4616	while (*cp->rx_cons_ptr == 0 && i < 10) {
4617		CNIC_WR(dev, coal_reg, coal_val);
4618		udelay(10);
4619		i++;
4620		barrier();
4621	}
4622	cp->rx_cons = *cp->rx_cons_ptr;
4623
4624	cid_addr = GET_CID_ADDR(2);
4625	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4626	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4627	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4628
4629	if (sb_id == 0)
4630		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4631	else
4632		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4633	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4634
4635	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
4636	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
4637		dma_addr_t buf_map;
4638		int n = (i % cp->l2_rx_ring_size) + 1;
4639
4640		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4641		rxbd->rx_bd_len = cp->l2_single_buf_size;
4642		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4643		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4644		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4645	}
4646	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
4647	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4648	rxbd->rx_bd_haddr_hi = val;
4649
4650	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
4651	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4652	rxbd->rx_bd_haddr_lo = val;
4653
4654	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4655	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4656}
4657
4658static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4659{
4660	struct kwqe *wqes[1], l2kwqe;
4661
4662	memset(&l2kwqe, 0, sizeof(l2kwqe));
4663	wqes[0] = &l2kwqe;
4664	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4665			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
4666			       KWQE_OPCODE_SHIFT) | 2;
4667	dev->submit_kwqes(dev, wqes, 1);
4668}
4669
4670static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4671{
4672	struct cnic_local *cp = dev->cnic_priv;
4673	u32 val;
4674
4675	val = cp->func << 2;
4676
4677	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4678
4679	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4680			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4681	dev->mac_addr[0] = (u8) (val >> 8);
4682	dev->mac_addr[1] = (u8) val;
4683
4684	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4685
4686	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4687			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4688	dev->mac_addr[2] = (u8) (val >> 24);
4689	dev->mac_addr[3] = (u8) (val >> 16);
4690	dev->mac_addr[4] = (u8) (val >> 8);
4691	dev->mac_addr[5] = (u8) val;
4692
4693	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4694
4695	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4696	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4697		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4698
4699	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4700	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4701	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4702}
4703
4704static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4705{
4706	struct cnic_local *cp = dev->cnic_priv;
4707	struct cnic_eth_dev *ethdev = cp->ethdev;
4708	struct status_block *sblk = cp->status_blk.gen;
4709	u32 val, kcq_cid_addr, kwq_cid_addr;
4710	int err;
4711
4712	cnic_set_bnx2_mac(dev);
4713
4714	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4715	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4716	if (CNIC_PAGE_BITS > 12)
4717		val |= (12 - 8)  << 4;
4718	else
4719		val |= (CNIC_PAGE_BITS - 8)  << 4;
4720
4721	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4722
4723	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4724	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4725	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4726
4727	err = cnic_setup_5709_context(dev, 1);
4728	if (err)
4729		return err;
4730
4731	cnic_init_context(dev, KWQ_CID);
4732	cnic_init_context(dev, KCQ_CID);
4733
4734	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4735	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4736
4737	cp->max_kwq_idx = MAX_KWQ_IDX;
4738	cp->kwq_prod_idx = 0;
4739	cp->kwq_con_idx = 0;
4740	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4741
4742	if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
4743		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4744	else
4745		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4746
4747	/* Initialize the kernel work queue context. */
4748	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4749	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4750	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4751
4752	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4753	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4754
4755	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4756	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4757
4758	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4759	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4760
4761	val = (u32) cp->kwq_info.pgtbl_map;
4762	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4763
4764	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4765	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4766
4767	cp->kcq1.sw_prod_idx = 0;
4768	cp->kcq1.hw_prod_idx_ptr =
4769		&sblk->status_completion_producer_index;
4770
4771	cp->kcq1.status_idx_ptr = &sblk->status_idx;
4772
4773	/* Initialize the kernel complete queue context. */
4774	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4775	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4776	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4777
4778	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4779	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4780
4781	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4782	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4783
4784	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4785	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4786
4787	val = (u32) cp->kcq1.dma.pgtbl_map;
4788	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4789
4790	cp->int_num = 0;
4791	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4792		struct status_block_msix *msblk = cp->status_blk.bnx2;
4793		u32 sb_id = cp->status_blk_num;
4794		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4795
4796		cp->kcq1.hw_prod_idx_ptr =
4797			&msblk->status_completion_producer_index;
4798		cp->kcq1.status_idx_ptr = &msblk->status_idx;
4799		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
4800		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4801		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4802		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4803	}
4804
4805	/* Enable Command Scheduler notification when we write to the
4806	 * host producer index of the kernel contexts. */
4807	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4808
4809	/* Enable Command Scheduler notification when we write to either
4810	 * the Send Queue or Receive Queue producer indexes of the kernel
4811	 * bypass contexts. */
4812	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4813	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4814
4815	/* Notify COM when the driver posts an application buffer. */
4816	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4817
4818	/* Set the CP and COM doorbells.  These two processors poll the
4819	 * doorbell for a non-zero value before running.  This must be done
4820	 * after setting up the kernel queue contexts. */
4821	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4822	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4823
4824	cnic_init_bnx2_tx_ring(dev);
4825	cnic_init_bnx2_rx_ring(dev);
4826
4827	err = cnic_init_bnx2_irq(dev);
4828	if (err) {
4829		netdev_err(dev->netdev, "cnic_init_irq failed\n");
4830		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4831		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4832		return err;
4833	}
4834
4835	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
4836
4837	return 0;
4838}
4839
4840static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4841{
4842	struct cnic_local *cp = dev->cnic_priv;
4843	struct cnic_eth_dev *ethdev = cp->ethdev;
4844	u32 start_offset = ethdev->ctx_tbl_offset;
4845	int i;
4846
4847	for (i = 0; i < cp->ctx_blks; i++) {
4848		struct cnic_ctx *ctx = &cp->ctx_arr[i];
4849		dma_addr_t map = ctx->mapping;
4850
4851		if (cp->ctx_align) {
4852			unsigned long mask = cp->ctx_align - 1;
4853
4854			map = (map + mask) & ~mask;
4855		}
4856
4857		cnic_ctx_tbl_wr(dev, start_offset + i, map);
4858	}
4859}
4860
4861static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4862{
4863	struct cnic_local *cp = dev->cnic_priv;
4864	struct cnic_eth_dev *ethdev = cp->ethdev;
4865	int err = 0;
4866
4867	tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
4868	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4869		err = cnic_request_irq(dev);
4870
4871	return err;
4872}
4873
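/* Read-modify-write the HC_ENABLED flag of one status-block index in
 * CSTORM internal memory; disable == 1 masks host coalescing for that
 * index.
 */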
4874static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4875						u16 sb_id, u8 sb_index,
4876						u8 disable)
4877{
4878	struct bnx2x *bp = netdev_priv(dev->netdev);
4879
4880	u32 addr = BAR_CSTRORM_INTMEM +
4881			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4882			offsetof(struct hc_status_block_data_e1x, index_data) +
4883			sizeof(struct hc_index_data)*sb_index +
4884			offsetof(struct hc_index_data, flags);
4885	u16 flags = CNIC_RD16(dev, addr);
4886	/* clear and set */
4887	flags &= ~HC_INDEX_DATA_HC_ENABLED;
4888	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4889		  HC_INDEX_DATA_HC_ENABLED);
4890	CNIC_WR16(dev, addr, flags);
4891}
4892
4893static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4894{
4895	struct cnic_local *cp = dev->cnic_priv;
4896	struct bnx2x *bp = netdev_priv(dev->netdev);
4897	u8 sb_id = cp->status_blk_num;
4898
4899	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4900			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4901			offsetof(struct hc_status_block_data_e1x, index_data) +
4902			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4903			offsetof(struct hc_index_data, timeout), 64 / 4);
4904	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4905}
4906
4907static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4908{
4909}
4910
4911static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4912				    struct client_init_ramrod_data *data)
4913{
4914	struct cnic_local *cp = dev->cnic_priv;
4915	struct bnx2x *bp = netdev_priv(dev->netdev);
4916	struct cnic_uio_dev *udev = cp->udev;
4917	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4918	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4919	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4920	int i;
4921	u32 cli = cp->ethdev->iscsi_l2_client_id;
4922	u32 val;
4923
4924	memset(txbd, 0, CNIC_PAGE_SIZE);
4925
4926	buf_map = udev->l2_buf_map;
4927	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4928		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4929		struct eth_tx_parse_bd_e1x *pbd_e1x =
4930			&((txbd + 1)->parse_bd_e1x);
4931		struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
4932		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4933
4934		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4935		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4936		reg_bd->addr_hi = start_bd->addr_hi;
4937		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4938		start_bd->nbytes = cpu_to_le16(0x10);
4939		start_bd->nbd = cpu_to_le16(3);
4940		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4941		start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
4942		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4943
4944		if (BNX2X_CHIP_IS_E2_PLUS(bp))
4945			pbd_e2->parsing_data = (UNICAST_ADDRESS <<
4946				ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
4947		else
4948			pbd_e1x->global_data = (UNICAST_ADDRESS <<
4949				ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
4950	}
4951
4952	val = (u64) ring_map >> 32;
4953	txbd->next_bd.addr_hi = cpu_to_le32(val);
4954
4955	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4956
4957	val = (u64) ring_map & 0xffffffff;
4958	txbd->next_bd.addr_lo = cpu_to_le32(val);
4959
4960	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4961
4962	/* Other ramrod params */
4963	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4964	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4965
4966	/* reset xstorm per client statistics */
4967	if (cli < MAX_STAT_COUNTER_ID) {
4968		data->general.statistics_zero_flg = 1;
4969		data->general.statistics_en_flg = 1;
4970		data->general.statistics_counter_id = cli;
4971	}
4972
4973	cp->tx_cons_ptr =
4974		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
4975}
4976
4977static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4978				    struct client_init_ramrod_data *data)
4979{
4980	struct cnic_local *cp = dev->cnic_priv;
4981	struct bnx2x *bp = netdev_priv(dev->netdev);
4982	struct cnic_uio_dev *udev = cp->udev;
4983	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4984				CNIC_PAGE_SIZE);
4985	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4986				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
4987	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4988	int i;
4989	u32 cli = cp->ethdev->iscsi_l2_client_id;
4990	int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
4991	u32 val;
4992	dma_addr_t ring_map = udev->l2_ring_map;
4993
4994	/* General data */
4995	data->general.client_id = cli;
4996	data->general.activate_flg = 1;
4997	data->general.sp_client_id = cli;
4998	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4999	data->general.func_id = bp->pfid;
5000
5001	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
5002		dma_addr_t buf_map;
5003		int n = (i % cp->l2_rx_ring_size) + 1;
5004
5005		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
5006		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
5007		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
5008	}
5009
5010	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
5011	rxbd->addr_hi = cpu_to_le32(val);
5012	data->rx.bd_page_base.hi = cpu_to_le32(val);
5013
5014	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
5015	rxbd->addr_lo = cpu_to_le32(val);
5016	data->rx.bd_page_base.lo = cpu_to_le32(val);
5017
5018	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
5019	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
5020	rxcqe->addr_hi = cpu_to_le32(val);
5021	data->rx.cqe_page_base.hi = cpu_to_le32(val);
5022
5023	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
5024	rxcqe->addr_lo = cpu_to_le32(val);
5025	data->rx.cqe_page_base.lo = cpu_to_le32(val);
5026
5027	/* Other ramrod params */
5028	data->rx.client_qzone_id = cl_qzone_id;
5029	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
5030	data->rx.status_block_id = BNX2X_DEF_SB_ID;
5031
5032	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
5033
5034	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
5035	data->rx.outer_vlan_removal_enable_flg = 1;
5036	data->rx.silent_vlan_removal_flg = 1;
5037	data->rx.silent_vlan_value = 0;
5038	data->rx.silent_vlan_mask = 0xffff;
5039
5040	cp->rx_cons_ptr =
5041		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
5042	cp->rx_cons = *cp->rx_cons_ptr;
5043}
5044
5045static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5046{
5047	struct cnic_local *cp = dev->cnic_priv;
5048	struct bnx2x *bp = netdev_priv(dev->netdev);
5049	u32 pfid = bp->pfid;
5050
5051	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
5052			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
5053	cp->kcq1.sw_prod_idx = 0;
5054
5055	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5056		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5057
5058		cp->kcq1.hw_prod_idx_ptr =
5059			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5060		cp->kcq1.status_idx_ptr =
5061			&sb->sb.running_index[SM_RX_ID];
5062	} else {
5063		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
5064
5065		cp->kcq1.hw_prod_idx_ptr =
5066			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5067		cp->kcq1.status_idx_ptr =
5068			&sb->sb.running_index[SM_RX_ID];
5069	}
5070
5071	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5072		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5073
5074		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
5075					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
5076		cp->kcq2.sw_prod_idx = 0;
5077		cp->kcq2.hw_prod_idx_ptr =
5078			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
5079		cp->kcq2.status_idx_ptr =
5080			&sb->sb.running_index[SM_RX_ID];
5081	}
5082}
5083
5084static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5085{
5086	struct cnic_local *cp = dev->cnic_priv;
5087	struct bnx2x *bp = netdev_priv(dev->netdev);
5088	struct cnic_eth_dev *ethdev = cp->ethdev;
5089	int ret;
5090	u32 pfid;
5091
5092	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
5093	cp->func = bp->pf_num;
5094
5095	pfid = bp->pfid;
5096
5097	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
5098			       cp->iscsi_start_cid, 0);
5099
5100	if (ret)
5101		return -ENOMEM;
5102
5103	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5104		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
5105					cp->fcoe_start_cid, 0);
5106
5107		if (ret)
5108			return -ENOMEM;
5109	}
5110
5111	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
5112
5113	cnic_init_bnx2x_kcq(dev);
5114
5115	/* Only 1 EQ */
5116	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
5117	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5118		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
5119	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5120		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
5121		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
5122	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5123		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
5124		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
5125	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5126		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
5127		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
5128	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5129		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
5130		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
5131	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5132		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
5133	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
5134		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
5135	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5136		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
5137		HC_INDEX_ISCSI_EQ_CONS);
5138
5139	CNIC_WR(dev, BAR_USTRORM_INTMEM +
5140		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
5141		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
5142	CNIC_WR(dev, BAR_USTRORM_INTMEM +
5143		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
5144		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
5145
5146	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5147		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
5148
5149	cnic_setup_bnx2x_context(dev);
5150
5151	ret = cnic_init_bnx2x_irq(dev);
5152	if (ret)
5153		return ret;
5154
5155	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
5156	return 0;
5157}
5158
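/* Bring up the L2 rings used by the UIO interface.  On bnx2x this
 * primes the rx producers in USTORM, submits a CLIENT_SETUP ramrod
 * built from the init data staged in the UIO buffer, and polls up to
 * ~10 ms for its completion before enabling the ring doorbell.
 */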
5159static void cnic_init_rings(struct cnic_dev *dev)
5160{
5161	struct cnic_local *cp = dev->cnic_priv;
5162	struct bnx2x *bp = netdev_priv(dev->netdev);
5163	struct cnic_uio_dev *udev = cp->udev;
5164
5165	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5166		return;
5167
5168	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5169		cnic_init_bnx2_tx_ring(dev);
5170		cnic_init_bnx2_rx_ring(dev);
5171		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5172	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5173		u32 cli = cp->ethdev->iscsi_l2_client_id;
5174		u32 cid = cp->ethdev->iscsi_l2_cid;
5175		u32 cl_qzone_id;
5176		struct client_init_ramrod_data *data;
5177		union l5cm_specific_data l5_data;
5178		struct ustorm_eth_rx_producers rx_prods = {0};
5179		u32 off, i, *cid_ptr;
5180
5181		rx_prods.bd_prod = 0;
5182		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5183		barrier();
5184
5185		cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
5186
5187		off = BAR_USTRORM_INTMEM +
5188			(BNX2X_CHIP_IS_E2_PLUS(bp) ?
5189			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
5190			 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
5191
5192		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
5193			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
5194
5195		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5196
5197		data = udev->l2_buf;
5198		cid_ptr = udev->l2_buf + 12;
5199
5200		memset(data, 0, sizeof(*data));
5201
5202		cnic_init_bnx2x_tx_ring(dev, data);
5203		cnic_init_bnx2x_rx_ring(dev, data);
5204
5205		data->general.fp_hsi_ver = ETH_FP_HSI_VERSION;
5206
5207		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5208		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
5209
5210		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5211
5212		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5213			cid, ETH_CONNECTION_TYPE, &l5_data);
5214
5215		i = 0;
5216		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5217		       ++i < 10)
5218			msleep(1);
5219
5220		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5221			netdev_err(dev->netdev,
5222				"iSCSI CLIENT_SETUP did not complete\n");
5223		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5224		cnic_ring_ctl(dev, cid, cli, 1);
5225		*cid_ptr = cid >> 4;
5226		*(cid_ptr + 1) = cid * bp->db_size;
5227		*(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
5228	}
5229}
5230
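/* Reverse of cnic_init_rings(): on bnx2x, halt the L2 client, wait for
 * the HALT completion, then issue CFC_DEL and scrub the rx ring page.
 */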
5231static void cnic_shutdown_rings(struct cnic_dev *dev)
5232{
5233	struct cnic_local *cp = dev->cnic_priv;
5234	struct cnic_uio_dev *udev = cp->udev;
5235	void *rx_ring;
5236
5237	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5238		return;
5239
5240	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5241		cnic_shutdown_bnx2_rx_ring(dev);
5242	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5243		u32 cli = cp->ethdev->iscsi_l2_client_id;
5244		u32 cid = cp->ethdev->iscsi_l2_cid;
5245		union l5cm_specific_data l5_data;
5246		int i;
5247
5248		cnic_ring_ctl(dev, cid, cli, 0);
5249
5250		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5251
5252		l5_data.phy_address.lo = cli;
5253		l5_data.phy_address.hi = 0;
5254		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5255			cid, ETH_CONNECTION_TYPE, &l5_data);
5256		i = 0;
5257		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5258		       ++i < 10)
5259			msleep(1);
5260
5261		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5262			netdev_err(dev->netdev,
5263				"iSCSI CLIENT_HALT did not complete\n");
5264		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5265
5266		memset(&l5_data, 0, sizeof(l5_data));
5267		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5268			cid, NONE_CONNECTION_TYPE, &l5_data);
5269		msleep(10);
5270	}
5271	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5272	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
5273	memset(rx_ring, 0, CNIC_PAGE_SIZE);
5274}
5275
5276static int cnic_register_netdev(struct cnic_dev *dev)
5277{
5278	struct cnic_local *cp = dev->cnic_priv;
5279	struct cnic_eth_dev *ethdev = cp->ethdev;
5280	int err;
5281
5282	if (!ethdev)
5283		return -ENODEV;
5284
5285	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5286		return 0;
5287
5288	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5289	if (err)
5290		netdev_err(dev->netdev, "register_cnic failed\n");
5291
5292	/* Read iSCSI config again.  On some bnx2x devices, the iSCSI config
5293	 * can change after firmware is downloaded.
5294	 */
5295	dev->max_iscsi_conn = ethdev->max_iscsi_conn;
5296	if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
5297		dev->max_iscsi_conn = 0;
5298
5299	return err;
5300}
5301
5302static void cnic_unregister_netdev(struct cnic_dev *dev)
5303{
5304	struct cnic_local *cp = dev->cnic_priv;
5305	struct cnic_eth_dev *ethdev = cp->ethdev;
5306
5307	if (!ethdev)
5308		return;
5309
5310	ethdev->drv_unregister_cnic(dev->netdev);
5311}
5312
5313static int cnic_start_hw(struct cnic_dev *dev)
5314{
5315	struct cnic_local *cp = dev->cnic_priv;
5316	struct cnic_eth_dev *ethdev = cp->ethdev;
5317	int err;
5318
5319	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5320		return -EALREADY;
5321
5322	dev->regview = ethdev->io_base;
5323	pci_dev_get(dev->pcidev);
5324	cp->func = PCI_FUNC(dev->pcidev->devfn);
5325	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5326	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5327
5328	err = cp->alloc_resc(dev);
5329	if (err) {
5330		netdev_err(dev->netdev, "allocate resource failure\n");
5331		goto err1;
5332	}
5333
5334	err = cp->start_hw(dev);
5335	if (err)
5336		goto err1;
5337
5338	err = cnic_cm_open(dev);
5339	if (err)
5340		goto err1;
5341
5342	set_bit(CNIC_F_CNIC_UP, &dev->flags);
5343
5344	cp->enable_int(dev);
5345
5346	return 0;
5347
5348err1:
5349	if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
5350		cp->stop_hw(dev);
5351	else
5352		cp->free_resc(dev);
5353	pci_dev_put(dev->pcidev);
5354	return err;
5355}
5356
5357static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5358{
5359	cnic_disable_bnx2_int_sync(dev);
5360
5361	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5362	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5363
5364	cnic_init_context(dev, KWQ_CID);
5365	cnic_init_context(dev, KCQ_CID);
5366
5367	cnic_setup_5709_context(dev, 0);
5368	cnic_free_irq(dev);
5369
5370	cnic_free_resc(dev);
5371}
5372
5373
5374static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5375{
5376	struct cnic_local *cp = dev->cnic_priv;
5377	struct bnx2x *bp = netdev_priv(dev->netdev);
5378	u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
5379	u32 sb_id = cp->status_blk_num;
5380	u32 idx_off, syn_off;
5381
5382	cnic_free_irq(dev);
5383
5384	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5385		idx_off = offsetof(struct hc_status_block_e2, index_values) +
5386			  (hc_index * sizeof(u16));
5387
5388		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
5389	} else {
5390		idx_off = offsetof(struct hc_status_block_e1x, index_values) +
5391			  (hc_index * sizeof(u16));
5392
5393		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
5394	}
5395	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
5396	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
5397		  idx_off, 0);
5398
5399	*cp->kcq1.hw_prod_idx_ptr = 0;
5400	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5401		CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
5402	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5403	cnic_free_resc(dev);
5404}
5405
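/* Stop a running device.  Wait up to 1.5 s for the UIO consumer to
 * close (uio_dev returning to -1) so the ring shutdown can finish
 * before CNIC_F_CNIC_UP is cleared and the L4 ulp_ops pointer is
 * severed under RCU.
 */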
5406static void cnic_stop_hw(struct cnic_dev *dev)
5407{
5408	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5409		struct cnic_local *cp = dev->cnic_priv;
5410		int i = 0;
5411
5412		/* Need to wait for the ring shutdown event to complete
5413		 * before clearing the CNIC_UP flag.
5414		 */
5415		while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
5416			msleep(100);
5417			i++;
5418		}
5419		cnic_shutdown_rings(dev);
5420		cp->stop_cm(dev);
5421		cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
5422		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5423		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
5424		synchronize_rcu();
5425		cnic_cm_shutdown(dev);
5426		cp->stop_hw(dev);
5427		pci_dev_put(dev->pcidev);
5428	}
5429}
5430
5431static void cnic_free_dev(struct cnic_dev *dev)
5432{
5433	int i = 0;
5434
5435	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5436		msleep(100);
5437		i++;
5438	}
5439	if (atomic_read(&dev->ref_count) != 0)
5440		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5441
5442	netdev_info(dev->netdev, "Removed CNIC device\n");
5443	dev_put(dev->netdev);
5444	kfree(dev);
5445}
5446
5447static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
5448				struct cnic_fc_npiv_tbl *npiv_tbl)
5449{
5450	struct cnic_local *cp = dev->cnic_priv;
5451	struct bnx2x *bp = netdev_priv(dev->netdev);
5452	int ret;
5453
5454	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
5455		return -EAGAIN;     /* bnx2x is down */
5456
5457	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
5458		return -EINVAL;
5459
5460	ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
5461	return ret;
5462}
5463
5464static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5465				       struct pci_dev *pdev)
5466{
5467	struct cnic_dev *cdev;
5468	struct cnic_local *cp;
5469	int alloc_size;
5470
5471	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5472
5473	cdev = kzalloc(alloc_size, GFP_KERNEL);
5474	if (cdev == NULL)
5475		return NULL;
5476
5477	cdev->netdev = dev;
5478	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5479	cdev->register_device = cnic_register_device;
5480	cdev->unregister_device = cnic_unregister_device;
5481	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5482	cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
5483	atomic_set(&cdev->ref_count, 0);
5484
5485	cp = cdev->cnic_priv;
5486	cp->dev = cdev;
5487	cp->l2_single_buf_size = 0x400;
5488	cp->l2_rx_ring_size = 3;
5489
5490	spin_lock_init(&cp->cnic_ulp_lock);
5491
5492	netdev_info(dev, "Added CNIC device\n");
5493
5494	return cdev;
5495}
5496
5497static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5498{
5499	struct pci_dev *pdev;
5500	struct cnic_dev *cdev;
5501	struct cnic_local *cp;
5502	struct bnx2 *bp = netdev_priv(dev);
5503	struct cnic_eth_dev *ethdev = NULL;
5504
5505	if (bp->cnic_probe)
5506		ethdev = (bp->cnic_probe)(dev);
5507
5508	if (!ethdev)
5509		return NULL;
5510
5511	pdev = ethdev->pdev;
5512	if (!pdev)
5513		return NULL;
5514
5515	dev_hold(dev);
5516	pci_dev_get(pdev);
5517	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5518	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5519	    (pdev->revision < 0x10)) {
5520		pci_dev_put(pdev);
5521		goto cnic_err;
5522	}
5523	pci_dev_put(pdev);
5524
5525	cdev = cnic_alloc_dev(dev, pdev);
5526	if (cdev == NULL)
5527		goto cnic_err;
5528
5529	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5530	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5531
5532	cp = cdev->cnic_priv;
5533	cp->ethdev = ethdev;
5534	cdev->pcidev = pdev;
5535	cp->chip_id = ethdev->chip_id;
5536
5537	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5538
5539	cp->cnic_ops = &cnic_bnx2_ops;
5540	cp->start_hw = cnic_start_bnx2_hw;
5541	cp->stop_hw = cnic_stop_bnx2_hw;
5542	cp->setup_pgtbl = cnic_setup_page_tbl;
5543	cp->alloc_resc = cnic_alloc_bnx2_resc;
5544	cp->free_resc = cnic_free_resc;
5545	cp->start_cm = cnic_cm_init_bnx2_hw;
5546	cp->stop_cm = cnic_cm_stop_bnx2_hw;
5547	cp->enable_int = cnic_enable_bnx2_int;
5548	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5549	cp->close_conn = cnic_close_bnx2_conn;
5550	return cdev;
5551
5552cnic_err:
5553	dev_put(dev);
5554	return NULL;
5555}
5556
5557static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5558{
5559	struct pci_dev *pdev;
5560	struct cnic_dev *cdev;
5561	struct cnic_local *cp;
5562	struct bnx2x *bp = netdev_priv(dev);
5563	struct cnic_eth_dev *ethdev = NULL;
5564
5565	if (bp->cnic_probe)
5566		ethdev = bp->cnic_probe(dev);
5567
5568	if (!ethdev)
5569		return NULL;
5570
5571	pdev = ethdev->pdev;
5572	if (!pdev)
5573		return NULL;
5574
5575	dev_hold(dev);
5576	cdev = cnic_alloc_dev(dev, pdev);
5577	if (cdev == NULL) {
5578		dev_put(dev);
5579		return NULL;
5580	}
5581
5582	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5583	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5584
5585	cp = cdev->cnic_priv;
5586	cp->ethdev = ethdev;
5587	cdev->pcidev = pdev;
5588	cp->chip_id = ethdev->chip_id;
5589
5590	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
5591
5592	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5593		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5594	if (CNIC_SUPPORTS_FCOE(bp)) {
5595		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5596		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
5597	}
5598
5599	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
5600		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
5601
5602	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
5603
5604	cp->cnic_ops = &cnic_bnx2x_ops;
5605	cp->start_hw = cnic_start_bnx2x_hw;
5606	cp->stop_hw = cnic_stop_bnx2x_hw;
5607	cp->setup_pgtbl = cnic_setup_page_tbl_le;
5608	cp->alloc_resc = cnic_alloc_bnx2x_resc;
5609	cp->free_resc = cnic_free_resc;
5610	cp->start_cm = cnic_cm_init_bnx2x_hw;
5611	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5612	cp->enable_int = cnic_enable_bnx2x_int;
5613	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5614	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5615		cp->ack_int = cnic_ack_bnx2x_e2_msix;
5616		cp->arm_int = cnic_arm_bnx2x_e2_msix;
5617	} else {
5618		cp->ack_int = cnic_ack_bnx2x_msix;
5619		cp->arm_int = cnic_arm_bnx2x_msix;
5620	}
5621	cp->close_conn = cnic_close_bnx2x_conn;
5622	return cdev;
5623}
5624
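/* Match the netdev to a supported driver (bnx2 or bnx2x) via its ethtool
 * drvinfo and, on a match, create the cnic device and add it to the
 * global device list.
 */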
5625static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5626{
5627	struct ethtool_drvinfo drvinfo;
5628	struct cnic_dev *cdev = NULL;
5629
5630	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5631		memset(&drvinfo, 0, sizeof(drvinfo));
5632		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5633
5634		if (!strcmp(drvinfo.driver, "bnx2"))
5635			cdev = init_bnx2_cnic(dev);
5636		if (!strcmp(drvinfo.driver, "bnx2x"))
5637			cdev = init_bnx2x_cnic(dev);
5638		if (cdev) {
5639			write_lock(&cnic_dev_lock);
5640			list_add(&cdev->list, &cnic_dev_list);
5641			write_unlock(&cnic_dev_lock);
5642		}
5643	}
5644	return cdev;
5645}
5646
5647static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5648			      u16 vlan_id)
5649{
5650	int if_type;
5651
5652	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5653		struct cnic_ulp_ops *ulp_ops;
5654		void *ctx;
5655
5656		mutex_lock(&cnic_lock);
5657		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
5658						lockdep_is_held(&cnic_lock));
5659		if (!ulp_ops || !ulp_ops->indicate_netevent) {
5660			mutex_unlock(&cnic_lock);
5661			continue;
5662		}
5663
5664		ctx = cp->ulp_handle[if_type];
5665
5666		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5667		mutex_unlock(&cnic_lock);
5668
5669		ulp_ops->indicate_netevent(ctx, event, vlan_id);
5670
5671		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5672	}
5673}
5674
5675/* netdev event handler */
5676static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5677							 void *ptr)
5678{
5679	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
5680	struct cnic_dev *dev;
5681	int new_dev = 0;
5682
5683	dev = cnic_from_netdev(netdev);
5684
5685	if (!dev && event == NETDEV_REGISTER) {
5686		/* Check for the hot-plug device */
5687		dev = is_cnic_dev(netdev);
5688		if (dev) {
5689			new_dev = 1;
5690			cnic_hold(dev);
5691		}
5692	}
5693	if (dev) {
5694		struct cnic_local *cp = dev->cnic_priv;
5695
5696		if (new_dev)
5697			cnic_ulp_init(dev);
5698		else if (event == NETDEV_UNREGISTER)
5699			cnic_ulp_exit(dev);
5700
5701		if (event == NETDEV_UP) {
5702			if (cnic_register_netdev(dev) != 0) {
5703				cnic_put(dev);
5704				goto done;
5705			}
5706			if (!cnic_start_hw(dev))
5707				cnic_ulp_start(dev);
5708		}
5709
5710		cnic_rcv_netevent(cp, event, 0);
5711
5712		if (event == NETDEV_GOING_DOWN) {
5713			cnic_ulp_stop(dev);
5714			cnic_stop_hw(dev);
5715			cnic_unregister_netdev(dev);
5716		} else if (event == NETDEV_UNREGISTER) {
5717			write_lock(&cnic_dev_lock);
5718			list_del_init(&dev->list);
5719			write_unlock(&cnic_dev_lock);
5720
5721			cnic_put(dev);
5722			cnic_free_dev(dev);
5723			goto done;
5724		}
5725		cnic_put(dev);
5726	} else {
5727		struct net_device *realdev;
5728		u16 vid;
5729
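		/* The event may be for a VLAN device stacked on top of a cnic
		 * netdev; resolve the real device and forward the event with
		 * a non-zero VLAN id.
		 */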
5730		vid = cnic_get_vlan(netdev, &realdev);
5731		if (realdev) {
5732			dev = cnic_from_netdev(realdev);
5733			if (dev) {
5734				vid |= VLAN_CFI_MASK;	/* make non-zero */
5735				cnic_rcv_netevent(dev->cnic_priv, event, vid);
5736				cnic_put(dev);
5737			}
5738		}
5739	}
5740done:
5741	return NOTIFY_DONE;
5742}
5743
5744static struct notifier_block cnic_netdev_notifier = {
5745	.notifier_call = cnic_netdev_event
5746};
5747
5748static void cnic_release(void)
5749{
5750	struct cnic_uio_dev *udev;
5751
5752	while (!list_empty(&cnic_udev_list)) {
5753		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5754				  list);
5755		cnic_free_uio(udev);
5756	}
5757}
5758
5759static int __init cnic_init(void)
5760{
5761	int rc = 0;
5762
5763	pr_info("%s", version);
5764
5765	rc = register_netdevice_notifier(&cnic_netdev_notifier);
5766	if (rc) {
5767		cnic_release();
5768		return rc;
5769	}
5770
5771	cnic_wq = create_singlethread_workqueue("cnic_wq");
5772	if (!cnic_wq) {
5773		cnic_release();
5774		unregister_netdevice_notifier(&cnic_netdev_notifier);
5775		return -ENOMEM;
5776	}
5777
5778	return 0;
5779}
5780
5781static void __exit cnic_exit(void)
5782{
5783	unregister_netdevice_notifier(&cnic_netdev_notifier);
5784	cnic_release();
5785	destroy_workqueue(cnic_wq);
5786}
5787
5788module_init(cnic_init);
5789module_exit(cnic_exit);
v5.9
   1/* cnic.c: QLogic CNIC core network driver.
   2 *
   3 * Copyright (c) 2006-2014 Broadcom Corporation
   4 * Copyright (c) 2014-2015 QLogic Corporation
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 *
  10 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
  11 * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
  12 * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
  13 */
  14
  15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  16
  17#include <linux/module.h>
  18
  19#include <linux/kernel.h>
  20#include <linux/errno.h>
  21#include <linux/list.h>
  22#include <linux/slab.h>
  23#include <linux/pci.h>
  24#include <linux/init.h>
  25#include <linux/netdevice.h>
  26#include <linux/uio_driver.h>
  27#include <linux/in.h>
  28#include <linux/dma-mapping.h>
  29#include <linux/delay.h>
  30#include <linux/ethtool.h>
  31#include <linux/if_vlan.h>
  32#include <linux/prefetch.h>
  33#include <linux/random.h>
  34#if IS_ENABLED(CONFIG_VLAN_8021Q)
  35#define BCM_VLAN 1
  36#endif
  37#include <net/ip.h>
  38#include <net/tcp.h>
  39#include <net/route.h>
  40#include <net/ipv6.h>
  41#include <net/ip6_route.h>
  42#include <net/ip6_checksum.h>
  43#include <scsi/iscsi_if.h>
  44
  45#define BCM_CNIC	1
  46#include "cnic_if.h"
  47#include "bnx2.h"
  48#include "bnx2x/bnx2x.h"
  49#include "bnx2x/bnx2x_reg.h"
  50#include "bnx2x/bnx2x_fw_defs.h"
  51#include "bnx2x/bnx2x_hsi.h"
  52#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
  53#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
  54#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
  55#include "cnic.h"
  56#include "cnic_defs.h"
  57
  58#define CNIC_MODULE_NAME	"cnic"
  59
  60static char version[] =
  61	"QLogic " CNIC_MODULE_NAME "Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
  62
  63MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
  64	      "Chen (zongxi@broadcom.com");
  65MODULE_DESCRIPTION("QLogic cnic Driver");
  66MODULE_LICENSE("GPL");
  67MODULE_VERSION(CNIC_MODULE_VERSION);
  68
  69/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
  70static LIST_HEAD(cnic_dev_list);
  71static LIST_HEAD(cnic_udev_list);
  72static DEFINE_RWLOCK(cnic_dev_lock);
  73static DEFINE_MUTEX(cnic_lock);
  74
  75static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
  76
  77/* helper function, assuming cnic_lock is held */
  78static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
  79{
  80	return rcu_dereference_protected(cnic_ulp_tbl[type],
  81					 lockdep_is_held(&cnic_lock));
  82}
  83
  84static int cnic_service_bnx2(void *, void *);
  85static int cnic_service_bnx2x(void *, void *);
  86static int cnic_ctl(void *, struct cnic_ctl_info *);
  87
  88static struct cnic_ops cnic_bnx2_ops = {
  89	.cnic_owner	= THIS_MODULE,
  90	.cnic_handler	= cnic_service_bnx2,
  91	.cnic_ctl	= cnic_ctl,
  92};
  93
  94static struct cnic_ops cnic_bnx2x_ops = {
  95	.cnic_owner	= THIS_MODULE,
  96	.cnic_handler	= cnic_service_bnx2x,
  97	.cnic_ctl	= cnic_ctl,
  98};
  99
 100static struct workqueue_struct *cnic_wq;
 101
 102static void cnic_shutdown_rings(struct cnic_dev *);
 103static void cnic_init_rings(struct cnic_dev *);
 104static int cnic_cm_set_pg(struct cnic_sock *);
 105
 106static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
 107{
 108	struct cnic_uio_dev *udev = uinfo->priv;
 109	struct cnic_dev *dev;
 110
 111	if (!capable(CAP_NET_ADMIN))
 112		return -EPERM;
 113
 114	if (udev->uio_dev != -1)
 115		return -EBUSY;
 116
 117	rtnl_lock();
 118	dev = udev->dev;
 119
 120	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
 121		rtnl_unlock();
 122		return -ENODEV;
 123	}
 124
 125	udev->uio_dev = iminor(inode);
 126
 127	cnic_shutdown_rings(dev);
 128	cnic_init_rings(dev);
 129	rtnl_unlock();
 130
 131	return 0;
 132}
 133
 134static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
 135{
 136	struct cnic_uio_dev *udev = uinfo->priv;
 137
 138	udev->uio_dev = -1;
 139	return 0;
 140}
 141
 142static inline void cnic_hold(struct cnic_dev *dev)
 143{
 144	atomic_inc(&dev->ref_count);
 145}
 146
 147static inline void cnic_put(struct cnic_dev *dev)
 148{
 149	atomic_dec(&dev->ref_count);
 150}
 151
 152static inline void csk_hold(struct cnic_sock *csk)
 153{
 154	atomic_inc(&csk->ref_count);
 155}
 156
 157static inline void csk_put(struct cnic_sock *csk)
 158{
 159	atomic_dec(&csk->ref_count);
 160}
 161
 162static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
 163{
 164	struct cnic_dev *cdev;
 165
 166	read_lock(&cnic_dev_lock);
 167	list_for_each_entry(cdev, &cnic_dev_list, list) {
 168		if (netdev == cdev->netdev) {
 169			cnic_hold(cdev);
 170			read_unlock(&cnic_dev_lock);
 171			return cdev;
 172		}
 173	}
 174	read_unlock(&cnic_dev_lock);
 175	return NULL;
 176}
 177
 178static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
 179{
 180	atomic_inc(&ulp_ops->ref_count);
 181}
 182
 183static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
 184{
 185	atomic_dec(&ulp_ops->ref_count);
 186}
 187
 188static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 189{
 190	struct cnic_local *cp = dev->cnic_priv;
 191	struct cnic_eth_dev *ethdev = cp->ethdev;
 192	struct drv_ctl_info info;
 193	struct drv_ctl_io *io = &info.data.io;
 194
 195	memset(&info, 0, sizeof(struct drv_ctl_info));
 196	info.cmd = DRV_CTL_CTX_WR_CMD;
 197	io->cid_addr = cid_addr;
 198	io->offset = off;
 199	io->data = val;
 200	ethdev->drv_ctl(dev->netdev, &info);
 201}
 202
 203static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
 204{
 205	struct cnic_local *cp = dev->cnic_priv;
 206	struct cnic_eth_dev *ethdev = cp->ethdev;
 207	struct drv_ctl_info info;
 208	struct drv_ctl_io *io = &info.data.io;
 209
 210	memset(&info, 0, sizeof(struct drv_ctl_info));
 211	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
 212	io->offset = off;
 213	io->dma_addr = addr;
 214	ethdev->drv_ctl(dev->netdev, &info);
 215}
 216
 217static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
 218{
 219	struct cnic_local *cp = dev->cnic_priv;
 220	struct cnic_eth_dev *ethdev = cp->ethdev;
 221	struct drv_ctl_info info;
 222	struct drv_ctl_l2_ring *ring = &info.data.ring;
 223
 224	memset(&info, 0, sizeof(struct drv_ctl_info));
 225	if (start)
 226		info.cmd = DRV_CTL_START_L2_CMD;
 227	else
 228		info.cmd = DRV_CTL_STOP_L2_CMD;
 229
 230	ring->cid = cid;
 231	ring->client_id = cl_id;
 232	ethdev->drv_ctl(dev->netdev, &info);
 233}
 234
 235static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
 236{
 237	struct cnic_local *cp = dev->cnic_priv;
 238	struct cnic_eth_dev *ethdev = cp->ethdev;
 239	struct drv_ctl_info info;
 240	struct drv_ctl_io *io = &info.data.io;
 241
 242	memset(&info, 0, sizeof(struct drv_ctl_info));
 243	info.cmd = DRV_CTL_IO_WR_CMD;
 244	io->offset = off;
 245	io->data = val;
 246	ethdev->drv_ctl(dev->netdev, &info);
 247}
 248
 249static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
 250{
 251	struct cnic_local *cp = dev->cnic_priv;
 252	struct cnic_eth_dev *ethdev = cp->ethdev;
 253	struct drv_ctl_info info;
 254	struct drv_ctl_io *io = &info.data.io;
 255
 256	memset(&info, 0, sizeof(struct drv_ctl_info));
 257	info.cmd = DRV_CTL_IO_RD_CMD;
 258	io->offset = off;
 259	ethdev->drv_ctl(dev->netdev, &info);
 260	return io->data;
 261}
 262
 263static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
 264{
 265	struct cnic_local *cp = dev->cnic_priv;
 266	struct cnic_eth_dev *ethdev = cp->ethdev;
 267	struct drv_ctl_info info;
 268	struct fcoe_capabilities *fcoe_cap =
 269		&info.data.register_data.fcoe_features;
 270
 271	memset(&info, 0, sizeof(struct drv_ctl_info));
 272	if (reg) {
 273		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
 274		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
 275			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
 276	} else {
 277		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
 278	}
 279
 280	info.data.ulp_type = ulp_type;
 281	info.drv_state = state;
 282	ethdev->drv_ctl(dev->netdev, &info);
 283}
 284
 285static int cnic_in_use(struct cnic_sock *csk)
 286{
 287	return test_bit(SK_F_INUSE, &csk->flags);
 288}
 289
 290static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
 291{
 292	struct cnic_local *cp = dev->cnic_priv;
 293	struct cnic_eth_dev *ethdev = cp->ethdev;
 294	struct drv_ctl_info info;
 295
 296	memset(&info, 0, sizeof(struct drv_ctl_info));
 297	info.cmd = cmd;
 298	info.data.credit.credit_count = count;
 299	ethdev->drv_ctl(dev->netdev, &info);
 300}
 301
 302static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
 303{
 304	u32 i;
 305
 306	if (!cp->ctx_tbl)
 307		return -EINVAL;
 308
 309	for (i = 0; i < cp->max_cid_space; i++) {
 310		if (cp->ctx_tbl[i].cid == cid) {
 311			*l5_cid = i;
 312			return 0;
 313		}
 314	}
 315	return -EINVAL;
 316}
 317
 318static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 319			   struct cnic_sock *csk)
 320{
 321	struct iscsi_path path_req;
 322	char *buf = NULL;
 323	u16 len = 0;
 324	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
 325	struct cnic_ulp_ops *ulp_ops;
 326	struct cnic_uio_dev *udev = cp->udev;
 327	int rc = 0, retry = 0;
 328
 329	if (!udev || udev->uio_dev == -1)
 330		return -ENODEV;
 331
 332	if (csk) {
 333		len = sizeof(path_req);
 334		buf = (char *) &path_req;
 335		memset(&path_req, 0, len);
 336
 337		msg_type = ISCSI_KEVENT_PATH_REQ;
 338		path_req.handle = (u64) csk->l5_cid;
 339		if (test_bit(SK_F_IPV6, &csk->flags)) {
 340			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
 341			       sizeof(struct in6_addr));
 342			path_req.ip_addr_len = 16;
 343		} else {
 344			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
 345			       sizeof(struct in_addr));
 346			path_req.ip_addr_len = 4;
 347		}
 348		path_req.vlan_id = csk->vlan_id;
 349		path_req.pmtu = csk->mtu;
 350	}
 351
 352	while (retry < 3) {
 353		rc = 0;
 354		rcu_read_lock();
 355		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
 356		if (ulp_ops)
 357			rc = ulp_ops->iscsi_nl_send_msg(
 358				cp->ulp_handle[CNIC_ULP_ISCSI],
 359				msg_type, buf, len);
 360		rcu_read_unlock();
 361		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
 362			break;
 363
 364		msleep(100);
 365		retry++;
 366	}
 367	return rc;
 368}
 369
 370static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
 371
 372static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 373				  char *buf, u16 len)
 374{
 375	int rc = -EINVAL;
 376
 377	switch (msg_type) {
 378	case ISCSI_UEVENT_PATH_UPDATE: {
 379		struct cnic_local *cp;
 380		u32 l5_cid;
 381		struct cnic_sock *csk;
 382		struct iscsi_path *path_resp;
 383
 384		if (len < sizeof(*path_resp))
 385			break;
 386
 387		path_resp = (struct iscsi_path *) buf;
 388		cp = dev->cnic_priv;
 389		l5_cid = (u32) path_resp->handle;
 390		if (l5_cid >= MAX_CM_SK_TBL_SZ)
 391			break;
 392
 393		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
 394			rc = -ENODEV;
 395			break;
 396		}
 397		csk = &cp->csk_tbl[l5_cid];
 398		csk_hold(csk);
 399		if (cnic_in_use(csk) &&
 400		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
 401
 402			csk->vlan_id = path_resp->vlan_id;
 403
 404			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
 405			if (test_bit(SK_F_IPV6, &csk->flags))
 406				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
 407				       sizeof(struct in6_addr));
 408			else
 409				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
 410				       sizeof(struct in_addr));
 411
 412			if (is_valid_ether_addr(csk->ha)) {
 413				cnic_cm_set_pg(csk);
 414			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
 415				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 416
 417				cnic_cm_upcall(cp, csk,
 418					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
 419				clear_bit(SK_F_CONNECT_START, &csk->flags);
 420			}
 421		}
 422		csk_put(csk);
 423		rc = 0;
 424	}
 425	}
 426
 427	return rc;
 428}
 429
 430static int cnic_offld_prep(struct cnic_sock *csk)
 431{
 432	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 433		return 0;
 434
 435	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
 436		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
 437		return 0;
 438	}
 439
 440	return 1;
 441}
 442
 443static int cnic_close_prep(struct cnic_sock *csk)
 444{
 445	clear_bit(SK_F_CONNECT_START, &csk->flags);
 446	smp_mb__after_atomic();
 447
 448	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 449		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 450			msleep(1);
 451
 452		return 1;
 453	}
 454	return 0;
 455}
 456
 457static int cnic_abort_prep(struct cnic_sock *csk)
 458{
 459	clear_bit(SK_F_CONNECT_START, &csk->flags);
 460	smp_mb__after_atomic();
 461
 462	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 463		msleep(1);
 464
 465	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 466		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
 467		return 1;
 468	}
 469
 470	return 0;
 471}
 472
 473int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 474{
 475	struct cnic_dev *dev;
 476
 477	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 478		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 479		return -EINVAL;
 480	}
 481	mutex_lock(&cnic_lock);
 482	if (cnic_ulp_tbl_prot(ulp_type)) {
 483		pr_err("%s: Type %d has already been registered\n",
 484		       __func__, ulp_type);
 485		mutex_unlock(&cnic_lock);
 486		return -EBUSY;
 487	}
 488
 489	read_lock(&cnic_dev_lock);
 490	list_for_each_entry(dev, &cnic_dev_list, list) {
 491		struct cnic_local *cp = dev->cnic_priv;
 492
 493		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
 494	}
 495	read_unlock(&cnic_dev_lock);
 496
 497	atomic_set(&ulp_ops->ref_count, 0);
 498	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
 499	mutex_unlock(&cnic_lock);
 500
 501	/* Prevent race conditions with netdev_event */
 502	rtnl_lock();
 503	list_for_each_entry(dev, &cnic_dev_list, list) {
 504		struct cnic_local *cp = dev->cnic_priv;
 505
 506		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
 507			ulp_ops->cnic_init(dev);
 508	}
 509	rtnl_unlock();
 510
 511	return 0;
 512}
 513
 514int cnic_unregister_driver(int ulp_type)
 515{
 516	struct cnic_dev *dev;
 517	struct cnic_ulp_ops *ulp_ops;
 518	int i = 0;
 519
 520	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 521		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 522		return -EINVAL;
 523	}
 524	mutex_lock(&cnic_lock);
 525	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 526	if (!ulp_ops) {
 527		pr_err("%s: Type %d has not been registered\n",
 528		       __func__, ulp_type);
 529		goto out_unlock;
 530	}
 531	read_lock(&cnic_dev_lock);
 532	list_for_each_entry(dev, &cnic_dev_list, list) {
 533		struct cnic_local *cp = dev->cnic_priv;
 534
 535		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
 536			pr_err("%s: Type %d still has devices registered\n",
 537			       __func__, ulp_type);
 538			read_unlock(&cnic_dev_lock);
 539			goto out_unlock;
 540		}
 541	}
 542	read_unlock(&cnic_dev_lock);
 543
 544	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
 545
 546	mutex_unlock(&cnic_lock);
 547	synchronize_rcu();
 548	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
 549		msleep(100);
 550		i++;
 551	}
 552
 553	if (atomic_read(&ulp_ops->ref_count) != 0)
 554		pr_warn("%s: Failed waiting for ref count to go to zero\n",
 555			__func__);
 556	return 0;
 557
 558out_unlock:
 559	mutex_unlock(&cnic_lock);
 560	return -EINVAL;
 561}
 562
 563static int cnic_start_hw(struct cnic_dev *);
 564static void cnic_stop_hw(struct cnic_dev *);
 565
 566static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 567				void *ulp_ctx)
 568{
 569	struct cnic_local *cp = dev->cnic_priv;
 570	struct cnic_ulp_ops *ulp_ops;
 571
 572	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 573		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 574		return -EINVAL;
 575	}
 576	mutex_lock(&cnic_lock);
 577	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
 578		pr_err("%s: Driver with type %d has not been registered\n",
 579		       __func__, ulp_type);
 580		mutex_unlock(&cnic_lock);
 581		return -EAGAIN;
 582	}
 583	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
 584		pr_err("%s: Type %d has already been registered to this device\n",
 585		       __func__, ulp_type);
 586		mutex_unlock(&cnic_lock);
 587		return -EBUSY;
 588	}
 589
 590	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
 591	cp->ulp_handle[ulp_type] = ulp_ctx;
 592	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 593	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
 594	cnic_hold(dev);
 595
 596	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
 597		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
 598			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
 599
 600	mutex_unlock(&cnic_lock);
 601
 602	cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
 603
 604	return 0;
 605
 606}
 607EXPORT_SYMBOL(cnic_register_driver);
 608
 609static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 610{
 611	struct cnic_local *cp = dev->cnic_priv;
 612	int i = 0;
 613
 614	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 615		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 616		return -EINVAL;
 617	}
 618
 619	if (ulp_type == CNIC_ULP_ISCSI)
 620		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 621
 622	mutex_lock(&cnic_lock);
 623	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
 624		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
 625		cnic_put(dev);
 626	} else {
 627		pr_err("%s: device not registered to this ulp type %d\n",
 628		       __func__, ulp_type);
 629		mutex_unlock(&cnic_lock);
 630		return -EINVAL;
 631	}
 632	mutex_unlock(&cnic_lock);
 633
 634	if (ulp_type == CNIC_ULP_FCOE)
 635		dev->fcoe_cap = NULL;
 636
 637	synchronize_rcu();
 638
 639	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
 640	       i < 20) {
 641		msleep(100);
 642		i++;
 643	}
 644	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
 645		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 646
 647	if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
 648		cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
 649	else
 650		cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
 651
 652	return 0;
 653}
 654EXPORT_SYMBOL(cnic_unregister_driver);
 655
 656static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
 657			    u32 next)
 658{
 659	id_tbl->start = start_id;
 660	id_tbl->max = size;
 661	id_tbl->next = next;
 662	spin_lock_init(&id_tbl->lock);
 663	id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
 664	if (!id_tbl->table)
 665		return -ENOMEM;
 666
 667	return 0;
 668}
 669
 670static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
 671{
 672	kfree(id_tbl->table);
 673	id_tbl->table = NULL;
 674}
 675
 676static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
 677{
 678	int ret = -1;
 679
 680	id -= id_tbl->start;
 681	if (id >= id_tbl->max)
 682		return ret;
 683
 684	spin_lock(&id_tbl->lock);
 685	if (!test_bit(id, id_tbl->table)) {
 686		set_bit(id, id_tbl->table);
 687		ret = 0;
 688	}
 689	spin_unlock(&id_tbl->lock);
 690	return ret;
 691}
 692
 693/* Returns -1 if not successful */
 694static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
 695{
 696	u32 id;
 697
 698	spin_lock(&id_tbl->lock);
 699	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
 700	if (id >= id_tbl->max) {
 701		id = -1;
 702		if (id_tbl->next != 0) {
 703			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
 704			if (id >= id_tbl->next)
 705				id = -1;
 706		}
 707	}
 708
 709	if (id < id_tbl->max) {
 710		set_bit(id, id_tbl->table);
 711		id_tbl->next = (id + 1) & (id_tbl->max - 1);
 712		id += id_tbl->start;
 713	}
 714
 715	spin_unlock(&id_tbl->lock);
 716
 717	return id;
 718}
 719
 720static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
 721{
 722	if (id == -1)
 723		return;
 724
 725	id -= id_tbl->start;
 726	if (id >= id_tbl->max)
 727		return;
 728
 729	clear_bit(id, id_tbl->table);
 730}
 731
 732static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 733{
 734	int i;
 735
 736	if (!dma->pg_arr)
 737		return;
 738
 739	for (i = 0; i < dma->num_pages; i++) {
 740		if (dma->pg_arr[i]) {
 741			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
 742					  dma->pg_arr[i], dma->pg_map_arr[i]);
 743			dma->pg_arr[i] = NULL;
 744		}
 745	}
 746	if (dma->pgtbl) {
 747		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 748				  dma->pgtbl, dma->pgtbl_map);
 749		dma->pgtbl = NULL;
 750	}
 751	kfree(dma->pg_arr);
 752	dma->pg_arr = NULL;
 753	dma->num_pages = 0;
 754}
 755
 756static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 757{
 758	int i;
 759	__le32 *page_table = (__le32 *) dma->pgtbl;
 760
 761	for (i = 0; i < dma->num_pages; i++) {
 762		/* Each entry needs to be in big endian format. */
 763		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 764		page_table++;
 765		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 766		page_table++;
 767	}
 768}
 769
 770static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
 771{
 772	int i;
 773	__le32 *page_table = (__le32 *) dma->pgtbl;
 774
 775	for (i = 0; i < dma->num_pages; i++) {
 776		/* Each entry needs to be in little endian format. */
 777		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 778		page_table++;
 779		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 780		page_table++;
 781	}
 782}
 783
 784static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 785			  int pages, int use_pg_tbl)
 786{
 787	int i, size;
 788	struct cnic_local *cp = dev->cnic_priv;
 789
 790	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
 791	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
 792	if (dma->pg_arr == NULL)
 793		return -ENOMEM;
 794
 795	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
 796	dma->num_pages = pages;
 797
 798	for (i = 0; i < pages; i++) {
 799		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
 800						    CNIC_PAGE_SIZE,
 801						    &dma->pg_map_arr[i],
 802						    GFP_ATOMIC);
 803		if (dma->pg_arr[i] == NULL)
 804			goto error;
 805	}
 806	if (!use_pg_tbl)
 807		return 0;
 808
 809	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
 810			  ~(CNIC_PAGE_SIZE - 1);
 811	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 812					&dma->pgtbl_map, GFP_ATOMIC);
 813	if (dma->pgtbl == NULL)
 814		goto error;
 815
 816	cp->setup_pgtbl(dev, dma);
 817
 818	return 0;
 819
 820error:
 821	cnic_free_dma(dev, dma);
 822	return -ENOMEM;
 823}
 824
 825static void cnic_free_context(struct cnic_dev *dev)
 826{
 827	struct cnic_local *cp = dev->cnic_priv;
 828	int i;
 829
 830	for (i = 0; i < cp->ctx_blks; i++) {
 831		if (cp->ctx_arr[i].ctx) {
 832			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
 833					  cp->ctx_arr[i].ctx,
 834					  cp->ctx_arr[i].mapping);
 835			cp->ctx_arr[i].ctx = NULL;
 836		}
 837	}
 838}
 839
 840static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
 841{
 842	if (udev->l2_buf) {
 843		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
 844				  udev->l2_buf, udev->l2_buf_map);
 845		udev->l2_buf = NULL;
 846	}
 847
 848	if (udev->l2_ring) {
 849		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
 850				  udev->l2_ring, udev->l2_ring_map);
 851		udev->l2_ring = NULL;
 852	}
 853
 854}
 855
 856static void __cnic_free_uio(struct cnic_uio_dev *udev)
 857{
 858	uio_unregister_device(&udev->cnic_uinfo);
 859
 860	__cnic_free_uio_rings(udev);
 861
 862	pci_dev_put(udev->pdev);
 863	kfree(udev);
 864}
 865
 866static void cnic_free_uio(struct cnic_uio_dev *udev)
 867{
 868	if (!udev)
 869		return;
 870
 871	write_lock(&cnic_dev_lock);
 872	list_del_init(&udev->list);
 873	write_unlock(&cnic_dev_lock);
 874	__cnic_free_uio(udev);
 875}
 876
 877static void cnic_free_resc(struct cnic_dev *dev)
 878{
 879	struct cnic_local *cp = dev->cnic_priv;
 880	struct cnic_uio_dev *udev = cp->udev;
 881
 882	if (udev) {
 883		udev->dev = NULL;
 884		cp->udev = NULL;
 885		if (udev->uio_dev == -1)
 886			__cnic_free_uio_rings(udev);
 887	}
 888
 889	cnic_free_context(dev);
 890	kfree(cp->ctx_arr);
 891	cp->ctx_arr = NULL;
 892	cp->ctx_blks = 0;
 893
 894	cnic_free_dma(dev, &cp->gbl_buf_info);
 895	cnic_free_dma(dev, &cp->kwq_info);
 896	cnic_free_dma(dev, &cp->kwq_16_data_info);
 897	cnic_free_dma(dev, &cp->kcq2.dma);
 898	cnic_free_dma(dev, &cp->kcq1.dma);
 899	kfree(cp->iscsi_tbl);
 900	cp->iscsi_tbl = NULL;
 901	kfree(cp->ctx_tbl);
 902	cp->ctx_tbl = NULL;
 903
 904	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
 905	cnic_free_id_tbl(&cp->cid_tbl);
 906}
 907
 908static int cnic_alloc_context(struct cnic_dev *dev)
 909{
 910	struct cnic_local *cp = dev->cnic_priv;
 911
 912	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
 913		int i, k, arr_size;
 914
 915		cp->ctx_blk_size = CNIC_PAGE_SIZE;
 916		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
 917		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
 918			   sizeof(struct cnic_ctx);
 919		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
 920		if (cp->ctx_arr == NULL)
 921			return -ENOMEM;
 922
 923		k = 0;
 924		for (i = 0; i < 2; i++) {
 925			u32 j, reg, off, lo, hi;
 926
 927			if (i == 0)
 928				off = BNX2_PG_CTX_MAP;
 929			else
 930				off = BNX2_ISCSI_CTX_MAP;
 931
 932			reg = cnic_reg_rd_ind(dev, off);
 933			lo = reg >> 16;
 934			hi = reg & 0xffff;
 935			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
 936				cp->ctx_arr[k].cid = j;
 937		}
 938
 939		cp->ctx_blks = k;
 940		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
 941			cp->ctx_blks = 0;
 942			return -ENOMEM;
 943		}
 944
 945		for (i = 0; i < cp->ctx_blks; i++) {
 946			cp->ctx_arr[i].ctx =
 947				dma_alloc_coherent(&dev->pcidev->dev,
 948						   CNIC_PAGE_SIZE,
 949						   &cp->ctx_arr[i].mapping,
 950						   GFP_KERNEL);
 951			if (cp->ctx_arr[i].ctx == NULL)
 952				return -ENOMEM;
 953		}
 954	}
 955	return 0;
 956}
 957
 958static u16 cnic_bnx2_next_idx(u16 idx)
 959{
 960	return idx + 1;
 961}
 962
 963static u16 cnic_bnx2_hw_idx(u16 idx)
 964{
 965	return idx;
 966}
 967
 968static u16 cnic_bnx2x_next_idx(u16 idx)
 969{
 970	idx++;
 971	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
 972		idx++;
 973
 974	return idx;
 975}
 976
 977static u16 cnic_bnx2x_hw_idx(u16 idx)
 978{
 979	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
 980		idx++;
 981	return idx;
 982}
 983
 984static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
 985			  bool use_pg_tbl)
 986{
 987	int err, i, use_page_tbl = 0;
 988	struct kcqe **kcq;
 989
 990	if (use_pg_tbl)
 991		use_page_tbl = 1;
 992
 993	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
 994	if (err)
 995		return err;
 996
 997	kcq = (struct kcqe **) info->dma.pg_arr;
 998	info->kcq = kcq;
 999
1000	info->next_idx = cnic_bnx2_next_idx;
1001	info->hw_idx = cnic_bnx2_hw_idx;
1002	if (use_pg_tbl)
1003		return 0;
1004
1005	info->next_idx = cnic_bnx2x_next_idx;
1006	info->hw_idx = cnic_bnx2x_hw_idx;
1007
1008	for (i = 0; i < KCQ_PAGE_CNT; i++) {
1009		struct bnx2x_bd_chain_next *next =
1010			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
1011		int j = i + 1;
1012
1013		if (j >= KCQ_PAGE_CNT)
1014			j = 0;
1015		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
1016		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
1017	}
1018	return 0;
1019}
1020
1021static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
1022{
1023	struct cnic_local *cp = udev->dev->cnic_priv;
1024
1025	if (udev->l2_ring)
1026		return 0;
1027
1028	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
1029	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1030					   &udev->l2_ring_map,
1031					   GFP_KERNEL | __GFP_COMP);
1032	if (!udev->l2_ring)
1033		return -ENOMEM;
1034
1035	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
1036	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
1037	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1038					  &udev->l2_buf_map,
1039					  GFP_KERNEL | __GFP_COMP);
1040	if (!udev->l2_buf) {
1041		__cnic_free_uio_rings(udev);
1042		return -ENOMEM;
1043	}
1044
1045	return 0;
1046
1047}
1048
1049static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1050{
1051	struct cnic_local *cp = dev->cnic_priv;
1052	struct cnic_uio_dev *udev;
1053
1054	list_for_each_entry(udev, &cnic_udev_list, list) {
1055		if (udev->pdev == dev->pcidev) {
1056			udev->dev = dev;
1057			if (__cnic_alloc_uio_rings(udev, pages)) {
1058				udev->dev = NULL;
1059				return -ENOMEM;
1060			}
1061			cp->udev = udev;
1062			return 0;
1063		}
1064	}
1065
1066	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1067	if (!udev)
1068		return -ENOMEM;
1069
1070	udev->uio_dev = -1;
1071
1072	udev->dev = dev;
1073	udev->pdev = dev->pcidev;
1074
1075	if (__cnic_alloc_uio_rings(udev, pages))
1076		goto err_udev;
1077
1078	list_add(&udev->list, &cnic_udev_list);
1079
1080	pci_dev_get(udev->pdev);
1081
1082	cp->udev = udev;
1083
1084	return 0;
1085
1086 err_udev:
1087	kfree(udev);
1088	return -ENOMEM;
1089}
1090
1091static int cnic_init_uio(struct cnic_dev *dev)
1092{
1093	struct cnic_local *cp = dev->cnic_priv;
1094	struct cnic_uio_dev *udev = cp->udev;
1095	struct uio_info *uinfo;
1096	int ret = 0;
1097
1098	if (!udev)
1099		return -ENOMEM;
1100
1101	uinfo = &udev->cnic_uinfo;
1102
1103	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
1104	uinfo->mem[0].internal_addr = dev->regview;
1105	uinfo->mem[0].memtype = UIO_MEM_PHYS;
1106
1107	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1108		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
1109						     TX_MAX_TSS_RINGS + 1);
1110		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
1111					CNIC_PAGE_MASK;
1112		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
1113			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
1114		else
1115			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
1116
1117		uinfo->name = "bnx2_cnic";
1118	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1119		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
1120
1121		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
1122			CNIC_PAGE_MASK;
1123		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
1124
1125		uinfo->name = "bnx2x_cnic";
1126	}
1127
1128	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
1129
1130	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
1131	uinfo->mem[2].size = udev->l2_ring_size;
1132	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
1133
1134	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
1135	uinfo->mem[3].size = udev->l2_buf_size;
1136	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
1137
1138	uinfo->version = CNIC_MODULE_VERSION;
1139	uinfo->irq = UIO_IRQ_CUSTOM;
1140
1141	uinfo->open = cnic_uio_open;
1142	uinfo->release = cnic_uio_close;
1143
1144	if (udev->uio_dev == -1) {
1145		if (!uinfo->priv) {
1146			uinfo->priv = udev;
1147
1148			ret = uio_register_device(&udev->pdev->dev, uinfo);
1149		}
1150	} else {
1151		cnic_init_rings(dev);
1152	}
1153
1154	return ret;
1155}
1156
1157static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
1158{
1159	struct cnic_local *cp = dev->cnic_priv;
1160	int ret;
1161
1162	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
1163	if (ret)
1164		goto error;
1165	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
1166
1167	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
1168	if (ret)
1169		goto error;
1170
1171	ret = cnic_alloc_context(dev);
1172	if (ret)
1173		goto error;
1174
1175	ret = cnic_alloc_uio_rings(dev, 2);
1176	if (ret)
1177		goto error;
1178
1179	ret = cnic_init_uio(dev);
1180	if (ret)
1181		goto error;
1182
1183	return 0;
1184
1185error:
1186	cnic_free_resc(dev);
1187	return ret;
1188}
1189
1190static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1191{
1192	struct cnic_local *cp = dev->cnic_priv;
1193	struct bnx2x *bp = netdev_priv(dev->netdev);
1194	int ctx_blk_size = cp->ethdev->ctx_blk_size;
1195	int total_mem, blks, i;
1196
1197	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
1198	blks = total_mem / ctx_blk_size;
1199	if (total_mem % ctx_blk_size)
1200		blks++;
1201
1202	if (blks > cp->ethdev->ctx_tbl_len)
1203		return -ENOMEM;
1204
1205	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
1206	if (cp->ctx_arr == NULL)
1207		return -ENOMEM;
1208
1209	cp->ctx_blks = blks;
1210	cp->ctx_blk_size = ctx_blk_size;
1211	if (!CHIP_IS_E1(bp))
1212		cp->ctx_align = 0;
1213	else
1214		cp->ctx_align = ctx_blk_size;
1215
1216	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
1217
1218	for (i = 0; i < blks; i++) {
1219		cp->ctx_arr[i].ctx =
1220			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
1221					   &cp->ctx_arr[i].mapping,
1222					   GFP_KERNEL);
1223		if (cp->ctx_arr[i].ctx == NULL)
1224			return -ENOMEM;
1225
1226		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
1227			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
1228				cnic_free_context(dev);
1229				cp->ctx_blk_size += cp->ctx_align;
1230				i = -1;
1231				continue;
1232			}
1233		}
1234	}
1235	return 0;
1236}
1237
1238static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1239{
1240	struct cnic_local *cp = dev->cnic_priv;
1241	struct bnx2x *bp = netdev_priv(dev->netdev);
1242	struct cnic_eth_dev *ethdev = cp->ethdev;
1243	u32 start_cid = ethdev->starting_cid;
1244	int i, j, n, ret, pages;
1245	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1246
1247	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1248	cp->iscsi_start_cid = start_cid;
1249	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
1250
1251	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
1252		cp->max_cid_space += dev->max_fcoe_conn;
1253		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
1254		if (!cp->fcoe_init_cid)
1255			cp->fcoe_init_cid = 0x10;
1256	}
1257
1258	cp->iscsi_tbl = kcalloc(MAX_ISCSI_TBL_SZ, sizeof(struct cnic_iscsi),
1259				GFP_KERNEL);
1260	if (!cp->iscsi_tbl)
1261		goto error;
1262
1263	cp->ctx_tbl = kcalloc(cp->max_cid_space, sizeof(struct cnic_context),
1264			      GFP_KERNEL);
1265	if (!cp->ctx_tbl)
1266		goto error;
1267
1268	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1269		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
1270		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1271	}
1272
1273	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
1274		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
1275
1276	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1277		CNIC_PAGE_SIZE;
1278
1279	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1280	if (ret)
1281		goto error;
1282
1283	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1284	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
1285		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1286
1287		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
1288		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
1289						   off;
1290
1291		if ((i % n) == (n - 1))
1292			j++;
1293	}
1294
1295	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
1296	if (ret)
1297		goto error;
1298
1299	if (CNIC_SUPPORTS_FCOE(bp)) {
1300		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1301		if (ret)
1302			goto error;
1303	}
1304
1305	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
1306	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1307	if (ret)
1308		goto error;
1309
1310	ret = cnic_alloc_bnx2x_context(dev);
1311	if (ret)
1312		goto error;
1313
1314	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
1315		return 0;
1316
1317	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1318
1319	cp->l2_rx_ring_size = 15;
1320
1321	ret = cnic_alloc_uio_rings(dev, 4);
1322	if (ret)
1323		goto error;
1324
1325	ret = cnic_init_uio(dev);
1326	if (ret)
1327		goto error;
1328
1329	return 0;
1330
1331error:
1332	cnic_free_resc(dev);
1333	return -ENOMEM;
1334}
1335
1336static inline u32 cnic_kwq_avail(struct cnic_local *cp)
1337{
1338	return cp->max_kwq_idx -
1339		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
1340}
1341
1342static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1343				  u32 num_wqes)
1344{
1345	struct cnic_local *cp = dev->cnic_priv;
1346	struct kwqe *prod_qe;
1347	u16 prod, sw_prod, i;
1348
1349	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1350		return -EAGAIN;		/* bnx2 is down */
1351
1352	spin_lock_bh(&cp->cnic_ulp_lock);
1353	if (num_wqes > cnic_kwq_avail(cp) &&
1354	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
1355		spin_unlock_bh(&cp->cnic_ulp_lock);
1356		return -EAGAIN;
1357	}
1358
1359	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
1360
1361	prod = cp->kwq_prod_idx;
1362	sw_prod = prod & MAX_KWQ_IDX;
1363	for (i = 0; i < num_wqes; i++) {
1364		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
1365		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
1366		prod++;
1367		sw_prod = prod & MAX_KWQ_IDX;
1368	}
1369	cp->kwq_prod_idx = prod;
1370
1371	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
1372
1373	spin_unlock_bh(&cp->cnic_ulp_lock);
1374	return 0;
1375}
1376
1377static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
1378				   union l5cm_specific_data *l5_data)
1379{
1380	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1381	dma_addr_t map;
1382
1383	map = ctx->kwqe_data_mapping;
1384	l5_data->phy_address.lo = (u64) map & 0xffffffff;
1385	l5_data->phy_address.hi = (u64) map >> 32;
1386	return ctx->kwqe_data;
1387}
1388
1389static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1390				u32 type, union l5cm_specific_data *l5_data)
1391{
1392	struct cnic_local *cp = dev->cnic_priv;
1393	struct bnx2x *bp = netdev_priv(dev->netdev);
1394	struct l5cm_spe kwqe;
1395	struct kwqe_16 *kwq[1];
1396	u16 type_16;
1397	int ret;
1398
1399	kwqe.hdr.conn_and_cmd_data =
1400		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1401			     BNX2X_HW_CID(bp, cid)));
1402
1403	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
1404	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1405		   SPE_HDR_FUNCTION_ID;
1406
1407	kwqe.hdr.type = cpu_to_le16(type_16);
1408	kwqe.hdr.reserved1 = 0;
1409	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1410	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1411
1412	kwq[0] = (struct kwqe_16 *) &kwqe;
1413
1414	spin_lock_bh(&cp->cnic_ulp_lock);
1415	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1416	spin_unlock_bh(&cp->cnic_ulp_lock);
1417
1418	if (ret == 1)
1419		return 0;
1420
1421	return ret;
1422}
1423
1424static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1425				   struct kcqe *cqes[], u32 num_cqes)
1426{
1427	struct cnic_local *cp = dev->cnic_priv;
1428	struct cnic_ulp_ops *ulp_ops;
1429
1430	rcu_read_lock();
1431	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1432	if (likely(ulp_ops)) {
1433		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
1434					  cqes, num_cqes);
1435	}
1436	rcu_read_unlock();
1437}
1438
1439static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
1440				       int en_tcp_dack)
1441{
1442	struct bnx2x *bp = netdev_priv(dev->netdev);
1443	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
1444	u16 tstorm_flags = 0;
1445
1446	if (time_stamps) {
1447		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1448		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1449	}
1450	if (en_tcp_dack)
1451		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
1452
1453	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1454		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
1455
1456	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1457		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
1458}
1459
1460static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1461{
1462	struct cnic_local *cp = dev->cnic_priv;
1463	struct bnx2x *bp = netdev_priv(dev->netdev);
1464	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1465	int hq_bds, pages;
1466	u32 pfid = bp->pfid;
1467
1468	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1469	cp->num_ccells = req1->num_ccells_per_conn;
1470	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
1471			      cp->num_iscsi_tasks;
1472	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1473			BNX2X_ISCSI_R2TQE_SIZE;
1474	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1475	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1476	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1477	cp->num_cqs = req1->num_cqs;
1478
1479	if (!dev->max_iscsi_conn)
1480		return 0;
1481
1482	/* init Tstorm RAM */
1483	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1484		  req1->rq_num_wqes);
1485	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1486		  CNIC_PAGE_SIZE);
1487	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1488		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1489	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1490		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1491		  req1->num_tasks_per_conn);
1492
1493	/* init Ustorm RAM */
1494	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1495		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
1496		  req1->rq_buffer_size);
1497	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1498		  CNIC_PAGE_SIZE);
1499	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1500		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1501	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1502		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1503		  req1->num_tasks_per_conn);
1504	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1505		  req1->rq_num_wqes);
1506	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1507		  req1->cq_num_wqes);
1508	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1509		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1510
1511	/* init Xstorm RAM */
1512	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1513		  CNIC_PAGE_SIZE);
1514	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1515		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1516	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1517		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1518		  req1->num_tasks_per_conn);
1519	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1520		  hq_bds);
1521	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1522		  req1->num_tasks_per_conn);
1523	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1524		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1525
1526	/* init Cstorm RAM */
1527	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1528		  CNIC_PAGE_SIZE);
1529	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1530		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1531	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1532		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1533		  req1->num_tasks_per_conn);
1534	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1535		  req1->cq_num_wqes);
1536	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1537		  hq_bds);
1538
1539	cnic_bnx2x_set_tcp_options(dev,
1540			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
1541			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
1542
1543	return 0;
1544}
1545
1546static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1547{
1548	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1549	struct bnx2x *bp = netdev_priv(dev->netdev);
1550	u32 pfid = bp->pfid;
1551	struct iscsi_kcqe kcqe;
1552	struct kcqe *cqes[1];
1553
1554	memset(&kcqe, 0, sizeof(kcqe));
1555	if (!dev->max_iscsi_conn) {
1556		kcqe.completion_status =
1557			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1558		goto done;
1559	}
1560
1561	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1562		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1563	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1564		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1565		req2->error_bit_map[1]);
1566
1567	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1568		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1569	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1570		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1571	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1572		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1573		req2->error_bit_map[1]);
1574
1575	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1576		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1577
1578	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1579
1580done:
1581	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1582	cqes[0] = (struct kcqe *) &kcqe;
1583	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1584
1585	return 0;
1586}
1587
1588static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1589{
1590	struct cnic_local *cp = dev->cnic_priv;
1591	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1592
1593	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1594		struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1595
1596		cnic_free_dma(dev, &iscsi->hq_info);
1597		cnic_free_dma(dev, &iscsi->r2tq_info);
1598		cnic_free_dma(dev, &iscsi->task_array_info);
1599		cnic_free_id(&cp->cid_tbl, ctx->cid);
1600	} else {
1601		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
1602	}
1603
1604	ctx->cid = 0;
1605}
1606
1607static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1608{
1609	u32 cid;
1610	int ret, pages;
1611	struct cnic_local *cp = dev->cnic_priv;
1612	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1613	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1614
1615	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1616		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1617		if (cid == -1) {
1618			ret = -ENOMEM;
1619			goto error;
1620		}
1621		ctx->cid = cid;
1622		return 0;
1623	}
1624
1625	cid = cnic_alloc_new_id(&cp->cid_tbl);
1626	if (cid == -1) {
1627		ret = -ENOMEM;
1628		goto error;
1629	}
1630
1631	ctx->cid = cid;
1632	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
1633
1634	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1635	if (ret)
1636		goto error;
1637
1638	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
1639	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1640	if (ret)
1641		goto error;
1642
1643	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1644	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1645	if (ret)
1646		goto error;
1647
1648	return 0;
1649
1650error:
1651	cnic_free_bnx2x_conn_resc(dev, l5_cid);
1652	return ret;
1653}
1654
1655static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1656				struct regpair *ctx_addr)
1657{
1658	struct cnic_local *cp = dev->cnic_priv;
1659	struct cnic_eth_dev *ethdev = cp->ethdev;
1660	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1661	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1662	unsigned long align_off = 0;
1663	dma_addr_t ctx_map;
1664	void *ctx;
1665
1666	if (cp->ctx_align) {
1667		unsigned long mask = cp->ctx_align - 1;
1668
1669		if (cp->ctx_arr[blk].mapping & mask)
1670			align_off = cp->ctx_align -
1671				    (cp->ctx_arr[blk].mapping & mask);
1672	}
1673	ctx_map = cp->ctx_arr[blk].mapping + align_off +
1674		(off * BNX2X_CONTEXT_MEM_SIZE);
1675	ctx = cp->ctx_arr[blk].ctx + align_off +
1676	      (off * BNX2X_CONTEXT_MEM_SIZE);
1677	if (init)
1678		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1679
1680	ctx_addr->lo = ctx_map & 0xffffffff;
1681	ctx_addr->hi = (u64) ctx_map >> 32;
1682	return ctx;
1683}
1684
1685static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1686				u32 num)
1687{
1688	struct cnic_local *cp = dev->cnic_priv;
1689	struct bnx2x *bp = netdev_priv(dev->netdev);
1690	struct iscsi_kwqe_conn_offload1 *req1 =
1691			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
1692	struct iscsi_kwqe_conn_offload2 *req2 =
1693			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
1694	struct iscsi_kwqe_conn_offload3 *req3;
1695	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1696	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1697	u32 cid = ctx->cid;
1698	u32 hw_cid = BNX2X_HW_CID(bp, cid);
1699	struct iscsi_context *ictx;
1700	struct regpair context_addr;
1701	int i, j, n = 2, n_max;
1702	u8 port = BP_PORT(bp);
1703
1704	ctx->ctx_flags = 0;
1705	if (!req2->num_additional_wqes)
1706		return -EINVAL;
1707
1708	n_max = req2->num_additional_wqes + 2;
1709
1710	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1711	if (ictx == NULL)
1712		return -ENOMEM;
1713
1714	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1715
1716	ictx->xstorm_ag_context.hq_prod = 1;
1717
1718	ictx->xstorm_st_context.iscsi.first_burst_length =
1719		ISCSI_DEF_FIRST_BURST_LEN;
1720	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1721		ISCSI_DEF_MAX_RECV_SEG_LEN;
1722	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1723		req1->sq_page_table_addr_lo;
1724	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1725		req1->sq_page_table_addr_hi;
1726	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1727	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1728	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1729		iscsi->hq_info.pgtbl_map & 0xffffffff;
1730	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1731		(u64) iscsi->hq_info.pgtbl_map >> 32;
1732	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1733		iscsi->hq_info.pgtbl[0];
1734	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1735		iscsi->hq_info.pgtbl[1];
1736	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1737		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1738	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1739		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1740	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1741		iscsi->r2tq_info.pgtbl[0];
1742	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1743		iscsi->r2tq_info.pgtbl[1];
1744	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1745		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1746	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1747		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1748	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1749		BNX2X_ISCSI_PBL_NOT_CACHED;
1750	ictx->xstorm_st_context.iscsi.flags.flags |=
1751		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1752	ictx->xstorm_st_context.iscsi.flags.flags |=
1753		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
1754	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1755		ETH_P_8021Q;
1756	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
1757	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
1758
1759		port = 0;
1760	}
1761	ictx->xstorm_st_context.common.flags =
1762		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
1763	ictx->xstorm_st_context.common.flags =
1764		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
1765
1766	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1767	/* TSTORM requires the base address of RQ DB & not PTE */
1768	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1769		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
1770	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1771		req2->rq_page_table_addr_hi;
1772	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1773	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1774	ictx->tstorm_st_context.tcp.flags2 |=
1775		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1776	ictx->tstorm_st_context.tcp.ooo_support_mode =
1777		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
1778
1779	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1780
1781	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1782		req2->rq_page_table_addr_lo;
1783	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
1784		req2->rq_page_table_addr_hi;
1785	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1786	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1787	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1788		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1789	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1790		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1791	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1792		iscsi->r2tq_info.pgtbl[0];
1793	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1794		iscsi->r2tq_info.pgtbl[1];
1795	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1796		req1->cq_page_table_addr_lo;
1797	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1798		req1->cq_page_table_addr_hi;
1799	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1800	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1801	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1802	ictx->ustorm_st_context.task_pbe_cache_index =
1803		BNX2X_ISCSI_PBL_NOT_CACHED;
1804	ictx->ustorm_st_context.task_pdu_cache_index =
1805		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1806
1807	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1808		if (j == 3) {
1809			if (n >= n_max)
1810				break;
1811			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1812			j = 0;
1813		}
1814		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1815		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1816			req3->qp_first_pte[j].hi;
1817		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1818			req3->qp_first_pte[j].lo;
1819	}
1820
1821	ictx->ustorm_st_context.task_pbl_base.lo =
1822		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1823	ictx->ustorm_st_context.task_pbl_base.hi =
1824		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1825	ictx->ustorm_st_context.tce_phy_addr.lo =
1826		iscsi->task_array_info.pgtbl[0];
1827	ictx->ustorm_st_context.tce_phy_addr.hi =
1828		iscsi->task_array_info.pgtbl[1];
1829	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1830	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1831	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1832	ictx->ustorm_st_context.negotiated_rx_and_flags |=
1833		ISCSI_DEF_MAX_BURST_LEN;
1834	ictx->ustorm_st_context.negotiated_rx |=
1835		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1836		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1837
1838	ictx->cstorm_st_context.hq_pbl_base.lo =
1839		iscsi->hq_info.pgtbl_map & 0xffffffff;
1840	ictx->cstorm_st_context.hq_pbl_base.hi =
1841		(u64) iscsi->hq_info.pgtbl_map >> 32;
1842	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1843	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1844	ictx->cstorm_st_context.task_pbl_base.lo =
1845		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1846	ictx->cstorm_st_context.task_pbl_base.hi =
1847		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1848	/* CSTORM and USTORM initialization differ: CSTORM requires the
1849	 * CQ DB base address, not the PTE address */
1850	ictx->cstorm_st_context.cq_db_base.lo =
1851		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
1852	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1853	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1854	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1855	for (i = 0; i < cp->num_cqs; i++) {
1856		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1857			ISCSI_INITIAL_SN;
1858		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1859			ISCSI_INITIAL_SN;
1860	}
1861
1862	ictx->xstorm_ag_context.cdu_reserved =
1863		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1864				       ISCSI_CONNECTION_TYPE);
1865	ictx->ustorm_ag_context.cdu_usage =
1866		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1867				       ISCSI_CONNECTION_TYPE);
1868	return 0;
1869}
1871
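/* Handle ISCSI_KWQE_OPCODE_OFFLOAD_CONN1.  The request spans req1, req2
 * and req2->num_additional_wqes further KWQEs; *work reports how many
 * were consumed.  Connection resources are allocated and the hardware
 * context is programmed, and an OFFLOAD_CONN KCQE carrying the result
 * (success, CID_BUSY or CTX_ALLOC_FAILURE) is returned to the ULP.
 */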
1872static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1873				   u32 num, int *work)
1874{
1875	struct iscsi_kwqe_conn_offload1 *req1;
1876	struct iscsi_kwqe_conn_offload2 *req2;
1877	struct cnic_local *cp = dev->cnic_priv;
1878	struct bnx2x *bp = netdev_priv(dev->netdev);
1879	struct cnic_context *ctx;
1880	struct iscsi_kcqe kcqe;
1881	struct kcqe *cqes[1];
1882	u32 l5_cid;
1883	int ret = 0;
1884
1885	if (num < 2) {
1886		*work = num;
1887		return -EINVAL;
1888	}
1889
1890	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1891	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1892	if ((num - 2) < req2->num_additional_wqes) {
1893		*work = num;
1894		return -EINVAL;
1895	}
1896	*work = 2 + req2->num_additional_wqes;
1897
1898	l5_cid = req1->iscsi_conn_id;
1899	if (l5_cid >= MAX_ISCSI_TBL_SZ)
1900		return -EINVAL;
1901
1902	memset(&kcqe, 0, sizeof(kcqe));
1903	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1904	kcqe.iscsi_conn_id = l5_cid;
1905	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1906
1907	ctx = &cp->ctx_tbl[l5_cid];
1908	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1909		kcqe.completion_status =
1910			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1911		goto done;
1912	}
1913
1914	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1915		atomic_dec(&cp->iscsi_conn);
1916		goto done;
1917	}
1918	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1919	if (ret) {
1920		atomic_dec(&cp->iscsi_conn);
1921		goto done;
1922	}
1923	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1924	if (ret < 0) {
1925		cnic_free_bnx2x_conn_resc(dev, l5_cid);
1926		atomic_dec(&cp->iscsi_conn);
1927		goto done;
1928	}
1929
1930	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1931	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
1932
1933done:
1934	cqes[0] = (struct kcqe *) &kcqe;
1935	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1936	return 0;
1937}
1938
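/* Pass an iSCSI connection-update KWQE through to the firmware: the
 * KWQE is copied into the connection's kwqe-16 data area and an
 * ISCSI_RAMROD_CMD_ID_UPDATE_CONN ramrod is submitted.
 */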
1940static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1941{
1942	struct cnic_local *cp = dev->cnic_priv;
1943	struct iscsi_kwqe_conn_update *req =
1944		(struct iscsi_kwqe_conn_update *) kwqe;
1945	void *data;
1946	union l5cm_specific_data l5_data;
1947	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1948	int ret;
1949
1950	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1951		return -EINVAL;
1952
1953	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1954	if (!data)
1955		return -ENOMEM;
1956
1957	memcpy(data, kwqe, sizeof(struct kwqe));
1958
1959	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1960			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1961	return ret;
1962}
1963
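/* Submit a common CFC delete ramrod for the connection and wait up to
 * CNIC_RAMROD_TMO for its completion, which arrives through the
 * CNIC_CTL_COMPLETION_CMD path and sets ctx->wait_cond.  Returns -EBUSY
 * if the completion reported a CID error.
 */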
1964static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1965{
1966	struct cnic_local *cp = dev->cnic_priv;
1967	struct bnx2x *bp = netdev_priv(dev->netdev);
1968	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1969	union l5cm_specific_data l5_data;
1970	int ret;
1971	u32 hw_cid;
1972
1973	init_waitqueue_head(&ctx->waitq);
1974	ctx->wait_cond = 0;
1975	memset(&l5_data, 0, sizeof(l5_data));
1976	hw_cid = BNX2X_HW_CID(bp, ctx->cid);
1977
1978	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1979				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
1980
1981	if (ret == 0) {
1982		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
1983		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
1984			return -EBUSY;
1985	}
1986
1987	return 0;
1988}
1989
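/* Destroy an offloaded iSCSI connection.  If less than 2 seconds have
 * elapsed since ctx->timestamp, the CFC delete is deferred to the
 * delete_task worker; otherwise it is performed here.  A DESTROY_CONN
 * KCQE is returned to the ULP in either case.
 */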
1990static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1991{
1992	struct cnic_local *cp = dev->cnic_priv;
1993	struct iscsi_kwqe_conn_destroy *req =
1994		(struct iscsi_kwqe_conn_destroy *) kwqe;
1995	u32 l5_cid = req->reserved0;
1996	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1997	int ret = 0;
1998	struct iscsi_kcqe kcqe;
1999	struct kcqe *cqes[1];
2000
2001	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2002		goto skip_cfc_delete;
2003
2004	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
2005		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
2006
2007		if (delta > (2 * HZ))
2008			delta = 0;
2009
2010		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2011		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
2012		goto destroy_reply;
2013	}
2014
2015	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
2016
2017skip_cfc_delete:
2018	cnic_free_bnx2x_conn_resc(dev, l5_cid);
2019
2020	if (!ret) {
2021		atomic_dec(&cp->iscsi_conn);
2022		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2023	}
2024
2025destroy_reply:
2026	memset(&kcqe, 0, sizeof(kcqe));
2027	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
2028	kcqe.iscsi_conn_id = l5_cid;
2029	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
2030	kcqe.iscsi_conn_context_id = req->context_id;
2031
2032	cqes[0] = (struct kcqe *) &kcqe;
2033	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
2034
2035	return 0;
2036}
2037
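/* Fill the XSTORM and TSTORM sections of the active-connection buffer:
 * hardware context address, receive buffer size, Nagle and keep-alive
 * settings.  The TCP pseudo-header checksum is seeded with
 * csum_ipv6_magic() over the address words stored in conn_addr.
 */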
2038static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2039				      struct l4_kwq_connect_req1 *kwqe1,
2040				      struct l4_kwq_connect_req3 *kwqe3,
2041				      struct l5cm_active_conn_buffer *conn_buf)
2042{
2043	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
2044	struct l5cm_xstorm_conn_buffer *xstorm_buf =
2045		&conn_buf->xstorm_conn_buffer;
2046	struct l5cm_tstorm_conn_buffer *tstorm_buf =
2047		&conn_buf->tstorm_conn_buffer;
2048	struct regpair context_addr;
2049	u32 cid = BNX2X_SW_CID(kwqe1->cid);
2050	struct in6_addr src_ip, dst_ip;
2051	int i;
2052	u32 *addrp;
2053
2054	addrp = (u32 *) &conn_addr->local_ip_addr;
2055	for (i = 0; i < 4; i++, addrp++)
2056		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2057
2058	addrp = (u32 *) &conn_addr->remote_ip_addr;
2059	for (i = 0; i < 4; i++, addrp++)
2060		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2061
2062	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
2063
2064	xstorm_buf->context_addr.hi = context_addr.hi;
2065	xstorm_buf->context_addr.lo = context_addr.lo;
2066	xstorm_buf->mss = 0xffff;
2067	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
2068	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
2069		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
2070	xstorm_buf->pseudo_header_checksum =
2071		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
2072
2073	if (kwqe3->ka_timeout) {
2074		tstorm_buf->ka_enable = 1;
2075		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
2076		tstorm_buf->ka_interval = kwqe3->ka_interval;
2077		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
2078	}
2079	tstorm_buf->max_rt_time = 0xffffffff;
2080}
2081
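/* Program the local MAC address into the storm memories: byte by byte
 * into the XSTORM ADDR0..ADDR5 slots, and in reverse byte order into
 * the TSTORM LSB/MID/MSB TCP variable slots.
 */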
2082static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2083{
2084	struct bnx2x *bp = netdev_priv(dev->netdev);
2085	u32 pfid = bp->pfid;
2086	u8 *mac = dev->mac_addr;
2087
2088	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2089		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
2090	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2091		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
2092	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2093		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
2094	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2095		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
2096	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2097		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
2098	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2099		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
2100
2101	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2102		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
2103	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2104		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2105		 mac[4]);
2106	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2107		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
2108	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2109		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2110		 mac[2]);
2111	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2112		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
2113	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2114		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2115		 mac[0]);
2116}
2117
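/* Handle L4_KWQE_OPCODE_VALUE_CONNECT1.  A connect request is two KWQEs
 * for IPv4 and three for IPv6 (kwqe2 carries the upper address words).
 * The request is marshalled into an l5cm_active_conn_buffer and a
 * TCP_CONNECT ramrod is submitted; *work reports the KWQEs consumed.
 */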
2118static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2119			      u32 num, int *work)
2120{
2121	struct cnic_local *cp = dev->cnic_priv;
2122	struct bnx2x *bp = netdev_priv(dev->netdev);
2123	struct l4_kwq_connect_req1 *kwqe1 =
2124		(struct l4_kwq_connect_req1 *) wqes[0];
2125	struct l4_kwq_connect_req3 *kwqe3;
2126	struct l5cm_active_conn_buffer *conn_buf;
2127	struct l5cm_conn_addr_params *conn_addr;
2128	union l5cm_specific_data l5_data;
2129	u32 l5_cid = kwqe1->pg_cid;
2130	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
2131	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2132	int ret;
2133
2134	if (num < 2) {
2135		*work = num;
2136		return -EINVAL;
2137	}
2138
2139	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
2140		*work = 3;
2141	else
2142		*work = 2;
2143
2144	if (num < *work) {
2145		*work = num;
2146		return -EINVAL;
2147	}
2148
2149	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
2150		netdev_err(dev->netdev, "conn_buf size too big\n");
2151		return -ENOMEM;
2152	}
2153	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2154	if (!conn_buf)
2155		return -ENOMEM;
2156
2157	memset(conn_buf, 0, sizeof(*conn_buf));
2158
2159	conn_addr = &conn_buf->conn_addr_buf;
2160	conn_addr->remote_addr_0 = csk->ha[0];
2161	conn_addr->remote_addr_1 = csk->ha[1];
2162	conn_addr->remote_addr_2 = csk->ha[2];
2163	conn_addr->remote_addr_3 = csk->ha[3];
2164	conn_addr->remote_addr_4 = csk->ha[4];
2165	conn_addr->remote_addr_5 = csk->ha[5];
2166
2167	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
2168		struct l4_kwq_connect_req2 *kwqe2 =
2169			(struct l4_kwq_connect_req2 *) wqes[1];
2170
2171		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
2172		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
2173		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
2174
2175		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
2176		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
2177		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
2178		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
2179	}
2180	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
2181
2182	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
2183	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
2184	conn_addr->local_tcp_port = kwqe1->src_port;
2185	conn_addr->remote_tcp_port = kwqe1->dst_port;
2186
2187	conn_addr->pmtu = kwqe3->pmtu;
2188	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2189
2190	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
2191		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
2192
2193	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2194			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2195	if (!ret)
2196		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2197
2198	return ret;
2199}
2200
2201static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2202{
2203	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2204	union l5cm_specific_data l5_data;
2205	int ret;
2206
2207	memset(&l5_data, 0, sizeof(l5_data));
2208	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2209			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2210	return ret;
2211}
2212
2213static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2214{
2215	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2216	union l5cm_specific_data l5_data;
2217	int ret;
2218
2219	memset(&l5_data, 0, sizeof(l5_data));
2220	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2221			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2222	return ret;
2223}

2224static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2225{
2226	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2227	struct l4_kcq kcqe;
2228	struct kcqe *cqes[1];
2229
2230	memset(&kcqe, 0, sizeof(kcqe));
2231	kcqe.pg_host_opaque = req->host_opaque;
2232	kcqe.pg_cid = req->host_opaque;
2233	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2234	cqes[0] = (struct kcqe *) &kcqe;
2235	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2236	return 0;
2237}
2238
2239static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2240{
2241	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2242	struct l4_kcq kcqe;
2243	struct kcqe *cqes[1];
2244
2245	memset(&kcqe, 0, sizeof(kcqe));
2246	kcqe.pg_host_opaque = req->pg_host_opaque;
2247	kcqe.pg_cid = req->pg_cid;
2248	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2249	cqes[0] = (struct kcqe *) &kcqe;
2250	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2251	return 0;
2252}
2253
2254static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2255{
2256	struct fcoe_kwqe_stat *req;
2257	struct fcoe_stat_ramrod_params *fcoe_stat;
2258	union l5cm_specific_data l5_data;
2259	struct cnic_local *cp = dev->cnic_priv;
2260	struct bnx2x *bp = netdev_priv(dev->netdev);
2261	int ret;
2262	u32 cid;
2263
2264	req = (struct fcoe_kwqe_stat *) kwqe;
2265	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2266
2267	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2268	if (!fcoe_stat)
2269		return -ENOMEM;
2270
2271	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2272	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2273
2274	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
2275				  FCOE_CONNECTION_TYPE, &l5_data);
2276	return ret;
2277}
2278
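/* Handle FCOE_KWQE_OPCODE_INIT1.  Expects INIT1, INIT2 and INIT3 as
 * three consecutive KWQEs, copies them into an fcoe_init_ramrod_params
 * buffer together with the KCQ2 event-queue page table, and submits the
 * FCoE init-function ramrod.
 */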
2279static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2280				 u32 num, int *work)
2281{
2282	int ret;
2283	struct cnic_local *cp = dev->cnic_priv;
2284	struct bnx2x *bp = netdev_priv(dev->netdev);
2285	u32 cid;
2286	struct fcoe_init_ramrod_params *fcoe_init;
2287	struct fcoe_kwqe_init1 *req1;
2288	struct fcoe_kwqe_init2 *req2;
2289	struct fcoe_kwqe_init3 *req3;
2290	union l5cm_specific_data l5_data;
2291
2292	if (num < 3) {
2293		*work = num;
2294		return -EINVAL;
2295	}
2296	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2297	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2298	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2299	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2300		*work = 1;
2301		return -EINVAL;
2302	}
2303	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2304		*work = 2;
2305		return -EINVAL;
2306	}
2307
2308	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2309		netdev_err(dev->netdev, "fcoe_init size too big\n");
2310		return -ENOMEM;
2311	}
2312	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2313	if (!fcoe_init)
2314		return -ENOMEM;
2315
2316	memset(fcoe_init, 0, sizeof(*fcoe_init));
2317	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2318	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2319	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
2320	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2321	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2322	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
2323
2324	fcoe_init->sb_num = cp->status_blk_num;
2325	fcoe_init->eq_prod = MAX_KCQ_IDX;
2326	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2327	cp->kcq2.sw_prod_idx = 0;
2328
2329	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2330	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2331				  FCOE_CONNECTION_TYPE, &l5_data);
2332	*work = 3;
2333	return ret;
2334}
2335
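/* Handle FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (four consecutive KWQEs).
 * Allocates connection resources, seeds the CDU fields of the FCoE
 * context and submits the offload-connection ramrod; on failure an
 * OFFLOAD_CONN KCQE with CTX_ALLOC_FAILURE status is returned instead.
 */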
2336static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2337				 u32 num, int *work)
2338{
2339	int ret = 0;
2340	u32 cid = -1, l5_cid;
2341	struct cnic_local *cp = dev->cnic_priv;
2342	struct bnx2x *bp = netdev_priv(dev->netdev);
2343	struct fcoe_kwqe_conn_offload1 *req1;
2344	struct fcoe_kwqe_conn_offload2 *req2;
2345	struct fcoe_kwqe_conn_offload3 *req3;
2346	struct fcoe_kwqe_conn_offload4 *req4;
2347	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2348	struct cnic_context *ctx;
2349	struct fcoe_context *fctx;
2350	struct regpair ctx_addr;
2351	union l5cm_specific_data l5_data;
2352	struct fcoe_kcqe kcqe;
2353	struct kcqe *cqes[1];
2354
2355	if (num < 4) {
2356		*work = num;
2357		return -EINVAL;
2358	}
2359	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2360	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2361	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2362	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2363
2364	*work = 4;
2365
2366	l5_cid = req1->fcoe_conn_id;
2367	if (l5_cid >= dev->max_fcoe_conn)
2368		goto err_reply;
2369
2370	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2371
2372	ctx = &cp->ctx_tbl[l5_cid];
2373	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2374		goto err_reply;
2375
2376	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2377	if (ret) {
2378		ret = 0;
2379		goto err_reply;
2380	}
2381	cid = ctx->cid;
2382
2383	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2384	if (fctx) {
2385		u32 hw_cid = BNX2X_HW_CID(bp, cid);
2386		u32 val;
2387
2388		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2389					     FCOE_CONNECTION_TYPE);
2390		fctx->xstorm_ag_context.cdu_reserved = val;
2391		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2392					     FCOE_CONNECTION_TYPE);
2393		fctx->ustorm_ag_context.cdu_usage = val;
2394	}
2395	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2396		netdev_err(dev->netdev, "fcoe_offload size too big\n");
2397		goto err_reply;
2398	}
2399	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2400	if (!fcoe_offload)
2401		goto err_reply;
2402
2403	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2404	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2405	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2406	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2407	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2408
2409	cid = BNX2X_HW_CID(bp, cid);
2410	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2411				  FCOE_CONNECTION_TYPE, &l5_data);
2412	if (!ret)
2413		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2414
2415	return ret;
2416
2417err_reply:
2418	if (cid != -1)
2419		cnic_free_bnx2x_conn_resc(dev, l5_cid);
2420
2421	memset(&kcqe, 0, sizeof(kcqe));
2422	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2423	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2424	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2425
2426	cqes[0] = (struct kcqe *) &kcqe;
2427	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2428	return ret;
2429}
2430
2431static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2432{
2433	struct fcoe_kwqe_conn_enable_disable *req;
2434	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2435	union l5cm_specific_data l5_data;
2436	int ret;
2437	u32 cid, l5_cid;
2438	struct cnic_local *cp = dev->cnic_priv;
2439
2440	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2441	cid = req->context_id;
2442	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2443
2444	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2445		netdev_err(dev->netdev, "fcoe_enable size too big\n");
2446		return -ENOMEM;
2447	}
2448	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2449	if (!fcoe_enable)
2450		return -ENOMEM;
2451
2452	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2453	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2454	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2455				  FCOE_CONNECTION_TYPE, &l5_data);
2456	return ret;
2457}
2458
2459static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2460{
2461	struct fcoe_kwqe_conn_enable_disable *req;
2462	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2463	union l5cm_specific_data l5_data;
2464	int ret;
2465	u32 cid, l5_cid;
2466	struct cnic_local *cp = dev->cnic_priv;
2467
2468	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2469	cid = req->context_id;
2470	l5_cid = req->conn_id;
2471	if (l5_cid >= dev->max_fcoe_conn)
2472		return -EINVAL;
2473
2474	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2475
2476	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2477		netdev_err(dev->netdev, "fcoe_disable size too big\n");
2478		return -ENOMEM;
2479	}
2480	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2481	if (!fcoe_disable)
2482		return -ENOMEM;
2483
2484	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2485	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2486	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2487				  FCOE_CONNECTION_TYPE, &l5_data);
2488	return ret;
2489}
2490
2491static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2492{
2493	struct fcoe_kwqe_conn_destroy *req;
2494	union l5cm_specific_data l5_data;
2495	int ret;
2496	u32 cid, l5_cid;
2497	struct cnic_local *cp = dev->cnic_priv;
2498	struct cnic_context *ctx;
2499	struct fcoe_kcqe kcqe;
2500	struct kcqe *cqes[1];
2501
2502	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2503	cid = req->context_id;
2504	l5_cid = req->conn_id;
2505	if (l5_cid >= dev->max_fcoe_conn)
2506		return -EINVAL;
2507
2508	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2509
2510	ctx = &cp->ctx_tbl[l5_cid];
2511
2512	init_waitqueue_head(&ctx->waitq);
2513	ctx->wait_cond = 0;
2514
2515	memset(&kcqe, 0, sizeof(kcqe));
2516	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2517	memset(&l5_data, 0, sizeof(l5_data));
2518	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2519				  FCOE_CONNECTION_TYPE, &l5_data);
2520	if (ret == 0) {
2521		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2522		if (ctx->wait_cond)
2523			kcqe.completion_status = 0;
2524	}
2525
2526	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2527	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2528
2529	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2530	kcqe.fcoe_conn_id = req->conn_id;
2531	kcqe.fcoe_conn_context_id = cid;
2532
2533	cqes[0] = (struct kcqe *) &kcqe;
2534	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2535	return ret;
2536}
2537
2538static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2539{
2540	struct cnic_local *cp = dev->cnic_priv;
2541	u32 i;
2542
2543	for (i = start_cid; i < cp->max_cid_space; i++) {
2544		struct cnic_context *ctx = &cp->ctx_tbl[i];
2545		int j;
2546
2547		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2548			msleep(10);
2549
2550		for (j = 0; j < 5; j++) {
2551			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2552				break;
2553			msleep(20);
2554		}
2555
2556		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2557			netdev_warn(dev->netdev, "CID %x not deleted\n",
2558			    ctx->cid);
2559	}
2560}
2561
2562static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2563{
2564	union l5cm_specific_data l5_data;
2565	struct cnic_local *cp = dev->cnic_priv;
2566	struct bnx2x *bp = netdev_priv(dev->netdev);
2567	int ret;
2568	u32 cid;
2569
2570	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2571
2572	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2573
2574	memset(&l5_data, 0, sizeof(l5_data));
2575	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2576				  FCOE_CONNECTION_TYPE, &l5_data);
2577	return ret;
2578}
2579
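/* A KWQE could not be submitted, most likely because of a bnx2x parity
 * error.  Synthesize a completion KCQE with a PARITY_ERROR status for
 * the affected connection and deliver it to the owning ULP driver so it
 * can clean up without waiting for a completion that will never come.
 */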
2580static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2581{
2582	struct cnic_local *cp = dev->cnic_priv;
2583	struct kcqe kcqe;
2584	struct kcqe *cqes[1];
2585	u32 cid;
2586	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2587	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
2588	u32 kcqe_op;
2589	int ulp_type;
2590
2591	cid = kwqe->kwqe_info0;
2592	memset(&kcqe, 0, sizeof(kcqe));
2593
2594	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2595		u32 l5_cid = 0;
2596
2597		ulp_type = CNIC_ULP_FCOE;
2598		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2599			struct fcoe_kwqe_conn_enable_disable *req;
2600
2601			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2602			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2603			cid = req->context_id;
2604			l5_cid = req->conn_id;
2605		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2606			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2607		} else {
2608			return;
2609		}
2610		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2611		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
2612		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2613		kcqe.kcqe_info2 = cid;
2614		kcqe.kcqe_info0 = l5_cid;
2615
2616	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
2617		ulp_type = CNIC_ULP_ISCSI;
2618		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2619			cid = kwqe->kwqe_info1;
2620
2621		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2622		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
2623		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
2624		kcqe.kcqe_info2 = cid;
2625		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2626
2627	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2628		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
2629
2630		ulp_type = CNIC_ULP_L4;
2631		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2632			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2633		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2634			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2635		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2636			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2637		else
2638			return;
2639
2640		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2641				    KCQE_FLAGS_LAYER_MASK_L4;
2642		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2643		l4kcqe->cid = cid;
2644		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2645	} else {
2646		return;
2647	}
2648
2649	cqes[0] = &kcqe;
2650	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2651}
2652
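/* Dispatch a batch of iSCSI/L4 KWQEs.  Each handler sets 'work' to the
 * number of KWQEs it consumed (multi-KWQE requests consume several).
 * Failures are logged and, for -EIO/-EAGAIN, converted into error KCQEs
 * via cnic_bnx2x_kwqe_err() to speed up ULP cleanup and reset recovery.
 */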
2653static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2654					 struct kwqe *wqes[], u32 num_wqes)
2655{
2656	int i, work, ret;
2657	u32 opcode;
2658	struct kwqe *kwqe;
2659
2660	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2661		return -EAGAIN;		/* bnx2x is down */
2662
2663	for (i = 0; i < num_wqes; ) {
2664		kwqe = wqes[i];
2665		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2666		work = 1;
2667
2668		switch (opcode) {
2669		case ISCSI_KWQE_OPCODE_INIT1:
2670			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2671			break;
2672		case ISCSI_KWQE_OPCODE_INIT2:
2673			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2674			break;
2675		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2676			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2677						     num_wqes - i, &work);
2678			break;
2679		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2680			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2681			break;
2682		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2683			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2684			break;
2685		case L4_KWQE_OPCODE_VALUE_CONNECT1:
2686			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2687						 &work);
2688			break;
2689		case L4_KWQE_OPCODE_VALUE_CLOSE:
2690			ret = cnic_bnx2x_close(dev, kwqe);
2691			break;
2692		case L4_KWQE_OPCODE_VALUE_RESET:
2693			ret = cnic_bnx2x_reset(dev, kwqe);
2694			break;
2695		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2696			ret = cnic_bnx2x_offload_pg(dev, kwqe);
2697			break;
2698		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2699			ret = cnic_bnx2x_update_pg(dev, kwqe);
2700			break;
2701		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2702			ret = 0;
2703			break;
2704		default:
2705			ret = 0;
2706			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2707				   opcode);
2708			break;
2709		}
2710		if (ret < 0) {
2711			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2712				   opcode);
2713
2714			/* Possibly bnx2x parity error, send completion
2715			 * to ulp drivers with error code to speed up
2716			 * cleanup and reset recovery.
2717			 */
2718			if (ret == -EIO || ret == -EAGAIN)
2719				cnic_bnx2x_kwqe_err(dev, kwqe);
2720		}
2721		i += work;
2722	}
2723	return 0;
2724}
2725
2726static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2727					struct kwqe *wqes[], u32 num_wqes)
2728{
2729	struct bnx2x *bp = netdev_priv(dev->netdev);
2730	int i, work, ret;
2731	u32 opcode;
2732	struct kwqe *kwqe;
2733
2734	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2735		return -EAGAIN;		/* bnx2x is down */
2736
2737	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
2738		return -EINVAL;
2739
2740	for (i = 0; i < num_wqes; ) {
2741		kwqe = wqes[i];
2742		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2743		work = 1;
2744
2745		switch (opcode) {
2746		case FCOE_KWQE_OPCODE_INIT1:
2747			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2748						    num_wqes - i, &work);
2749			break;
2750		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2751			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2752						    num_wqes - i, &work);
2753			break;
2754		case FCOE_KWQE_OPCODE_ENABLE_CONN:
2755			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2756			break;
2757		case FCOE_KWQE_OPCODE_DISABLE_CONN:
2758			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2759			break;
2760		case FCOE_KWQE_OPCODE_DESTROY_CONN:
2761			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2762			break;
2763		case FCOE_KWQE_OPCODE_DESTROY:
2764			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2765			break;
2766		case FCOE_KWQE_OPCODE_STAT:
2767			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2768			break;
2769		default:
2770			ret = 0;
2771			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2772				   opcode);
2773			break;
2774		}
2775		if (ret < 0) {
2776			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2777				   opcode);
2778
2779			/* Possibly bnx2x parity error, send completion
2780			 * to ulp drivers with error code to speed up
2781			 * cleanup and reset recovery.
2782			 */
2783			if (ret == -EIO || ret == -EAGAIN)
2784				cnic_bnx2x_kwqe_err(dev, kwqe);
2785		}
2786		i += work;
2787	}
2788	return 0;
2789}
2790
2791static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2792				   u32 num_wqes)
2793{
2794	int ret = -EINVAL;
2795	u32 layer_code;
2796
2797	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2798		return -EAGAIN;		/* bnx2x is down */
2799
2800	if (!num_wqes)
2801		return 0;
2802
2803	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2804	switch (layer_code) {
2805	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2806	case KWQE_FLAGS_LAYER_MASK_L4:
2807	case KWQE_FLAGS_LAYER_MASK_L2:
2808		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2809		break;
2810
2811	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2812		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2813		break;
2814	}
2815	return ret;
2816}
2817
2818static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2819{
2820	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2821		return KCQE_FLAGS_LAYER_MASK_L4;
2822
2823	return opflag & KCQE_FLAGS_LAYER_MASK;
2824}
2825
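/* Deliver completed KCQEs to the ULP drivers.  Consecutive KCQEs of the
 * same protocol layer are batched into a single indicate_kcqes() call;
 * ramrod completions are counted and their slow-path queue credits are
 * returned afterwards through cnic_spq_completion().
 */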
2826static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2827{
2828	struct cnic_local *cp = dev->cnic_priv;
2829	int i, j, comp = 0;
2830
2831	i = 0;
2832	j = 1;
2833	while (num_cqes) {
2834		struct cnic_ulp_ops *ulp_ops;
2835		int ulp_type;
2836		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2837		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2838
2839		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2840			comp++;
2841
2842		while (j < num_cqes) {
2843			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2844
2845			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2846				break;
2847
2848			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2849				comp++;
2850			j++;
2851		}
2852
2853		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2854			ulp_type = CNIC_ULP_RDMA;
2855		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2856			ulp_type = CNIC_ULP_ISCSI;
2857		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2858			ulp_type = CNIC_ULP_FCOE;
2859		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2860			ulp_type = CNIC_ULP_L4;
2861		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2862			goto end;
2863		else {
2864			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2865				   kcqe_op_flag);
2866			goto end;
2867		}
2868
2869		rcu_read_lock();
2870		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2871		if (likely(ulp_ops)) {
2872			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2873						  cp->completed_kcq + i, j);
2874		}
2875		rcu_read_unlock();
2876end:
2877		num_cqes -= j;
2878		i += j;
2879		j = 1;
2880	}
2881	if (unlikely(comp))
2882		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2883}
2884
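/* Gather completed KCQEs (at most MAX_COMPLETED_KCQE) between the
 * software and hardware producer indices.  sw_prod_idx is only advanced
 * past the last KCQE whose KCQE_FLAGS_NEXT bit is clear, so a partially
 * received multi-KCQE group is left for the next poll.
 */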
2885static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2886{
2887	struct cnic_local *cp = dev->cnic_priv;
2888	u16 i, ri, hw_prod, last;
2889	struct kcqe *kcqe;
2890	int kcqe_cnt = 0, last_cnt = 0;
2891
2892	i = ri = last = info->sw_prod_idx;
2893	ri &= MAX_KCQ_IDX;
2894	hw_prod = *info->hw_prod_idx_ptr;
2895	hw_prod = info->hw_idx(hw_prod);
2896
2897	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2898		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2899		cp->completed_kcq[kcqe_cnt++] = kcqe;
2900		i = info->next_idx(i);
2901		ri = i & MAX_KCQ_IDX;
2902		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2903			last_cnt = kcqe_cnt;
2904			last = i;
2905		}
2906	}
2907
2908	info->sw_prod_idx = last;
2909	return last_cnt;
2910}
2911
2912static int cnic_l2_completion(struct cnic_local *cp)
2913{
2914	u16 hw_cons, sw_cons;
2915	struct cnic_uio_dev *udev = cp->udev;
2916	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2917					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
2918	u32 cmd;
2919	int comp = 0;
2920
2921	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2922		return 0;
2923
2924	hw_cons = *cp->rx_cons_ptr;
2925	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2926		hw_cons++;
2927
2928	sw_cons = cp->rx_cons;
2929	while (sw_cons != hw_cons) {
2930		u8 cqe_fp_flags;
2931
2932		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2933		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2934		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2935			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2936			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2937			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2938			    cmd == RAMROD_CMD_ID_ETH_HALT)
2939				comp++;
2940		}
2941		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2942	}
2943	return comp;
2944}
2945
2946static void cnic_chk_pkt_rings(struct cnic_local *cp)
2947{
2948	u16 rx_cons, tx_cons;
2949	int comp = 0;
2950
2951	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2952		return;
2953
2954	rx_cons = *cp->rx_cons_ptr;
2955	tx_cons = *cp->tx_cons_ptr;
2956	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2957		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2958			comp = cnic_l2_completion(cp);
2959
2960		cp->tx_cons = tx_cons;
2961		cp->rx_cons = rx_cons;
2962
2963		if (cp->udev)
2964			uio_event_notify(&cp->udev->cnic_uinfo);
2965	}
2966	if (comp)
2967		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2968}
2969
2970static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2971{
2972	struct cnic_local *cp = dev->cnic_priv;
2973	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2974	int kcqe_cnt;
2975
2976	/* status block index must be read before reading other fields */
2977	rmb();
2978	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2979
2980	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2981
2982		service_kcqes(dev, kcqe_cnt);
2983
2984		/* Tell compiler that status_blk fields can change. */
2985		barrier();
2986		status_idx = (u16) *cp->kcq1.status_idx_ptr;
2987		/* status block index must be read first */
2988		rmb();
2989		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2990	}
2991
2992	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2993
2994	cnic_chk_pkt_rings(cp);
2995
2996	return status_idx;
2997}
2998
2999static int cnic_service_bnx2(void *data, void *status_blk)
3000{
3001	struct cnic_dev *dev = data;
3002
3003	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3004		struct status_block *sblk = status_blk;
3005
3006		return sblk->status_idx;
3007	}
3008
3009	return cnic_service_bnx2_queues(dev);
3010}
3011
3012static void cnic_service_bnx2_msix(unsigned long data)
3013{
3014	struct cnic_dev *dev = (struct cnic_dev *) data;
3015	struct cnic_local *cp = dev->cnic_priv;
3016
3017	cp->last_status_idx = cnic_service_bnx2_queues(dev);
3018
3019	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3020		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3021}
3022
3023static void cnic_doirq(struct cnic_dev *dev)
3024{
3025	struct cnic_local *cp = dev->cnic_priv;
3026
3027	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3028		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
3029
3030		prefetch(cp->status_blk.gen);
3031		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
3032
3033		tasklet_schedule(&cp->cnic_irq_task);
3034	}
3035}
3036
3037static irqreturn_t cnic_irq(int irq, void *dev_instance)
3038{
3039	struct cnic_dev *dev = dev_instance;
3040	struct cnic_local *cp = dev->cnic_priv;
3041
3042	if (cp->ack_int)
3043		cp->ack_int(dev);
3044
3045	cnic_doirq(dev);
3046
3047	return IRQ_HANDLED;
3048}
3049
3050static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3051				      u16 index, u8 op, u8 update)
3052{
3053	struct bnx2x *bp = netdev_priv(dev->netdev);
3054	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
3055		       COMMAND_REG_INT_ACK);
3056	struct igu_ack_register igu_ack;
3057
3058	igu_ack.status_block_index = index;
3059	igu_ack.sb_id_and_flags =
3060			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3061			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3062			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3063			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3064
3065	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3066}
3067
3068static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3069			    u16 index, u8 op, u8 update)
3070{
3071	struct igu_regular cmd_data;
3072	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3073
3074	cmd_data.sb_id_and_flags =
3075		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
3076		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3077		(update << IGU_REGULAR_BUPDATE_SHIFT) |
3078		(op << IGU_REGULAR_ENABLE_INT_SHIFT);
3079
3081	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3082}
3083
3084static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3085{
3086	struct cnic_local *cp = dev->cnic_priv;
3087
3088	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3089			   IGU_INT_DISABLE, 0);
3090}
3091
3092static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3093{
3094	struct cnic_local *cp = dev->cnic_priv;
3095
3096	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3097			IGU_INT_DISABLE, 0);
3098}
3099
3100static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3101{
3102	struct cnic_local *cp = dev->cnic_priv;
3103
3104	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3105			   IGU_INT_ENABLE, 1);
3106}
3107
3108static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3109{
3110	struct cnic_local *cp = dev->cnic_priv;
3111
3112	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3113			IGU_INT_ENABLE, 1);
3114}
3115
3116static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3117{
3118	u32 last_status = *info->status_idx_ptr;
3119	int kcqe_cnt;
3120
3121	/* status block index must be read before reading the KCQ */
3122	rmb();
3123	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3124
3125		service_kcqes(dev, kcqe_cnt);
3126
3127		/* Tell compiler that sblk fields can change. */
3128		barrier();
3129
3130		last_status = *info->status_idx_ptr;
3131		/* status block index must be read before reading the KCQ */
3132		rmb();
3133	}
3134	return last_status;
3135}
3136
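/* Tasklet servicing the bnx2x KCQs.  KCQ1 (and KCQ2 on FCoE-capable
 * chips) are drained repeatedly until the status block index stops
 * changing, after which the IGU interrupt is re-armed.
 */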
3137static void cnic_service_bnx2x_bh(unsigned long data)
3138{
3139	struct cnic_dev *dev = (struct cnic_dev *) data;
3140	struct cnic_local *cp = dev->cnic_priv;
3141	struct bnx2x *bp = netdev_priv(dev->netdev);
3142	u32 status_idx, new_status_idx;
3143
3144	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3145		return;
3146
3147	while (1) {
3148		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3149
3150		CNIC_WR16(dev, cp->kcq1.io_addr,
3151			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3152
3153		if (!CNIC_SUPPORTS_FCOE(bp)) {
3154			cp->arm_int(dev, status_idx);
3155			break;
3156		}
3157
3158		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3159
3160		if (new_status_idx != status_idx)
3161			continue;
3162
3163		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3164			  MAX_KCQ_IDX);
3165
3166		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3167				status_idx, IGU_INT_ENABLE, 1);
3168
3169		break;
3170	}
3171}
3172
3173static int cnic_service_bnx2x(void *data, void *status_blk)
3174{
3175	struct cnic_dev *dev = data;
3176	struct cnic_local *cp = dev->cnic_priv;
3177
3178	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3179		cnic_doirq(dev);
3180
3181	cnic_chk_pkt_rings(cp);
3182
3183	return 0;
3184}
3185
3186static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3187{
3188	struct cnic_ulp_ops *ulp_ops;
3189
3190	if (if_type == CNIC_ULP_ISCSI)
3191		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3192
3193	mutex_lock(&cnic_lock);
3194	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3195					    lockdep_is_held(&cnic_lock));
3196	if (!ulp_ops) {
3197		mutex_unlock(&cnic_lock);
3198		return;
3199	}
3200	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3201	mutex_unlock(&cnic_lock);
3202
3203	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3204		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3205
3206	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3207}
3208
3209static void cnic_ulp_stop(struct cnic_dev *dev)
3210{
3211	struct cnic_local *cp = dev->cnic_priv;
3212	int if_type;
3213
3214	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3215		cnic_ulp_stop_one(cp, if_type);
3216}
3217
3218static void cnic_ulp_start(struct cnic_dev *dev)
3219{
3220	struct cnic_local *cp = dev->cnic_priv;
3221	int if_type;
3222
3223	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3224		struct cnic_ulp_ops *ulp_ops;
3225
3226		mutex_lock(&cnic_lock);
3227		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3228						    lockdep_is_held(&cnic_lock));
3229		if (!ulp_ops || !ulp_ops->cnic_start) {
3230			mutex_unlock(&cnic_lock);
3231			continue;
3232		}
3233		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3234		mutex_unlock(&cnic_lock);
3235
3236		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3237			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3238
3239		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3240	}
3241}
3242
3243static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3244{
3245	struct cnic_local *cp = dev->cnic_priv;
3246	struct cnic_ulp_ops *ulp_ops;
3247	int rc;
3248
3249	mutex_lock(&cnic_lock);
3250	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
3251					    lockdep_is_held(&cnic_lock));
3252	if (ulp_ops && ulp_ops->cnic_get_stats)
3253		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3254	else
3255		rc = -ENODEV;
3256	mutex_unlock(&cnic_lock);
3257	return rc;
3258}
3259
3260static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3261{
3262	struct cnic_dev *dev = data;
3263	int ulp_type = CNIC_ULP_ISCSI;
3264
3265	switch (info->cmd) {
3266	case CNIC_CTL_STOP_CMD:
3267		cnic_hold(dev);
3268
3269		cnic_ulp_stop(dev);
3270		cnic_stop_hw(dev);
3271
3272		cnic_put(dev);
3273		break;
3274	case CNIC_CTL_START_CMD:
3275		cnic_hold(dev);
3276
3277		if (!cnic_start_hw(dev))
3278			cnic_ulp_start(dev);
3279
3280		cnic_put(dev);
3281		break;
3282	case CNIC_CTL_STOP_ISCSI_CMD: {
3283		struct cnic_local *cp = dev->cnic_priv;
3284		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3285		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3286		break;
3287	}
3288	case CNIC_CTL_COMPLETION_CMD: {
3289		struct cnic_ctl_completion *comp = &info->data.comp;
3290		u32 cid = BNX2X_SW_CID(comp->cid);
3291		u32 l5_cid;
3292		struct cnic_local *cp = dev->cnic_priv;
3293
3294		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3295			break;
3296
3297		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3298			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3299
3300			if (unlikely(comp->error)) {
3301				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3302				netdev_err(dev->netdev,
3303					   "CID %x CFC delete comp error %x\n",
3304					   cid, comp->error);
3305			}
3306
3307			ctx->wait_cond = 1;
3308			wake_up(&ctx->waitq);
3309		}
3310		break;
3311	}
3312	case CNIC_CTL_FCOE_STATS_GET_CMD:
3313		ulp_type = CNIC_ULP_FCOE;
3314		fallthrough;
3315	case CNIC_CTL_ISCSI_STATS_GET_CMD:
3316		cnic_hold(dev);
3317		cnic_copy_ulp_stats(dev, ulp_type);
3318		cnic_put(dev);
3319		break;
3320
3321	default:
3322		return -EINVAL;
3323	}
3324	return 0;
3325}
3326
3327static void cnic_ulp_init(struct cnic_dev *dev)
3328{
3329	int i;
3330	struct cnic_local *cp = dev->cnic_priv;
3331
3332	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3333		struct cnic_ulp_ops *ulp_ops;
3334
3335		mutex_lock(&cnic_lock);
3336		ulp_ops = cnic_ulp_tbl_prot(i);
3337		if (!ulp_ops || !ulp_ops->cnic_init) {
3338			mutex_unlock(&cnic_lock);
3339			continue;
3340		}
3341		ulp_get(ulp_ops);
3342		mutex_unlock(&cnic_lock);
3343
3344		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3345			ulp_ops->cnic_init(dev);
3346
3347		ulp_put(ulp_ops);
3348	}
3349}
3350
3351static void cnic_ulp_exit(struct cnic_dev *dev)
3352{
3353	int i;
3354	struct cnic_local *cp = dev->cnic_priv;
3355
3356	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3357		struct cnic_ulp_ops *ulp_ops;
3358
3359		mutex_lock(&cnic_lock);
3360		ulp_ops = cnic_ulp_tbl_prot(i);
3361		if (!ulp_ops || !ulp_ops->cnic_exit) {
3362			mutex_unlock(&cnic_lock);
3363			continue;
3364		}
3365		ulp_get(ulp_ops);
3366		mutex_unlock(&cnic_lock);
3367
3368		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3369			ulp_ops->cnic_exit(dev);
3370
3371		ulp_put(ulp_ops);
3372	}
3373}
3374
3375static int cnic_cm_offload_pg(struct cnic_sock *csk)
3376{
3377	struct cnic_dev *dev = csk->dev;
3378	struct l4_kwq_offload_pg *l4kwqe;
3379	struct kwqe *wqes[1];
3380
3381	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3382	memset(l4kwqe, 0, sizeof(*l4kwqe));
3383	wqes[0] = (struct kwqe *) l4kwqe;
3384
3385	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3386	l4kwqe->flags =
3387		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3388	l4kwqe->l2hdr_nbytes = ETH_HLEN;
3389
3390	l4kwqe->da0 = csk->ha[0];
3391	l4kwqe->da1 = csk->ha[1];
3392	l4kwqe->da2 = csk->ha[2];
3393	l4kwqe->da3 = csk->ha[3];
3394	l4kwqe->da4 = csk->ha[4];
3395	l4kwqe->da5 = csk->ha[5];
3396
3397	l4kwqe->sa0 = dev->mac_addr[0];
3398	l4kwqe->sa1 = dev->mac_addr[1];
3399	l4kwqe->sa2 = dev->mac_addr[2];
3400	l4kwqe->sa3 = dev->mac_addr[3];
3401	l4kwqe->sa4 = dev->mac_addr[4];
3402	l4kwqe->sa5 = dev->mac_addr[5];
3403
3404	l4kwqe->etype = ETH_P_IP;
3405	l4kwqe->ipid_start = DEF_IPID_START;
3406	l4kwqe->host_opaque = csk->l5_cid;
3407
3408	if (csk->vlan_id) {
3409		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3410		l4kwqe->vlan_tag = csk->vlan_id;
3411		l4kwqe->l2hdr_nbytes += 4;
3412	}
3413
3414	return dev->submit_kwqes(dev, wqes, 1);
3415}
3416
3417static int cnic_cm_update_pg(struct cnic_sock *csk)
3418{
3419	struct cnic_dev *dev = csk->dev;
3420	struct l4_kwq_update_pg *l4kwqe;
3421	struct kwqe *wqes[1];
3422
3423	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3424	memset(l4kwqe, 0, sizeof(*l4kwqe));
3425	wqes[0] = (struct kwqe *) l4kwqe;
3426
3427	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3428	l4kwqe->flags =
3429		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3430	l4kwqe->pg_cid = csk->pg_cid;
3431
3432	l4kwqe->da0 = csk->ha[0];
3433	l4kwqe->da1 = csk->ha[1];
3434	l4kwqe->da2 = csk->ha[2];
3435	l4kwqe->da3 = csk->ha[3];
3436	l4kwqe->da4 = csk->ha[4];
3437	l4kwqe->da5 = csk->ha[5];
3438
3439	l4kwqe->pg_host_opaque = csk->l5_cid;
3440	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3441
3442	return dev->submit_kwqes(dev, wqes, 1);
3443}
3444
3445static int cnic_cm_upload_pg(struct cnic_sock *csk)
3446{
3447	struct cnic_dev *dev = csk->dev;
3448	struct l4_kwq_upload *l4kwqe;
3449	struct kwqe *wqes[1];
3450
3451	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3452	memset(l4kwqe, 0, sizeof(*l4kwqe));
3453	wqes[0] = (struct kwqe *) l4kwqe;
3454
3455	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3456	l4kwqe->flags =
3457		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3458	l4kwqe->cid = csk->pg_cid;
3459
3460	return dev->submit_kwqes(dev, wqes, 1);
3461}
3462
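/* Build and submit an L4 connect request from the cnic_sock state:
 * CONNECT1 + CONNECT3 for IPv4, with a CONNECT2 KWQE inserted for IPv6
 * to carry the upper address words.  The advertised MSS is the path MTU
 * minus the IP and TCP header sizes.
 */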
3463static int cnic_cm_conn_req(struct cnic_sock *csk)
3464{
3465	struct cnic_dev *dev = csk->dev;
3466	struct l4_kwq_connect_req1 *l4kwqe1;
3467	struct l4_kwq_connect_req2 *l4kwqe2;
3468	struct l4_kwq_connect_req3 *l4kwqe3;
3469	struct kwqe *wqes[3];
3470	u8 tcp_flags = 0;
3471	int num_wqes = 2;
3472
3473	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3474	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3475	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3476	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3477	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3478	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3479
3480	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3481	l4kwqe3->flags =
3482		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3483	l4kwqe3->ka_timeout = csk->ka_timeout;
3484	l4kwqe3->ka_interval = csk->ka_interval;
3485	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3486	l4kwqe3->tos = csk->tos;
3487	l4kwqe3->ttl = csk->ttl;
3488	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3489	l4kwqe3->pmtu = csk->mtu;
3490	l4kwqe3->rcv_buf = csk->rcv_buf;
3491	l4kwqe3->snd_buf = csk->snd_buf;
3492	l4kwqe3->seed = csk->seed;
3493
3494	wqes[0] = (struct kwqe *) l4kwqe1;
3495	if (test_bit(SK_F_IPV6, &csk->flags)) {
3496		wqes[1] = (struct kwqe *) l4kwqe2;
3497		wqes[2] = (struct kwqe *) l4kwqe3;
3498		num_wqes = 3;
3499
3500		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3501		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3502		l4kwqe2->flags =
3503			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3504			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3505		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3506		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3507		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3508		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3509		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3510		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3511		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3512			       sizeof(struct tcphdr);
3513	} else {
3514		wqes[1] = (struct kwqe *) l4kwqe3;
3515		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3516			       sizeof(struct tcphdr);
3517	}
3518
3519	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3520	l4kwqe1->flags =
3521		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3522		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3523	l4kwqe1->cid = csk->cid;
3524	l4kwqe1->pg_cid = csk->pg_cid;
3525	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3526	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3527	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3528	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3529	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3530		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3531	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3532		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3533	if (csk->tcp_flags & SK_TCP_NAGLE)
3534		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3535	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3536		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3537	if (csk->tcp_flags & SK_TCP_SACK)
3538		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3539	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3540		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3541
3542	l4kwqe1->tcp_flags = tcp_flags;
3543
3544	return dev->submit_kwqes(dev, wqes, num_wqes);
3545}
3546
3547static int cnic_cm_close_req(struct cnic_sock *csk)
3548{
3549	struct cnic_dev *dev = csk->dev;
3550	struct l4_kwq_close_req *l4kwqe;
3551	struct kwqe *wqes[1];
3552
3553	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3554	memset(l4kwqe, 0, sizeof(*l4kwqe));
3555	wqes[0] = (struct kwqe *) l4kwqe;
3556
3557	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3558	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3559	l4kwqe->cid = csk->cid;
3560
3561	return dev->submit_kwqes(dev, wqes, 1);
3562}
3563
3564static int cnic_cm_abort_req(struct cnic_sock *csk)
3565{
3566	struct cnic_dev *dev = csk->dev;
3567	struct l4_kwq_reset_req *l4kwqe;
3568	struct kwqe *wqes[1];
3569
3570	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3571	memset(l4kwqe, 0, sizeof(*l4kwqe));
3572	wqes[0] = (struct kwqe *) l4kwqe;
3573
3574	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3575	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3576	l4kwqe->cid = csk->cid;
3577
3578	return dev->submit_kwqes(dev, wqes, 1);
3579}
3580
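/* Claim the socket table entry for @l5_cid and initialize it with the
 * default TCP parameters.  Fails with -EAGAIN while the entry is still
 * referenced or an offload is in progress, and -EBUSY if it is already
 * in use.
 */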
3581static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3582			  u32 l5_cid, struct cnic_sock **csk, void *context)
3583{
3584	struct cnic_local *cp = dev->cnic_priv;
3585	struct cnic_sock *csk1;
3586
3587	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3588		return -EINVAL;
3589
3590	if (cp->ctx_tbl) {
3591		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3592
3593		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3594			return -EAGAIN;
3595	}
3596
3597	csk1 = &cp->csk_tbl[l5_cid];
3598	if (atomic_read(&csk1->ref_count))
3599		return -EAGAIN;
3600
3601	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3602		return -EBUSY;
3603
3604	csk1->dev = dev;
3605	csk1->cid = cid;
3606	csk1->l5_cid = l5_cid;
3607	csk1->ulp_type = ulp_type;
3608	csk1->context = context;
3609
3610	csk1->ka_timeout = DEF_KA_TIMEOUT;
3611	csk1->ka_interval = DEF_KA_INTERVAL;
3612	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3613	csk1->tos = DEF_TOS;
3614	csk1->ttl = DEF_TTL;
3615	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3616	csk1->rcv_buf = DEF_RCV_BUF;
3617	csk1->snd_buf = DEF_SND_BUF;
3618	csk1->seed = DEF_SEED;
3619	csk1->tcp_flags = 0;
3620
3621	*csk = csk1;
3622	return 0;
3623}
3624
3625static void cnic_cm_cleanup(struct cnic_sock *csk)
3626{
3627	if (csk->src_port) {
3628		struct cnic_dev *dev = csk->dev;
3629		struct cnic_local *cp = dev->cnic_priv;
3630
3631		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3632		csk->src_port = 0;
3633	}
3634}
3635
3636static void cnic_close_conn(struct cnic_sock *csk)
3637{
3638	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3639		cnic_cm_upload_pg(csk);
3640		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3641	}
3642	cnic_cm_cleanup(csk);
3643}
3644
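/* Clear the INUSE flag, then wait until we hold the only remaining
 * reference before cleaning up the socket.
 */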
3645static int cnic_cm_destroy(struct cnic_sock *csk)
3646{
3647	if (!cnic_in_use(csk))
3648		return -EINVAL;
3649
3650	csk_hold(csk);
3651	clear_bit(SK_F_INUSE, &csk->flags);
3652	smp_mb__after_atomic();
3653	while (atomic_read(&csk->ref_count) != 1)
3654		msleep(1);
3655	cnic_cm_cleanup(csk);
3656
3657	csk->flags = 0;
3658	csk_put(csk);
3659	return 0;
3660}
3661
3662static inline u16 cnic_get_vlan(struct net_device *dev,
3663				struct net_device **vlan_dev)
3664{
3665	if (is_vlan_dev(dev)) {
3666		*vlan_dev = vlan_dev_real_dev(dev);
3667		return vlan_dev_vlan_id(dev);
3668	}
3669	*vlan_dev = dev;
3670	return 0;
3671}
3672
3673static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3674			     struct dst_entry **dst)
3675{
3676#if defined(CONFIG_INET)
3677	struct rtable *rt;
3678
3679	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3680	if (!IS_ERR(rt)) {
3681		*dst = &rt->dst;
3682		return 0;
3683	}
3684	return PTR_ERR(rt);
3685#else
3686	return -ENETUNREACH;
3687#endif
3688}
3689
3690static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3691			     struct dst_entry **dst)
3692{
3693#if IS_ENABLED(CONFIG_IPV6)
3694	struct flowi6 fl6;
3695
3696	memset(&fl6, 0, sizeof(fl6));
3697	fl6.daddr = dst_addr->sin6_addr;
3698	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3699		fl6.flowi6_oif = dst_addr->sin6_scope_id;
3700
3701	*dst = ip6_route_output(&init_net, NULL, &fl6);
3702	if ((*dst)->error) {
3703		dst_release(*dst);
3704		*dst = NULL;
3705		return -ENETUNREACH;
3706	} else
3707		return 0;
3708#endif
3709
3710	return -ENETUNREACH;
3711}
3712
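/* Resolve a route to @dst_addr and map the egress net_device (or its
 * real device, if it is a VLAN) to a registered cnic device.
 */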
3713static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3714					   int ulp_type)
3715{
3716	struct cnic_dev *dev = NULL;
3717	struct dst_entry *dst;
3718	struct net_device *netdev = NULL;
3719	int err = -ENETUNREACH;
3720
3721	if (dst_addr->sin_family == AF_INET)
3722		err = cnic_get_v4_route(dst_addr, &dst);
3723	else if (dst_addr->sin_family == AF_INET6) {
3724		struct sockaddr_in6 *dst_addr6 =
3725			(struct sockaddr_in6 *) dst_addr;
3726
3727		err = cnic_get_v6_route(dst_addr6, &dst);
3728	} else
3729		return NULL;
3730
3731	if (err)
3732		return NULL;
3733
3734	if (!dst->dev)
3735		goto done;
3736
3737	cnic_get_vlan(dst->dev, &netdev);
3738
3739	dev = cnic_from_netdev(netdev);
3740
3741done:
3742	dst_release(dst);
3743	if (dev)
3744		cnic_put(dev);
3745	return dev;
3746}
3747
3748static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3749{
3750	struct cnic_dev *dev = csk->dev;
3751	struct cnic_local *cp = dev->cnic_priv;
3752
3753	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3754}
3755
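/* Resolve the route to the remote address, record the destination
 * IP/port, VLAN ID and MTU, and reserve a local source port from
 * csk_port_tbl, allocating a new one if the requested port cannot be
 * claimed.
 */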
3756static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3757{
3758	struct cnic_dev *dev = csk->dev;
3759	struct cnic_local *cp = dev->cnic_priv;
3760	int is_v6, rc = 0;
3761	struct dst_entry *dst = NULL;
3762	struct net_device *realdev;
3763	__be16 local_port;
3764	u32 port_id;
3765
3766	if (saddr->local.v6.sin6_family == AF_INET6 &&
3767	    saddr->remote.v6.sin6_family == AF_INET6)
3768		is_v6 = 1;
3769	else if (saddr->local.v4.sin_family == AF_INET &&
3770		 saddr->remote.v4.sin_family == AF_INET)
3771		is_v6 = 0;
3772	else
3773		return -EINVAL;
3774
3775	clear_bit(SK_F_IPV6, &csk->flags);
3776
3777	if (is_v6) {
3778		set_bit(SK_F_IPV6, &csk->flags);
3779		cnic_get_v6_route(&saddr->remote.v6, &dst);
3780
3781		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3782		       sizeof(struct in6_addr));
3783		csk->dst_port = saddr->remote.v6.sin6_port;
3784		local_port = saddr->local.v6.sin6_port;
3785
3786	} else {
3787		cnic_get_v4_route(&saddr->remote.v4, &dst);
3788
3789		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3790		csk->dst_port = saddr->remote.v4.sin_port;
3791		local_port = saddr->local.v4.sin_port;
3792	}
3793
3794	csk->vlan_id = 0;
3795	csk->mtu = dev->netdev->mtu;
3796	if (dst && dst->dev) {
3797		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3798		if (realdev == dev->netdev) {
3799			csk->vlan_id = vlan;
3800			csk->mtu = dst_mtu(dst);
3801		}
3802	}
3803
3804	port_id = be16_to_cpu(local_port);
3805	if (port_id >= CNIC_LOCAL_PORT_MIN &&
3806	    port_id < CNIC_LOCAL_PORT_MAX) {
3807		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3808			port_id = 0;
3809	} else
3810		port_id = 0;
3811
3812	if (!port_id) {
3813		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3814		if (port_id == -1) {
3815			rc = -ENOMEM;
3816			goto err_out;
3817		}
3818		local_port = cpu_to_be16(port_id);
3819	}
3820	csk->src_port = local_port;
3821
3822err_out:
3823	dst_release(dst);
3824	return rc;
3825}
3826
3827static void cnic_init_csk_state(struct cnic_sock *csk)
3828{
3829	csk->state = 0;
3830	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3831	clear_bit(SK_F_CLOSING, &csk->flags);
3832}
3833
3834static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3835{
3836	struct cnic_local *cp = csk->dev->cnic_priv;
3837	int err = 0;
3838
3839	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3840		return -EOPNOTSUPP;
3841
3842	if (!cnic_in_use(csk))
3843		return -EINVAL;
3844
3845	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3846		return -EINVAL;
3847
3848	cnic_init_csk_state(csk);
3849
3850	err = cnic_get_route(csk, saddr);
3851	if (err)
3852		goto err_out;
3853
3854	err = cnic_resolve_addr(csk, saddr);
3855	if (!err)
3856		return 0;
3857
3858err_out:
3859	clear_bit(SK_F_CONNECT_START, &csk->flags);
3860	return err;
3861}
3862
3863static int cnic_cm_abort(struct cnic_sock *csk)
3864{
3865	struct cnic_local *cp = csk->dev->cnic_priv;
3866	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3867
3868	if (!cnic_in_use(csk))
3869		return -EINVAL;
3870
3871	if (cnic_abort_prep(csk))
3872		return cnic_cm_abort_req(csk);
3873
3874	/* Getting here means that we haven't started connect, or
3875	 * connect was not successful, or it has been reset by the target.
3876	 */
3877
3878	cp->close_conn(csk, opcode);
3879	if (csk->state != opcode) {
3880		/* Wait for remote reset sequence to complete */
3881		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3882			msleep(1);
3883
3884		return -EALREADY;
3885	}
3886
3887	return 0;
3888}
3889
3890static int cnic_cm_close(struct cnic_sock *csk)
3891{
3892	if (!cnic_in_use(csk))
3893		return -EINVAL;
3894
3895	if (cnic_close_prep(csk)) {
3896		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3897		return cnic_cm_close_req(csk);
3898	} else {
3899		/* Wait for remote reset sequence to complete */
3900		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3901			msleep(1);
3902
3903		return -EALREADY;
3904	}
3905	return 0;
3906}
3907
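/* Deliver a connection-manager completion event to the registered ULP
 * under RCU protection.
 */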
3908static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3909			   u8 opcode)
3910{
3911	struct cnic_ulp_ops *ulp_ops;
3912	int ulp_type = csk->ulp_type;
3913
3914	rcu_read_lock();
3915	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3916	if (ulp_ops) {
3917		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3918			ulp_ops->cm_connect_complete(csk);
3919		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3920			ulp_ops->cm_close_complete(csk);
3921		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3922			ulp_ops->cm_remote_abort(csk);
3923		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3924			ulp_ops->cm_abort_complete(csk);
3925		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3926			ulp_ops->cm_remote_close(csk);
3927	}
3928	rcu_read_unlock();
3929}
3930
3931static int cnic_cm_set_pg(struct cnic_sock *csk)
3932{
3933	if (cnic_offld_prep(csk)) {
3934		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3935			cnic_cm_update_pg(csk);
3936		else
3937			cnic_cm_offload_pg(csk);
3938	}
3939	return 0;
3940}
3941
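/* Handle an OFFLOAD_PG/UPDATE_PG completion.  On a successful offload,
 * save the returned pg_cid and send the TCP connect request; on
 * CTX_ALLOC_FAIL, report the failure as a connect completion upcall.
 */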
3942static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3943{
3944	struct cnic_local *cp = dev->cnic_priv;
3945	u32 l5_cid = kcqe->pg_host_opaque;
3946	u8 opcode = kcqe->op_code;
3947	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3948
3949	csk_hold(csk);
3950	if (!cnic_in_use(csk))
3951		goto done;
3952
3953	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3954		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3955		goto done;
3956	}
3957	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3958	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3959		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3960		cnic_cm_upcall(cp, csk,
3961			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3962		goto done;
3963	}
3964
3965	csk->pg_cid = kcqe->pg_cid;
3966	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3967	cnic_cm_conn_req(csk);
3968
3969done:
3970	csk_put(csk);
3971}
3972
3973static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3974{
3975	struct cnic_local *cp = dev->cnic_priv;
3976	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3977	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3978	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3979
3980	ctx->timestamp = jiffies;
3981	ctx->wait_cond = 1;
3982	wake_up(&ctx->waitq);
3983}
3984
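/* Route one L4/L5 KCQE to its socket (conn_id, or cid for opcodes with
 * bit 7 set) and advance the connect/close/abort state machine.
 */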
3985static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3986{
3987	struct cnic_local *cp = dev->cnic_priv;
3988	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3989	u8 opcode = l4kcqe->op_code;
3990	u32 l5_cid;
3991	struct cnic_sock *csk;
3992
3993	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3994		cnic_process_fcoe_term_conn(dev, kcqe);
3995		return;
3996	}
3997	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3998	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3999		cnic_cm_process_offld_pg(dev, l4kcqe);
4000		return;
4001	}
4002
4003	l5_cid = l4kcqe->conn_id;
4004	if (opcode & 0x80)
4005		l5_cid = l4kcqe->cid;
4006	if (l5_cid >= MAX_CM_SK_TBL_SZ)
4007		return;
4008
4009	csk = &cp->csk_tbl[l5_cid];
4010	csk_hold(csk);
4011
4012	if (!cnic_in_use(csk)) {
4013		csk_put(csk);
4014		return;
4015	}
4016
4017	switch (opcode) {
4018	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
4019		if (l4kcqe->status != 0) {
4020			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4021			cnic_cm_upcall(cp, csk,
4022				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
4023		}
4024		break;
4025	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
4026		if (l4kcqe->status == 0)
4027			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
4028		else if (l4kcqe->status ==
4029			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4030			set_bit(SK_F_HW_ERR, &csk->flags);
4031
4032		smp_mb__before_atomic();
4033		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4034		cnic_cm_upcall(cp, csk, opcode);
4035		break;
4036
4037	case L5CM_RAMROD_CMD_ID_CLOSE: {
4038		struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
4039
4040		if (l4kcqe->status == 0 && l5kcqe->completion_status == 0)
4041			break;
4042
4043		netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4044			    l4kcqe->status, l5kcqe->completion_status);
4045		opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4046	}
4047		fallthrough;
4048	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4049	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4050	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4051	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4052	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4053		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4054			set_bit(SK_F_HW_ERR, &csk->flags);
4055
4056		cp->close_conn(csk, opcode);
4057		break;
4058
4059	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
4060		/* after we already sent CLOSE_REQ */
4061		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4062		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
4063		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
4064			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
4065		else
4066			cnic_cm_upcall(cp, csk, opcode);
4067		break;
4068	}
4069	csk_put(csk);
4070}
4071
4072static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
4073{
4074	struct cnic_dev *dev = data;
4075	int i;
4076
4077	for (i = 0; i < num; i++)
4078		cnic_cm_process_kcqe(dev, kcqe[i]);
4079}
4080
4081static struct cnic_ulp_ops cm_ulp_ops = {
4082	.indicate_kcqes		= cnic_cm_indicate_kcqe,
4083};
4084
4085static void cnic_cm_free_mem(struct cnic_dev *dev)
4086{
4087	struct cnic_local *cp = dev->cnic_priv;
4088
4089	kvfree(cp->csk_tbl);
4090	cp->csk_tbl = NULL;
4091	cnic_free_id_tbl(&cp->csk_port_tbl);
4092}
4093
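/* Allocate the socket table and seed the local source port ID table at
 * a random offset within CNIC_LOCAL_PORT_RANGE.
 */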
4094static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4095{
4096	struct cnic_local *cp = dev->cnic_priv;
4097	u32 port_id;
4098	int i;
4099
4100	cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
4101			       GFP_KERNEL);
4102	if (!cp->csk_tbl)
4103		return -ENOMEM;
4104
4105	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
4106		atomic_set(&cp->csk_tbl[i].ref_count, 0);
4107
4108	port_id = get_random_u32();
4109	port_id %= CNIC_LOCAL_PORT_RANGE;
4110	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
4111			     CNIC_LOCAL_PORT_MIN, port_id)) {
4112		cnic_cm_free_mem(dev);
4113		return -ENOMEM;
4114	}
4115	return 0;
4116}
4117
4118static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4119{
4120	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4121		/* Unsolicited RESET_COMP or RESET_RECEIVED */
4122		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4123		csk->state = opcode;
4124	}
4125
4126	/* 1. If event opcode matches the expected event in csk->state
4127	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4128	 *    event
4129	 * 3. If the expected event is 0, meaning the connection was
4130	 *    never established, we accept the opcode from cm_abort.
4131	 */
4132	if (opcode == csk->state || csk->state == 0 ||
4133	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4134	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4135		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4136			if (csk->state == 0)
4137				csk->state = opcode;
4138			return 1;
4139		}
4140	}
4141	return 0;
4142}
4143
4144static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4145{
4146	struct cnic_dev *dev = csk->dev;
4147	struct cnic_local *cp = dev->cnic_priv;
4148
4149	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4150		cnic_cm_upcall(cp, csk, opcode);
4151		return;
4152	}
4153
4154	clear_bit(SK_F_CONNECT_START, &csk->flags);
4155	cnic_close_conn(csk);
4156	csk->state = opcode;
4157	cnic_cm_upcall(cp, csk, opcode);
4158}
4159
4160static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4161{
4162}
4163
4164static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4165{
4166	u32 seed;
4167
4168	seed = get_random_u32();
4169	cnic_ctx_wr(dev, 45, 0, seed);
4170	return 0;
4171}
4172
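/* bnx2x connections are torn down in stages: SEARCHER_DELETE, then
 * TERMINATE_OFFLOAD, with the final upcall made when the terminate
 * completes (or immediately on a HW error).
 */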
4173static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4174{
4175	struct cnic_dev *dev = csk->dev;
4176	struct cnic_local *cp = dev->cnic_priv;
4177	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4178	union l5cm_specific_data l5_data;
4179	u32 cmd = 0;
4180	int close_complete = 0;
4181
4182	switch (opcode) {
4183	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4184	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4185	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4186		if (cnic_ready_to_close(csk, opcode)) {
4187			if (test_bit(SK_F_HW_ERR, &csk->flags))
4188				close_complete = 1;
4189			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4190				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4191			else
4192				close_complete = 1;
4193		}
4194		break;
4195	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4196		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4197		break;
4198	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4199		close_complete = 1;
4200		break;
4201	}
4202	if (cmd) {
4203		memset(&l5_data, 0, sizeof(l5_data));
4204
4205		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4206				    &l5_data);
4207	} else if (close_complete) {
4208		ctx->timestamp = jiffies;
4209		cnic_close_conn(csk);
4210		cnic_cm_upcall(cp, csk, csk->state);
4211	}
4212}
4213
4214static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4215{
4216	struct cnic_local *cp = dev->cnic_priv;
4217
4218	if (!cp->ctx_tbl)
4219		return;
4220
4221	if (!netif_running(dev->netdev))
4222		return;
4223
4224	cnic_bnx2x_delete_wait(dev, 0);
4225
4226	cancel_delayed_work(&cp->delete_task);
4227	flush_workqueue(cnic_wq);
4228
4229	if (atomic_read(&cp->iscsi_conn) != 0)
4230		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4231			    atomic_read(&cp->iscsi_conn));
4232}
4233
4234static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4235{
4236	struct bnx2x *bp = netdev_priv(dev->netdev);
4237	u32 pfid = bp->pfid;
4238	u32 port = BP_PORT(bp);
4239
4240	cnic_init_bnx2x_mac(dev);
4241	cnic_bnx2x_set_tcp_options(dev, 0, 1);
4242
4243	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4244		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4245
4246	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4247		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4248	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4249		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4250		DEF_MAX_DA_COUNT);
4251
4252	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4253		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4254	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4255		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4256	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4257		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4258	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4259		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4260
4261	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4262		DEF_MAX_CWND);
4263	return 0;
4264}
4265
4266static void cnic_delete_task(struct work_struct *work)
4267{
4268	struct cnic_local *cp;
4269	struct cnic_dev *dev;
4270	u32 i;
4271	int need_resched = 0;
4272
4273	cp = container_of(work, struct cnic_local, delete_task.work);
4274	dev = cp->dev;
4275
4276	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4277		struct drv_ctl_info info;
4278
4279		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4280
4281		memset(&info, 0, sizeof(struct drv_ctl_info));
4282		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4283		cp->ethdev->drv_ctl(dev->netdev, &info);
4284	}
4285
4286	for (i = 0; i < cp->max_cid_space; i++) {
4287		struct cnic_context *ctx = &cp->ctx_tbl[i];
4288		int err;
4289
4290		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4291		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4292			continue;
4293
4294		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4295			need_resched = 1;
4296			continue;
4297		}
4298
4299		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4300			continue;
4301
4302		err = cnic_bnx2x_destroy_ramrod(dev, i);
4303
4304		cnic_free_bnx2x_conn_resc(dev, i);
4305		if (!err) {
4306			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4307				atomic_dec(&cp->iscsi_conn);
4308
4309			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4310		}
4311	}
4312
4313	if (need_resched)
4314		queue_delayed_work(cnic_wq, &cp->delete_task,
4315				   msecs_to_jiffies(10));
4317}
4318
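/* Common connection-manager bring-up.  ULPs then drive L4 connections
 * through the dev->cm_* entry points set up below, roughly:
 * cm_create() -> cm_connect() -> cm_close()/cm_abort() -> cm_destroy().
 */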
4319static int cnic_cm_open(struct cnic_dev *dev)
4320{
4321	struct cnic_local *cp = dev->cnic_priv;
4322	int err;
4323
4324	err = cnic_cm_alloc_mem(dev);
4325	if (err)
4326		return err;
4327
4328	err = cp->start_cm(dev);
4329
4330	if (err)
4331		goto err_out;
4332
4333	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4334
4335	dev->cm_create = cnic_cm_create;
4336	dev->cm_destroy = cnic_cm_destroy;
4337	dev->cm_connect = cnic_cm_connect;
4338	dev->cm_abort = cnic_cm_abort;
4339	dev->cm_close = cnic_cm_close;
4340	dev->cm_select_dev = cnic_cm_select_dev;
4341
4342	cp->ulp_handle[CNIC_ULP_L4] = dev;
4343	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4344	return 0;
4345
4346err_out:
4347	cnic_cm_free_mem(dev);
4348	return err;
4349}
4350
4351static int cnic_cm_shutdown(struct cnic_dev *dev)
4352{
4353	struct cnic_local *cp = dev->cnic_priv;
4354	int i;
4355
4356	if (!cp->csk_tbl)
4357		return 0;
4358
4359	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4360		struct cnic_sock *csk = &cp->csk_tbl[i];
4361
4362		clear_bit(SK_F_INUSE, &csk->flags);
4363		cnic_cm_cleanup(csk);
4364	}
4365	cnic_cm_free_mem(dev);
4366
4367	return 0;
4368}
4369
4370static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4371{
4372	u32 cid_addr;
4373	int i;
4374
4375	cid_addr = GET_CID_ADDR(cid);
4376
4377	for (i = 0; i < CTX_SIZE; i += 4)
4378		cnic_ctx_wr(dev, cid_addr, i, 0);
4379}
4380
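/* On 5709 chips, program the host page table with the DMA addresses of
 * the context blocks, polling for each write request to be accepted.
 */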
4381static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4382{
4383	struct cnic_local *cp = dev->cnic_priv;
4384	int ret = 0, i;
4385	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4386
4387	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4388		return 0;
4389
4390	for (i = 0; i < cp->ctx_blks; i++) {
4391		int j;
4392		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4393		u32 val;
4394
4395		memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
4396
4397		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4398			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4399		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4400			(u64) cp->ctx_arr[i].mapping >> 32);
4401		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4402			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4403		for (j = 0; j < 10; j++) {
4404
4405			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4406			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4407				break;
4408			udelay(5);
4409		}
4410		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4411			ret = -EBUSY;
4412			break;
4413		}
4414	}
4415	return ret;
4416}
4417
4418static void cnic_free_irq(struct cnic_dev *dev)
4419{
4420	struct cnic_local *cp = dev->cnic_priv;
4421	struct cnic_eth_dev *ethdev = cp->ethdev;
4422
4423	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4424		cp->disable_int_sync(dev);
4425		tasklet_kill(&cp->cnic_irq_task);
4426		free_irq(ethdev->irq_arr[0].vector, dev);
4427	}
4428}
4429
4430static int cnic_request_irq(struct cnic_dev *dev)
4431{
4432	struct cnic_local *cp = dev->cnic_priv;
4433	struct cnic_eth_dev *ethdev = cp->ethdev;
4434	int err;
4435
4436	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4437	if (err)
4438		tasklet_disable(&cp->cnic_irq_task);
4439
4440	return err;
4441}
4442
4443static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4444{
4445	struct cnic_local *cp = dev->cnic_priv;
4446	struct cnic_eth_dev *ethdev = cp->ethdev;
4447
4448	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4449		int err, i = 0;
4450		int sblk_num = cp->status_blk_num;
4451		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4452			   BNX2_HC_SB_CONFIG_1;
4453
4454		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4455
4456		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4457		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4458		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4459
4460		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4461		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4462			     (unsigned long) dev);
4463		err = cnic_request_irq(dev);
4464		if (err)
4465			return err;
4466
4467		while (cp->status_blk.bnx2->status_completion_producer_index &&
4468		       i < 10) {
4469			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4470				1 << (11 + sblk_num));
4471			udelay(10);
4472			i++;
4473			barrier();
4474		}
4475		if (cp->status_blk.bnx2->status_completion_producer_index) {
4476			cnic_free_irq(dev);
4477			goto failed;
4478		}
4479
4480	} else {
4481		struct status_block *sblk = cp->status_blk.gen;
4482		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4483		int i = 0;
4484
4485		while (sblk->status_completion_producer_index && i < 10) {
4486			CNIC_WR(dev, BNX2_HC_COMMAND,
4487				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4488			udelay(10);
4489			i++;
4490			barrier();
4491		}
4492		if (sblk->status_completion_producer_index)
4493			goto failed;
4494
4495	}
4496	return 0;
4497
4498failed:
4499	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4500	return -EBUSY;
4501}
4502
4503static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4504{
4505	struct cnic_local *cp = dev->cnic_priv;
4506	struct cnic_eth_dev *ethdev = cp->ethdev;
4507
4508	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4509		return;
4510
4511	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4512		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4513}
4514
4515static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4516{
4517	struct cnic_local *cp = dev->cnic_priv;
4518	struct cnic_eth_dev *ethdev = cp->ethdev;
4519
4520	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4521		return;
4522
4523	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4524		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4525	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4526	synchronize_irq(ethdev->irq_arr[0].vector);
4527}
4528
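/* Set up the L2 TX ring used by the UIO interface: program the TX
 * context and point every BD at the shared DMA buffer.
 */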
4529static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4530{
4531	struct cnic_local *cp = dev->cnic_priv;
4532	struct cnic_eth_dev *ethdev = cp->ethdev;
4533	struct cnic_uio_dev *udev = cp->udev;
4534	u32 cid_addr, tx_cid, sb_id;
4535	u32 val, offset0, offset1, offset2, offset3;
4536	int i;
4537	struct bnx2_tx_bd *txbd;
4538	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4539	struct status_block *s_blk = cp->status_blk.gen;
4540
4541	sb_id = cp->status_blk_num;
4542	tx_cid = 20;
4543	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4544	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4545		struct status_block_msix *sblk = cp->status_blk.bnx2;
4546
4547		tx_cid = TX_TSS_CID + sb_id - 1;
4548		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4549			(TX_TSS_CID << 7));
4550		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4551	}
4552	cp->tx_cons = *cp->tx_cons_ptr;
4553
4554	cid_addr = GET_CID_ADDR(tx_cid);
4555	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
4556		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4557
4558		for (i = 0; i < PHY_CTX_SIZE; i += 4)
4559			cnic_ctx_wr(dev, cid_addr2, i, 0);
4560
4561		offset0 = BNX2_L2CTX_TYPE_XI;
4562		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4563		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4564		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4565	} else {
4566		cnic_init_context(dev, tx_cid);
4567		cnic_init_context(dev, tx_cid + 1);
4568
4569		offset0 = BNX2_L2CTX_TYPE;
4570		offset1 = BNX2_L2CTX_CMD_TYPE;
4571		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4572		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4573	}
4574	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4575	cnic_ctx_wr(dev, cid_addr, offset0, val);
4576
4577	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4578	cnic_ctx_wr(dev, cid_addr, offset1, val);
4579
4580	txbd = udev->l2_ring;
4581
4582	buf_map = udev->l2_buf_map;
4583	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
4584		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4585		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4586	}
4587	val = (u64) ring_map >> 32;
4588	cnic_ctx_wr(dev, cid_addr, offset2, val);
4589	txbd->tx_bd_haddr_hi = val;
4590
4591	val = (u64) ring_map & 0xffffffff;
4592	cnic_ctx_wr(dev, cid_addr, offset3, val);
4593	txbd->tx_bd_haddr_lo = val;
4594}
4595
4596static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4597{
4598	struct cnic_local *cp = dev->cnic_priv;
4599	struct cnic_eth_dev *ethdev = cp->ethdev;
4600	struct cnic_uio_dev *udev = cp->udev;
4601	u32 cid_addr, sb_id, val, coal_reg, coal_val;
4602	int i;
4603	struct bnx2_rx_bd *rxbd;
4604	struct status_block *s_blk = cp->status_blk.gen;
4605	dma_addr_t ring_map = udev->l2_ring_map;
4606
4607	sb_id = cp->status_blk_num;
4608	cnic_init_context(dev, 2);
4609	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4610	coal_reg = BNX2_HC_COMMAND;
4611	coal_val = CNIC_RD(dev, coal_reg);
4612	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4613		struct status_block_msix *sblk = cp->status_blk.bnx2;
4614
4615		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4616		coal_reg = BNX2_HC_COALESCE_NOW;
4617		coal_val = 1 << (11 + sb_id);
4618	}
4619	i = 0;
4620	while (*cp->rx_cons_ptr == 0 && i < 10) {
4621		CNIC_WR(dev, coal_reg, coal_val);
4622		udelay(10);
4623		i++;
4624		barrier();
4625	}
4626	cp->rx_cons = *cp->rx_cons_ptr;
4627
4628	cid_addr = GET_CID_ADDR(2);
4629	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4630	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4631	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4632
4633	if (sb_id == 0)
4634		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4635	else
4636		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4637	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4638
4639	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
4640	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
4641		dma_addr_t buf_map;
4642		int n = (i % cp->l2_rx_ring_size) + 1;
4643
4644		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4645		rxbd->rx_bd_len = cp->l2_single_buf_size;
4646		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4647		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4648		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4649	}
4650	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
4651	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4652	rxbd->rx_bd_haddr_hi = val;
4653
4654	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
4655	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4656	rxbd->rx_bd_haddr_lo = val;
4657
4658	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4659	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4660}
4661
4662static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4663{
4664	struct kwqe *wqes[1], l2kwqe;
4665
4666	memset(&l2kwqe, 0, sizeof(l2kwqe));
4667	wqes[0] = &l2kwqe;
4668	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4669			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
4670			       KWQE_OPCODE_SHIFT) | 2;
4671	dev->submit_kwqes(dev, wqes, 1);
4672}
4673
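/* Read the iSCSI MAC address from shared memory, program it into
 * BNX2_EMAC_MAC_MATCH4/5 and enable the matching RPM sort user entry.
 */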
4674static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4675{
4676	struct cnic_local *cp = dev->cnic_priv;
4677	u32 val;
4678
4679	val = cp->func << 2;
4680
4681	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4682
4683	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4684			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4685	dev->mac_addr[0] = (u8) (val >> 8);
4686	dev->mac_addr[1] = (u8) val;
4687
4688	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4689
4690	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4691			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4692	dev->mac_addr[2] = (u8) (val >> 24);
4693	dev->mac_addr[3] = (u8) (val >> 16);
4694	dev->mac_addr[4] = (u8) (val >> 8);
4695	dev->mac_addr[5] = (u8) val;
4696
4697	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4698
4699	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4700	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4701		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4702
4703	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4704	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4705	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4706}
4707
4708static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4709{
4710	struct cnic_local *cp = dev->cnic_priv;
4711	struct cnic_eth_dev *ethdev = cp->ethdev;
4712	struct status_block *sblk = cp->status_blk.gen;
4713	u32 val, kcq_cid_addr, kwq_cid_addr;
4714	int err;
4715
4716	cnic_set_bnx2_mac(dev);
4717
4718	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4719	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4720	if (CNIC_PAGE_BITS > 12)
4721		val |= (12 - 8)  << 4;
4722	else
4723		val |= (CNIC_PAGE_BITS - 8)  << 4;
4724
4725	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4726
4727	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4728	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4729	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4730
4731	err = cnic_setup_5709_context(dev, 1);
4732	if (err)
4733		return err;
4734
4735	cnic_init_context(dev, KWQ_CID);
4736	cnic_init_context(dev, KCQ_CID);
4737
4738	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4739	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4740
4741	cp->max_kwq_idx = MAX_KWQ_IDX;
4742	cp->kwq_prod_idx = 0;
4743	cp->kwq_con_idx = 0;
4744	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4745
4746	if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
4747		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4748	else
4749		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4750
4751	/* Initialize the kernel work queue context. */
4752	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4753	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4754	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4755
4756	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4757	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4758
4759	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4760	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4761
4762	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4763	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4764
4765	val = (u32) cp->kwq_info.pgtbl_map;
4766	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4767
4768	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4769	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4770
4771	cp->kcq1.sw_prod_idx = 0;
4772	cp->kcq1.hw_prod_idx_ptr =
4773		&sblk->status_completion_producer_index;
4774
4775	cp->kcq1.status_idx_ptr = &sblk->status_idx;
4776
4777	/* Initialize the kernel complete queue context. */
4778	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4779	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4780	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4781
4782	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4783	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4784
4785	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4786	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4787
4788	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4789	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4790
4791	val = (u32) cp->kcq1.dma.pgtbl_map;
4792	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4793
4794	cp->int_num = 0;
4795	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4796		struct status_block_msix *msblk = cp->status_blk.bnx2;
4797		u32 sb_id = cp->status_blk_num;
4798		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4799
4800		cp->kcq1.hw_prod_idx_ptr =
4801			&msblk->status_completion_producer_index;
4802		cp->kcq1.status_idx_ptr = &msblk->status_idx;
4803		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
4804		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4805		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4806		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4807	}
4808
4809	/* Enable Command Scheduler notification when we write to the
4810	 * host producer index of the kernel contexts. */
4811	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4812
4813	/* Enable Command Scheduler notification when we write to either
4814	 * the Send Queue or Receive Queue producer indexes of the kernel
4815	 * bypass contexts. */
4816	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4817	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4818
4819	/* Notify COM when the driver posts an application buffer. */
4820	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4821
4822	/* Set the CP and COM doorbells.  These two processors poll the
4823	 * doorbell for a non-zero value before running.  This must be done
4824	 * after setting up the kernel queue contexts. */
4825	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4826	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4827
4828	cnic_init_bnx2_tx_ring(dev);
4829	cnic_init_bnx2_rx_ring(dev);
4830
4831	err = cnic_init_bnx2_irq(dev);
4832	if (err) {
4833		netdev_err(dev->netdev, "cnic_init_irq failed\n");
4834		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4835		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4836		return err;
4837	}
4838
4839	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
4840
4841	return 0;
4842}
4843
4844static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4845{
4846	struct cnic_local *cp = dev->cnic_priv;
4847	struct cnic_eth_dev *ethdev = cp->ethdev;
4848	u32 start_offset = ethdev->ctx_tbl_offset;
4849	int i;
4850
4851	for (i = 0; i < cp->ctx_blks; i++) {
4852		struct cnic_ctx *ctx = &cp->ctx_arr[i];
4853		dma_addr_t map = ctx->mapping;
4854
4855		if (cp->ctx_align) {
4856			unsigned long mask = cp->ctx_align - 1;
4857
4858			map = (map + mask) & ~mask;
4859		}
4860
4861		cnic_ctx_tbl_wr(dev, start_offset + i, map);
4862	}
4863}
4864
4865static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4866{
4867	struct cnic_local *cp = dev->cnic_priv;
4868	struct cnic_eth_dev *ethdev = cp->ethdev;
4869	int err = 0;
4870
4871	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4872		     (unsigned long) dev);
4873	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4874		err = cnic_request_irq(dev);
4875
4876	return err;
4877}
4878
4879static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4880						u16 sb_id, u8 sb_index,
4881						u8 disable)
4882{
4883	struct bnx2x *bp = netdev_priv(dev->netdev);
4884
4885	u32 addr = BAR_CSTRORM_INTMEM +
4886			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4887			offsetof(struct hc_status_block_data_e1x, index_data) +
4888			sizeof(struct hc_index_data)*sb_index +
4889			offsetof(struct hc_index_data, flags);
4890	u16 flags = CNIC_RD16(dev, addr);
4891	/* clear and set */
4892	flags &= ~HC_INDEX_DATA_HC_ENABLED;
4893	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4894		  HC_INDEX_DATA_HC_ENABLED);
4895	CNIC_WR16(dev, addr, flags);
4896}
4897
4898static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4899{
4900	struct cnic_local *cp = dev->cnic_priv;
4901	struct bnx2x *bp = netdev_priv(dev->netdev);
4902	u8 sb_id = cp->status_blk_num;
4903
4904	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4905			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4906			offsetof(struct hc_status_block_data_e1x, index_data) +
4907			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4908			offsetof(struct hc_index_data, timeout), 64 / 4);
4909	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4910}
4911
4912static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4913{
4914}
4915
4916static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4917				    struct client_init_ramrod_data *data)
4918{
4919	struct cnic_local *cp = dev->cnic_priv;
4920	struct bnx2x *bp = netdev_priv(dev->netdev);
4921	struct cnic_uio_dev *udev = cp->udev;
4922	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4923	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4924	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4925	int i;
4926	u32 cli = cp->ethdev->iscsi_l2_client_id;
4927	u32 val;
4928
4929	memset(txbd, 0, CNIC_PAGE_SIZE);
4930
4931	buf_map = udev->l2_buf_map;
4932	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4933		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4934		struct eth_tx_parse_bd_e1x *pbd_e1x =
4935			&((txbd + 1)->parse_bd_e1x);
4936		struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
4937		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4938
4939		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4940		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4941		reg_bd->addr_hi = start_bd->addr_hi;
4942		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4943		start_bd->nbytes = cpu_to_le16(0x10);
4944		start_bd->nbd = cpu_to_le16(3);
4945		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4946		start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
4947		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4948
4949		if (BNX2X_CHIP_IS_E2_PLUS(bp))
4950			pbd_e2->parsing_data = (UNICAST_ADDRESS <<
4951				ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
4952		else
4953			pbd_e1x->global_data = (UNICAST_ADDRESS <<
4954				ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
4955	}
4956
4957	val = (u64) ring_map >> 32;
4958	txbd->next_bd.addr_hi = cpu_to_le32(val);
4959
4960	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4961
4962	val = (u64) ring_map & 0xffffffff;
4963	txbd->next_bd.addr_lo = cpu_to_le32(val);
4964
4965	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4966
4967	/* Other ramrod params */
4968	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4969	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4970
4971	/* reset xstorm per client statistics */
4972	if (cli < MAX_STAT_COUNTER_ID) {
4973		data->general.statistics_zero_flg = 1;
4974		data->general.statistics_en_flg = 1;
4975		data->general.statistics_counter_id = cli;
4976	}
4977
4978	cp->tx_cons_ptr =
4979		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
4980}
4981
4982static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4983				    struct client_init_ramrod_data *data)
4984{
4985	struct cnic_local *cp = dev->cnic_priv;
4986	struct bnx2x *bp = netdev_priv(dev->netdev);
4987	struct cnic_uio_dev *udev = cp->udev;
4988	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4989				CNIC_PAGE_SIZE);
4990	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4991				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
4992	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4993	int i;
4994	u32 cli = cp->ethdev->iscsi_l2_client_id;
4995	int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
4996	u32 val;
4997	dma_addr_t ring_map = udev->l2_ring_map;
4998
4999	/* General data */
5000	data->general.client_id = cli;
5001	data->general.activate_flg = 1;
5002	data->general.sp_client_id = cli;
5003	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
5004	data->general.func_id = bp->pfid;
5005
5006	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
5007		dma_addr_t buf_map;
5008		int n = (i % cp->l2_rx_ring_size) + 1;
5009
5010		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
5011		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
5012		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
5013	}
5014
5015	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
5016	rxbd->addr_hi = cpu_to_le32(val);
5017	data->rx.bd_page_base.hi = cpu_to_le32(val);
5018
5019	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
5020	rxbd->addr_lo = cpu_to_le32(val);
5021	data->rx.bd_page_base.lo = cpu_to_le32(val);
5022
5023	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
5024	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
5025	rxcqe->addr_hi = cpu_to_le32(val);
5026	data->rx.cqe_page_base.hi = cpu_to_le32(val);
5027
5028	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
5029	rxcqe->addr_lo = cpu_to_le32(val);
5030	data->rx.cqe_page_base.lo = cpu_to_le32(val);
5031
5032	/* Other ramrod params */
5033	data->rx.client_qzone_id = cl_qzone_id;
5034	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
5035	data->rx.status_block_id = BNX2X_DEF_SB_ID;
5036
5037	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
5038
5039	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
5040	data->rx.outer_vlan_removal_enable_flg = 1;
5041	data->rx.silent_vlan_removal_flg = 1;
5042	data->rx.silent_vlan_value = 0;
5043	data->rx.silent_vlan_mask = 0xffff;
5044
5045	cp->rx_cons_ptr =
5046		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
5047	cp->rx_cons = *cp->rx_cons_ptr;
5048}
5049
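/* Point kcq1 (the iSCSI event queue) and, on E2+ chips, kcq2 (the FCoE
 * event queue) at their producer and status indices in the status
 * block.
 */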
5050static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5051{
5052	struct cnic_local *cp = dev->cnic_priv;
5053	struct bnx2x *bp = netdev_priv(dev->netdev);
5054	u32 pfid = bp->pfid;
5055
5056	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
5057			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
5058	cp->kcq1.sw_prod_idx = 0;
5059
5060	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5061		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5062
5063		cp->kcq1.hw_prod_idx_ptr =
5064			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5065		cp->kcq1.status_idx_ptr =
5066			&sb->sb.running_index[SM_RX_ID];
5067	} else {
5068		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
5069
5070		cp->kcq1.hw_prod_idx_ptr =
5071			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5072		cp->kcq1.status_idx_ptr =
5073			&sb->sb.running_index[SM_RX_ID];
5074	}
5075
5076	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5077		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5078
5079		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
5080					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
5081		cp->kcq2.sw_prod_idx = 0;
5082		cp->kcq2.hw_prod_idx_ptr =
5083			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
5084		cp->kcq2.status_idx_ptr =
5085			&sb->sb.running_index[SM_RX_ID];
5086	}
5087}
5088
5089static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5090{
5091	struct cnic_local *cp = dev->cnic_priv;
5092	struct bnx2x *bp = netdev_priv(dev->netdev);
5093	struct cnic_eth_dev *ethdev = cp->ethdev;
5094	int ret;
5095	u32 pfid;
5096
5097	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
5098	cp->func = bp->pf_num;
5099
5100	pfid = bp->pfid;
5101
5102	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
5103			       cp->iscsi_start_cid, 0);
5104
5105	if (ret)
5106		return -ENOMEM;
5107
5108	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5109		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
5110					cp->fcoe_start_cid, 0);
5111
5112		if (ret)
5113			return -ENOMEM;
5114	}
5115
5116	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
5117
5118	cnic_init_bnx2x_kcq(dev);
5119
5120	/* Only 1 EQ */
5121	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
5122	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5123		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
5124	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5125		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
5126		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
5127	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5128		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
5129		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
5130	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5131		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
5132		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
5133	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5134		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
5135		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
5136	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5137		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
5138	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
5139		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
5140	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5141		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
5142		HC_INDEX_ISCSI_EQ_CONS);
5143
5144	CNIC_WR(dev, BAR_USTRORM_INTMEM +
5145		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
5146		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
5147	CNIC_WR(dev, BAR_USTRORM_INTMEM +
5148		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
5149		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
5150
5151	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5152		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
5153
5154	cnic_setup_bnx2x_context(dev);
5155
5156	ret = cnic_init_bnx2x_irq(dev);
5157	if (ret)
5158		return ret;
5159
5160	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
5161	return 0;
5162}
5163
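/* Bring up the iSCSI L2 rings.  On bnx2x this posts a CLIENT_SETUP
 * ramrod with the ring parameters and waits up to ~10 ms for the
 * completion before enabling the ring and publishing the doorbell
 * info for the UIO application.
 */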
5164static void cnic_init_rings(struct cnic_dev *dev)
5165{
5166	struct cnic_local *cp = dev->cnic_priv;
5167	struct bnx2x *bp = netdev_priv(dev->netdev);
5168	struct cnic_uio_dev *udev = cp->udev;
5169
5170	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5171		return;
5172
5173	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5174		cnic_init_bnx2_tx_ring(dev);
5175		cnic_init_bnx2_rx_ring(dev);
5176		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5177	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5178		u32 cli = cp->ethdev->iscsi_l2_client_id;
5179		u32 cid = cp->ethdev->iscsi_l2_cid;
5180		u32 cl_qzone_id;
5181		struct client_init_ramrod_data *data;
5182		union l5cm_specific_data l5_data;
5183		struct ustorm_eth_rx_producers rx_prods = {0};
5184		u32 off, i, *cid_ptr;
5185
5186		rx_prods.bd_prod = 0;
5187		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5188		barrier();
5189
5190		cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
5191
5192		off = BAR_USTRORM_INTMEM +
5193			(BNX2X_CHIP_IS_E2_PLUS(bp) ?
5194			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
5195			 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
5196
5197		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
5198			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
5199
5200		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5201
5202		data = udev->l2_buf;
5203		cid_ptr = udev->l2_buf + 12;
5204
5205		memset(data, 0, sizeof(*data));
5206
5207		cnic_init_bnx2x_tx_ring(dev, data);
5208		cnic_init_bnx2x_rx_ring(dev, data);
5209
5210		data->general.fp_hsi_ver = ETH_FP_HSI_VERSION;
5211
5212		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5213		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
5214
5215		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5216
5217		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5218			cid, ETH_CONNECTION_TYPE, &l5_data);
5219
5220		i = 0;
5221		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5222		       ++i < 10)
5223			msleep(1);
5224
5225		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5226			netdev_err(dev->netdev,
5227				"iSCSI CLIENT_SETUP did not complete\n");
5228		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5229		cnic_ring_ctl(dev, cid, cli, 1);
5230		*cid_ptr = cid >> 4;
5231		*(cid_ptr + 1) = cid * bp->db_size;
5232		*(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
5233	}
5234}
5235
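/* Tear down the iSCSI L2 rings.  On bnx2x this halts the client
 * (waiting up to ~10 ms for completion) and then deletes its CFC
 * element via ramrods.
 */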
5236static void cnic_shutdown_rings(struct cnic_dev *dev)
5237{
5238	struct cnic_local *cp = dev->cnic_priv;
5239	struct cnic_uio_dev *udev = cp->udev;
5240	void *rx_ring;
5241
5242	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5243		return;
5244
5245	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5246		cnic_shutdown_bnx2_rx_ring(dev);
5247	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5248		u32 cli = cp->ethdev->iscsi_l2_client_id;
5249		u32 cid = cp->ethdev->iscsi_l2_cid;
5250		union l5cm_specific_data l5_data;
5251		int i;
5252
5253		cnic_ring_ctl(dev, cid, cli, 0);
5254
5255		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5256
5257		l5_data.phy_address.lo = cli;
5258		l5_data.phy_address.hi = 0;
5259		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5260			cid, ETH_CONNECTION_TYPE, &l5_data);
5261		i = 0;
5262		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5263		       ++i < 10)
5264			msleep(1);
5265
5266		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5267			netdev_err(dev->netdev,
5268				"iSCSI CLIENT_HALT did not complete\n");
5269		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5270
5271		memset(&l5_data, 0, sizeof(l5_data));
5272		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5273			cid, NONE_CONNECTION_TYPE, &l5_data);
5274		msleep(10);
5275	}
5276	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5277	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
5278	memset(rx_ring, 0, CNIC_PAGE_SIZE);
5279}
5280
5281static int cnic_register_netdev(struct cnic_dev *dev)
5282{
5283	struct cnic_local *cp = dev->cnic_priv;
5284	struct cnic_eth_dev *ethdev = cp->ethdev;
5285	int err;
5286
5287	if (!ethdev)
5288		return -ENODEV;
5289
5290	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5291		return 0;
5292
5293	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5294	if (err)
5295		netdev_err(dev->netdev, "register_cnic failed\n");
5296
5297	/* Read iSCSI config again.  On some bnx2x devices, iSCSI config
5298	 * can change after firmware is downloaded.
5299	 */
5300	dev->max_iscsi_conn = ethdev->max_iscsi_conn;
5301	if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
5302		dev->max_iscsi_conn = 0;
5303
5304	return err;
5305}
5306
5307static void cnic_unregister_netdev(struct cnic_dev *dev)
5308{
5309	struct cnic_local *cp = dev->cnic_priv;
5310	struct cnic_eth_dev *ethdev = cp->ethdev;
5311
5312	if (!ethdev)
5313		return;
5314
5315	ethdev->drv_unregister_cnic(dev->netdev);
5316}
5317
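/* Common bring-up path: allocate the per-chip resources, start the
 * chip-specific hardware and open the connection manager.
 */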
5318static int cnic_start_hw(struct cnic_dev *dev)
5319{
5320	struct cnic_local *cp = dev->cnic_priv;
5321	struct cnic_eth_dev *ethdev = cp->ethdev;
5322	int err;
5323
5324	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5325		return -EALREADY;
5326
5327	dev->regview = ethdev->io_base;
5328	pci_dev_get(dev->pcidev);
5329	cp->func = PCI_FUNC(dev->pcidev->devfn);
5330	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5331	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5332
5333	err = cp->alloc_resc(dev);
5334	if (err) {
5335		netdev_err(dev->netdev, "allocate resource failure\n");
5336		goto err1;
5337	}
5338
5339	err = cp->start_hw(dev);
5340	if (err)
5341		goto err1;
5342
5343	err = cnic_cm_open(dev);
5344	if (err)
5345		goto err1;
5346
5347	set_bit(CNIC_F_CNIC_UP, &dev->flags);
5348
5349	cp->enable_int(dev);
5350
5351	return 0;
5352
5353err1:
5354	if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
5355		cp->stop_hw(dev);
5356	else
5357		cp->free_resc(dev);
5358	pci_dev_put(dev->pcidev);
5359	return err;
5360}
5361
5362static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5363{
5364	cnic_disable_bnx2_int_sync(dev);
5365
5366	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5367	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5368
5369	cnic_init_context(dev, KWQ_CID);
5370	cnic_init_context(dev, KCQ_CID);
5371
5372	cnic_setup_5709_context(dev, 0);
5373	cnic_free_irq(dev);
5374
5375	cnic_free_resc(dev);
5376}
5377
5379static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5380{
5381	struct cnic_local *cp = dev->cnic_priv;
5382	struct bnx2x *bp = netdev_priv(dev->netdev);
5383	u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
5384	u32 sb_id = cp->status_blk_num;
5385	u32 idx_off, syn_off;
5386
5387	cnic_free_irq(dev);
5388
5389	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5390		idx_off = offsetof(struct hc_status_block_e2, index_values) +
5391			  (hc_index * sizeof(u16));
5392
5393		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
5394	} else {
5395		idx_off = offsetof(struct hc_status_block_e1x, index_values) +
5396			  (hc_index * sizeof(u16));
5397
5398		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
5399	}
5400	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
5401	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
5402		  idx_off, 0);
5403
5404	*cp->kcq1.hw_prod_idx_ptr = 0;
5405	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5406		CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
5407	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5408	cnic_free_resc(dev);
5409}
5410
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		cp->stop_cm(dev);
		cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

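/* Final teardown of a cnic device.  Waits up to one second for the
 * reference count to drain before releasing the netdev and freeing the
 * combined cnic_dev/cnic_local allocation.
 */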
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

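/* Fetch the FC NPIV table from the ethernet driver.  Only valid while
 * the device is up, and only on E2+ bnx2x chips.
 */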
static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
				struct cnic_fc_npiv_tbl *npiv_tbl)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;     /* bnx2x is down */

	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
		return -EINVAL;

	ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
	return ret;
}

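/* Allocate a cnic_dev together with its cnic_local private area in a
 * single allocation; cnic_priv points just past the cnic_dev struct.
 */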
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL)
		return NULL;

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
	cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
	atomic_set(&cdev->ref_count, 0);

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

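/* Probe a bnx2 netdev for cnic support via the driver's cnic_probe
 * hook and fill in the bnx2-class operations.  5709/5709S chips with a
 * revision below 0x10 are rejected.
 */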
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = (bp->cnic_probe)(dev);

	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
	    (pdev->revision < 0x10)) {
		pci_dev_put(pdev);
		goto cnic_err;
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

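/* Probe a bnx2x netdev for cnic support.  Connection limits for iSCSI
 * and FCoE come from the ethernet driver, with the FCoE connection
 * count capped at BNX2X_FCOE_NUM_CONNECTIONS; the interrupt ack/arm
 * handlers are chosen by chip generation.
 */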
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = bp->cnic_probe(dev);

	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (CNIC_SUPPORTS_FCOE(bp)) {
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
	}

	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
		cp->arm_int = cnic_arm_bnx2x_e2_msix;
	} else {
		cp->ack_int = cnic_ack_bnx2x_msix;
		cp->arm_int = cnic_arm_bnx2x_msix;
	}
	cp->close_conn = cnic_close_bnx2x_conn;
	return cdev;
}

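/* Identify cnic-capable netdevs by their ethtool driver name and, on a
 * match, allocate the cnic device and add it to the global list under
 * cnic_dev_lock.
 */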
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		else if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

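/* Propagate a netdev event to every registered ULP.  ULP_F_CALL_PENDING
 * is set around the callback, outside cnic_lock, to flag an in-progress
 * upcall into the ULP.
 */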
static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
			      u16 vlan_id)
{
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;
		void *ctx;

		mutex_lock(&cnic_lock);
		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						lockdep_is_held(&cnic_lock));
		if (!ulp_ops || !ulp_ops->indicate_netevent) {
			mutex_unlock(&cnic_lock);
			continue;
		}

		ctx = cp->ulp_handle[if_type];

		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		ulp_ops->indicate_netevent(ctx, event, vlan_id);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

/* netdev event handler: tracks cnic-capable devices as they are
 * registered, brought up and down, and unregistered, and forwards the
 * events (including those for VLAN devices on top of a cnic device) to
 * the registered ULPs.
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct cnic_dev *dev;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && event == NETDEV_REGISTER) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		cnic_rcv_netevent(cp, event, 0);

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	} else {
		struct net_device *realdev;
		u16 vid;

		vid = cnic_get_vlan(netdev, &realdev);
		if (realdev) {
			dev = cnic_from_netdev(realdev);
			if (dev) {
				vid |= VLAN_CFI_MASK;	/* make non-zero */
				cnic_rcv_netevent(dev->cnic_priv, event, vid);
				cnic_put(dev);
			}
		}
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

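/* Free any remaining UIO devices; called on module unload and on a
 * failed module init.
 */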
static void cnic_release(void)
{
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

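/* Module init: register the netdev notifier first so existing and
 * hot-plugged devices are picked up, then create the single-threaded
 * workqueue used for deferred cnic work.
 */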
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

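/* Module exit: stop receiving netdev events, release the remaining UIO
 * devices, then destroy the workqueue.
 */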
static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);