/* cnic.c: QLogic CNIC core network driver.
 *
 * Copyright (c) 2006-2014 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
 * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#define BCM_CNIC 1
#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"

#define CNIC_MODULE_NAME "cnic"

static char version[] =
	"QLogic " CNIC_MODULE_NAME " Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("QLogic cnic Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner = THIS_MODULE,
	.cnic_handler = cnic_service_bnx2,
	.cnic_ctl = cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner = THIS_MODULE,
	.cnic_handler = cnic_service_bnx2x,
	.cnic_ctl = cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

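/*
 * Find the cnic_dev bound to a netdev.  On success the device is
 * returned with its reference count raised; callers balance this
 * with cnic_put() when they are done with it.
 */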
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	info.drv_state = state;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				   !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

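/*
 * In the close/abort paths below, SK_F_OFFLD_SCHED doubles as a busy
 * flag: the loops spin (with msleep) until the bit can be claimed,
 * which prevents racing with an offload request still in flight.
 */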
static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}
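/*
 * Typical ULP usage, as a sketch only (the my_* names below are
 * illustrative, not taken from any real ULP): a storage driver such
 * as bnx2i registers an ops table once at module init and tears it
 * down at module exit:
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.cnic_init = my_init,
 *		.cnic_exit = my_exit,
 *	};
 *
 *	rc = cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 *	...
 *	cnic_unregister_driver(CNIC_ULP_ISCSI);
 */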

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
		cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
	else
		cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	bitmap_free(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
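/*
 * Example: with max = 256, next = 200 and bits 200..255 all set, the
 * first search fails and the wrap-around search scans bits 0..199;
 * the caller gets start + the first free bit.  Note that advancing
 * next with "& (max - 1)" assumes max is a power of 2.
 */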

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

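/*
 * The two page-table setup variants below each emit a 64-bit DMA
 * address as two 32-bit words: cnic_setup_page_tbl() writes the high
 * word first (big-endian word order), cnic_setup_page_tbl_le() the
 * low word first.
 */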
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each 64-bit entry is written high word first (big-endian
		 * word order); each 32-bit half is little-endian.
		 */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each 64-bit entry is written low word first; both halves
		 * are little-endian.
		 */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    CNIC_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
			  ~(CNIC_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
		if (udev->uio_dev == -1)
			__cnic_free_uio_rings(udev);
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = CNIC_PAGE_SIZE;
		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   CNIC_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

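/*
 * On bnx2x the last KCQE slot of each queue page holds the
 * bd_chain_next pointer to the following page (see cnic_alloc_kcq()),
 * so the index helpers below skip one slot whenever the index lands
 * on that reserved entry.
 */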
static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map, GFP_KERNEL);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map, GFP_KERNEL);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			if (__cnic_alloc_uio_rings(udev, pages)) {
				udev->dev = NULL;
				return -ENOMEM;
			}
			cp->udev = udev;
			return 0;
		}
	}

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;

	if (__cnic_alloc_uio_rings(udev, pages))
		goto err_udev;

	list_add(&udev->list, &cnic_udev_list);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

 err_udev:
	kfree(udev);
	return -ENOMEM;
}

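/*
 * Export the resources user space needs (typically the iscsiuio
 * daemon) through UIO:
 *   mem[0] - device registers (BAR 0)
 *   mem[1] - status block(s)
 *   mem[2] - L2 ring
 *   mem[3] - L2 buffers
 */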
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					CNIC_PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
					CNIC_PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

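/*
 * Allocate the bnx2x connection context blocks.  On E1 the blocks
 * must be naturally aligned: if an allocation comes back misaligned,
 * everything is freed, the block size is padded by one alignment
 * unit, and the loop restarts from the top (via i = -1; continue).
 */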
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!CHIP_IS_E1(bp))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kcalloc(MAX_ISCSI_TBL_SZ, sizeof(struct cnic_iscsi),
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kcalloc(cp->max_cid_space, sizeof(struct cnic_context),
			      GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		goto error;

	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (CNIC_SUPPORTS_FCOE(bp)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return 0;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
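/*
 * Ring-space arithmetic, e.g. with max_kwq_idx = 255: prod = 5 and
 * con = 250 give 255 - ((5 - 250) & 255) = 255 - 11 = 244 free
 * entries; the masked subtraction handles producer wrap-around.
 */

/*
 * cnic_submit_bnx2_kwqes() below queues KWQEs to the bnx2 device.
 * CNIC_LCL_FL_KWQ_INIT is set while the queue is (re)initialized and
 * the hardware consumer index is not yet valid; it lets the first
 * post bypass the stale free-space check and is cleared on first use.
 */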

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(bp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	/* a return of 1 means our single WQE was accepted */
	if (ret == 1)
		return 0;

	return ret;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
				       int en_tcp_dack)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (time_stamps) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}
	if (en_tcp_dack)
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
}

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = bp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	cnic_bnx2x_set_tcp_options(dev,
			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(bp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = BP_PORT(bp);

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
		ETH_P_8021Q;
	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
		port = 0;
	}
	ictx->xstorm_st_context.common.flags =
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
	/* OR in the port bits so the PHYSQ_INITIALIZED flag set above
	 * is not clobbered.
	 */
	ictx->xstorm_st_context.common.flags |=
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of the RQ DB, not the PTE address */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/*
	 * CSTORM and USTORM initialization differ: CSTORM requires the
	 * CQ DB base address, not the PTE address.
	 */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

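/*
 * Issue a CFC delete ramrod and wait up to CNIC_RAMROD_TMO for the
 * completion path to flip ctx->wait_cond; a CTX_FL_CID_ERROR left
 * set by that path turns the wait into -EBUSY.
 */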
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(bp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return 0;
}

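/* Handle a DESTROY_CONN KWQE.  If the connection was offloaded less
 * than two seconds ago, defer the CFC delete to the delete_task worker;
 * otherwise delete it now and free the connection resources.
 */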
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

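/* Fill the xstorm and tstorm portions of the active-connection buffer
 * for a TCP connect request, including the IPv6-style pseudo-header
 * checksum and the keepalive parameters from CONNECT3.
 */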
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->max_rt_time = 0xffffffff;
}

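/* Program the local MAC address into the xstorm and tstorm internal
 * memories for iSCSI; tstorm wants the bytes in reverse order.
 */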
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[0]);
}

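/* Handle an L4 CONNECT1 KWQE chain (CONNECT1 [+ CONNECT2 for IPv6]
 * + CONNECT3): build the active-connection buffer and submit a
 * TCP_CONNECT ramrod.
 */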
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
				  kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	*work = 3;
	return ret;
}

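/* Handle an FCoE OFFLOAD_CONN1 request: consume the four offload KWQEs,
 * allocate connection resources, seed the CDU context, and submit an
 * offload ramrod; on any failure reply with an error KCQE.
 */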
static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret = 0;
	u32 cid = -1, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct fcoe_kwqe_conn_offload1 *req1;
	struct fcoe_kwqe_conn_offload2 *req2;
	struct fcoe_kwqe_conn_offload3 *req3;
	struct fcoe_kwqe_conn_offload4 *req4;
	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
	struct cnic_context *ctx;
	struct fcoe_context *fctx;
	struct regpair ctx_addr;
	union l5cm_specific_data l5_data;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	if (num < 4) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];

	*work = 4;

	l5_cid = req1->fcoe_conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		goto err_reply;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto err_reply;

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		ret = 0;
		goto err_reply;
	}
	cid = ctx->cid;

	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
	if (fctx) {
		u32 hw_cid = BNX2X_HW_CID(bp, cid);
		u32 val;

		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->xstorm_ag_context.cdu_reserved = val;
		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->ustorm_ag_context.cdu_usage = val;
	}
	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_offload size too big\n");
		goto err_reply;
	}
	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_offload)
		goto err_reply;

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));

	cid = BNX2X_HW_CID(bp, cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;

err_reply:
	if (cid != -1)
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_enable size too big\n");
		return -ENOMEM;
	}
	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_enable)
		return -ENOMEM;

	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_disable size too big\n");
		return -ENOMEM;
	}
	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_disable)
		return -ENOMEM;

	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_destroy *req;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (ctx->wait_cond)
			kcqe.completion_status = 0;
	}

	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));

	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
	kcqe.fcoe_conn_id = req->conn_id;
	kcqe.fcoe_conn_context_id = cid;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

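/* Poll the context table from start_cid onward until pending deletes
 * and offloads have cleared, warning about any CID still active.
 */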
static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 i;

	for (i = start_cid; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];
		int j;

		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			msleep(10);

		for (j = 0; j < 5; j++) {
			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
				break;
			msleep(20);
		}

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			netdev_warn(dev->netdev, "CID %x not deleted\n",
				    ctx->cid);
	}
}

static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;
	u32 cid;

	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);

	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

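/* A KWQE could not be submitted (likely a parity error); synthesize a
 * completion KCQE with an error status for the owning ULP so it can
 * clean up without waiting for a firmware event.
 */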
static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kcqe kcqe;
	struct kcqe *cqes[1];
	u32 cid;
	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
	u32 kcqe_op;
	int ulp_type;

	cid = kwqe->kwqe_info0;
	memset(&kcqe, 0, sizeof(kcqe));

	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
		u32 l5_cid = 0;

		ulp_type = CNIC_ULP_FCOE;
		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
			struct fcoe_kwqe_conn_enable_disable *req;

			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
			cid = req->context_id;
			l5_cid = req->conn_id;
		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
		} else {
			return;
		}
		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
		kcqe.kcqe_info2 = cid;
		kcqe.kcqe_info0 = l5_cid;

	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
		ulp_type = CNIC_ULP_ISCSI;
		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
			cid = kwqe->kwqe_info1;

		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
		kcqe.kcqe_info2 = cid;
		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);

	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;

		ulp_type = CNIC_ULP_L4;
		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		else
			return;

		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
				    KCQE_FLAGS_LAYER_MASK_L4;
		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
		l4kcqe->cid = cid;
		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
	} else {
		return;
	}

	cqes[0] = &kcqe;
	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
}

static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
					 struct kwqe *wqes[], u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0) {
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			/* Possibly bnx2x parity error, send completion
			 * to ulp drivers with error code to speed up
			 * cleanup and reset recovery.
			 */
			if (ret == -EIO || ret == -EAGAIN)
				cnic_bnx2x_kwqe_err(dev, kwqe);
		}
		i += work;
	}
	return 0;
}

static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
					struct kwqe *wqes[], u32 num_wqes)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
		return -EINVAL;

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case FCOE_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_ENABLE_CONN:
			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DISABLE_CONN:
			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY:
			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_STAT:
			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0) {
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			/* Possibly bnx2x parity error, send completion
			 * to ulp drivers with error code to speed up
			 * cleanup and reset recovery.
			 */
			if (ret == -EIO || ret == -EAGAIN)
				cnic_bnx2x_kwqe_err(dev, kwqe);
		}
		i += work;
	}
	return 0;
}

static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int ret = -EINVAL;
	u32 layer_code;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!num_wqes)
		return 0;

	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
	switch (layer_code) {
	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
	case KWQE_FLAGS_LAYER_MASK_L4:
	case KWQE_FLAGS_LAYER_MASK_L2:
		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
		break;

	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
		break;
	}
	return ret;
}

static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
{
	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
		return KCQE_FLAGS_LAYER_MASK_L4;

	return opflag & KCQE_FLAGS_LAYER_MASK;
}

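/* Dispatch completed KCQEs to the ULP drivers, batching consecutive
 * entries that belong to the same protocol layer into one indication
 * and returning any ramrod SPQ credits that were consumed.
 */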
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
			ulp_type = CNIC_ULP_FCOE;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}

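/* Harvest new KCQEs from the ring into cp->completed_kcq[], stopping at
 * the hardware producer or MAX_COMPLETED_KCQE, and only advancing
 * sw_prod_idx past complete (non-NEXT-flagged) sequences.
 */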
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = info->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = info->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}

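/* Scan the bnx2x L2 completion ring for CLIENT_SETUP/HALT ramrod
 * completions, used when the uio L2 rings are brought up or down.
 */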
static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp++;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}

static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons, tx_cons;
	int comp = 0;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	rx_cons = *cp->rx_cons_ptr;
	tx_cons = *cp->tx_cons_ptr;
	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		if (cp->udev)
			uio_event_notify(&cp->udev->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}

static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading other fields */
	rmb();
	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		status_idx = (u16) *cp->kcq1.status_idx_ptr;
		/* status block index must be read first */
		rmb();
		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}

static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		struct status_block *sblk = status_blk;

		return sblk->status_idx;
	}

	return cnic_service_bnx2_queues(dev);
}

static void cnic_service_bnx2_msix(struct tasklet_struct *t)
{
	struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
	struct cnic_dev *dev = cp->dev;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_doirq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;

		prefetch(cp->status_blk.gen);
		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

		tasklet_schedule(&cp->cnic_irq_task);
	}
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}

static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
		((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
		 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
		 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
		 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
			    u16 index, u8 op, u8 update)
{
	struct igu_regular cmd_data;
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;

	cmd_data.sb_id_and_flags =
		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		(update << IGU_REGULAR_BUPDATE_SHIFT) |
		(op << IGU_REGULAR_ENABLE_INT_SHIFT);

	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}

static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
			IGU_INT_DISABLE, 0);
}

static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
			   IGU_INT_ENABLE, 1);
}

static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
			IGU_INT_ENABLE, 1);
}

static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	u32 last_status = *info->status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading the KCQ */
	rmb();
	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();

		last_status = *info->status_idx_ptr;
		/* status block index must be read before reading the KCQ */
		rmb();
	}
	return last_status;
}

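/* MSI-X bottom half for bnx2x: service KCQ1 (and KCQ2 on FCoE-capable
 * chips), then re-arm the IGU once both queues have drained at the
 * same status index.
 */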
static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
{
	struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
	struct cnic_dev *dev = cp->dev;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 status_idx, new_status_idx;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	while (1) {
		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

		CNIC_WR16(dev, cp->kcq1.io_addr,
			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);

		if (!CNIC_SUPPORTS_FCOE(bp)) {
			cp->arm_int(dev, status_idx);
			break;
		}

		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);

		if (new_status_idx != status_idx)
			continue;

		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
			  MAX_KCQ_IDX);

		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
				status_idx, IGU_INT_ENABLE, 1);

		break;
	}
}

static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;

	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		cnic_doirq(dev);

	cnic_chk_pkt_rings(cp);

	return 0;
}

static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
{
	struct cnic_ulp_ops *ulp_ops;

	if (if_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
					    lockdep_is_held(&cnic_lock));
	if (!ulp_ops) {
		mutex_unlock(&cnic_lock);
		return;
	}
	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	mutex_unlock(&cnic_lock);

	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}

static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
		cnic_ulp_stop_one(cp, if_type);
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						    lockdep_is_held(&cnic_lock));
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;
	int rc;

	mutex_lock(&cnic_lock);
	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
					    lockdep_is_held(&cnic_lock));
	if (ulp_ops && ulp_ops->cnic_get_stats)
		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
	else
		rc = -ENODEV;
	mutex_unlock(&cnic_lock);
	return rc;
}

static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;
	int ulp_type = CNIC_ULP_ISCSI;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_STOP_ISCSI_CMD: {
		struct cnic_local *cp = dev->cnic_priv;
		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
		break;
	}
	case CNIC_CTL_COMPLETION_CMD: {
		struct cnic_ctl_completion *comp = &info->data.comp;
		u32 cid = BNX2X_SW_CID(comp->cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
			break;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			if (unlikely(comp->error)) {
				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
				netdev_err(dev->netdev,
					   "CID %x CFC delete comp error %x\n",
					   cid, comp->error);
			}

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	case CNIC_CTL_FCOE_STATS_GET_CMD:
		ulp_type = CNIC_ULP_FCOE;
		fallthrough;
	case CNIC_CTL_ISCSI_STATS_GET_CMD:
		cnic_hold(dev);
		cnic_copy_ulp_stats(dev, ulp_type);
		cnic_put(dev);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}

static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_start = DEF_IPID_START;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

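/* Build the CONNECT1/CONNECT2/CONNECT3 KWQE chain from the socket
 * parameters (CONNECT2 only for IPv6) and submit it to the device.
 */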
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}

static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	if (cp->ctx_tbl) {
		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			return -EAGAIN;
	}

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;
	csk1->tcp_flags = 0;

	*csk = csk1;
	return 0;
}

static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_atomic();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}

static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (is_vlan_dev(dev)) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct rtable *rt;

	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
	if (!IS_ERR(rt)) {
		*dst = &rt->dst;
		return 0;
	}
	return PTR_ERR(rt);
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_addr->sin6_addr;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl6);
	if ((*dst)->error) {
		dst_release(*dst);
		*dst = NULL;
		return -ENETUNREACH;
	} else
		return 0;
#endif

	return -ENETUNREACH;
}

static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}

static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}

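/* Resolve the route for the destination address, record the VLAN and
 * path MTU if the route uses our netdev, and reserve a local TCP port
 * from the port table (the caller's choice if valid, else a new one).
 */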
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, rc = 0;
	struct dst_entry *dst = NULL;
	struct net_device *realdev;
	__be16 local_port;
	u32 port_id;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
		set_bit(SK_F_IPV6, &csk->flags);
		cnic_get_v6_route(&saddr->remote.v6, &dst);

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;

	} else {
		cnic_get_v4_route(&saddr->remote.v4, &dst);

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = 0;
	csk->mtu = dev->netdev->mtu;
	if (dst && dst->dev) {
		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
		if (realdev == dev->netdev) {
			csk->vlan_id = vlan;
			csk->mtu = dst_mtu(dst);
		}
	}

	port_id = be16_to_cpu(local_port);
	if (port_id >= CNIC_LOCAL_PORT_MIN &&
	    port_id < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
			port_id = 0;
	} else
		port_id = 0;

	if (!port_id) {
		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (port_id == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
		local_port = cpu_to_be16(port_id);
	}
	csk->src_port = local_port;

err_out:
	dst_release(dst);
	return rc;
}

static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}

static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	int err = 0;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return -EOPNOTSUPP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}

static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful, or it has been reset by the target.
	 */

	cp->close_conn(csk, opcode);
	if (csk->state != opcode) {
		/* Wait for remote reset sequence to complete */
		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			msleep(1);

		return -EALREADY;
	}

	return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	} else {
		/* Wait for remote reset sequence to complete */
		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			msleep(1);

		return -EALREADY;
	}
	return 0;
}

static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}

static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}
	return 0;
}

3940static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3941{
3942 struct cnic_local *cp = dev->cnic_priv;
3943 u32 l5_cid = kcqe->pg_host_opaque;
3944 u8 opcode = kcqe->op_code;
3945 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3946
3947 csk_hold(csk);
3948 if (!cnic_in_use(csk))
3949 goto done;
3950
3951 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3952 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3953 goto done;
3954 }
3955 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3956 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3957 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3958 cnic_cm_upcall(cp, csk,
3959 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3960 goto done;
3961 }
3962
3963 csk->pg_cid = kcqe->pg_cid;
3964 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3965 cnic_cm_conn_req(csk);
3966
3967done:
3968 csk_put(csk);
3969}
3970
3971static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3972{
3973 struct cnic_local *cp = dev->cnic_priv;
3974 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3975 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3976 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3977
3978 ctx->timestamp = jiffies;
3979 ctx->wait_cond = 1;
3980 wake_up(&ctx->waitq);
3981}
3982
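/* Dispatch one L4/L5 KCQE to the socket it belongs to.  FCoE connection
 * termination and PG offload/update events are handled separately; for
 * everything else, completions whose opcode has bit 7 set (ramrods)
 * carry the connection index in cid rather than conn_id.
 */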
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
		cnic_process_fcoe_term_conn(dev, kcqe);
		return;
	}
	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
		if (l4kcqe->status != 0) {
			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
			cnic_cm_upcall(cp, csk,
				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		}
		break;
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
		else if (l4kcqe->status ==
			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
			set_bit(SK_F_HW_ERR, &csk->flags);

		smp_mb__before_atomic();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L5CM_RAMROD_CMD_ID_CLOSE: {
		struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;

		if (l4kcqe->status == 0 && l5kcqe->completion_status == 0)
			break;

		netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
			    l4kcqe->status, l5kcqe->completion_status);
		opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
	}
		fallthrough;
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
			set_bit(SK_F_HW_ERR, &csk->flags);

		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		/* after we already sent CLOSE_REQ */
		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
		else
			cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes = cnic_cm_indicate_kcqe,
};

static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kvfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

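/* Allocate the socket table and the local port ID table.  The port
 * table starts at a random offset within CNIC_LOCAL_PORT_RANGE so that
 * successive driver loads are unlikely to hand out the same ephemeral
 * ports right away.
 */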
static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 port_id;
	int i;

	cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
			       GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
		atomic_set(&cp->csk_tbl[i].ref_count, 0);

	port_id = get_random_u32_below(CNIC_LOCAL_PORT_RANGE);
	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN, port_id)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}

static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		/* Unsolicited RESET_COMP or RESET_RECEIVED */
		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
		csk->state = opcode;
	}

	/* 1. If event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
	 *    event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
	 */
	if (opcode == csk->state || csk->state == 0 ||
	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
			if (csk->state == 0)
				csk->state = opcode;
			return 1;
		}
	}
	return 0;
}

static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
		cnic_cm_upcall(cp, csk, opcode);
		return;
	}

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	cnic_close_conn(csk);
	csk->state = opcode;
	cnic_cm_upcall(cp, csk, opcode);
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	seed = get_random_u32();
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}

static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode)) {
			if (test_bit(SK_F_HW_ERR, &csk->flags))
				close_complete = 1;
			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
			else
				close_complete = 1;
		}
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}

static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!cp->ctx_tbl)
		return;

	if (!netif_running(dev->netdev))
		return;

	cnic_bnx2x_delete_wait(dev, 0);

	cancel_delayed_work(&cp->delete_task);
	flush_workqueue(cnic_wq);

	if (atomic_read(&cp->iscsi_conn) != 0)
		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
			    atomic_read(&cp->iscsi_conn));
}

static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	u32 port = BP_PORT(bp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_options(dev, 0, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
		DEF_MAX_CWND);
	return 0;
}

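/* Deferred connection cleanup.  Contexts flagged CTX_FL_DELETE_WAIT are
 * destroyed only after a 2 second grace period measured from
 * ctx->timestamp; anything not yet ripe causes the work item to be
 * requeued 10 ms later.
 */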
static void cnic_delete_task(struct work_struct *work)
{
	struct cnic_local *cp;
	struct cnic_dev *dev;
	u32 i;
	int need_resched = 0;

	cp = container_of(work, struct cnic_local, delete_task.work);
	dev = cp->dev;

	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
		struct drv_ctl_info info;

		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);

		memset(&info, 0, sizeof(struct drv_ctl_info));
		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
		cp->ethdev->drv_ctl(dev->netdev, &info);
	}

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];
		int err;

		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
			need_resched = 1;
			continue;
		}

		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		err = cnic_bnx2x_destroy_ramrod(dev, i);

		cnic_free_bnx2x_conn_resc(dev, i);
		if (!err) {
			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
				atomic_dec(&cp->iscsi_conn);

			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
		}
	}

	if (need_resched)
		queue_delayed_work(cnic_wq, &cp->delete_task,
				   msecs_to_jiffies(10));

}

static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}

static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}

static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	u32 cid_addr;
	int i;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}

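/* Program (or invalidate) the 5709 host page table, one context block
 * per iteration.  Each write is polled up to 10 times for the hardware
 * to clear the WRITE_REQ bit before giving up with -EBUSY.
 */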
static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_kill(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}

static int cnic_request_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
	if (err)
		tasklet_disable(&cp->cnic_irq_task);

	return err;
}

static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int err, i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
		tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
		err = cnic_request_irq(dev);
		if (err)
			return err;

		while (cp->status_blk.bnx2->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->status_blk.bnx2->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk.gen;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
	return -EBUSY;
}

static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}

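/* Set up the L2 TX ring used by the userspace (UIO) interface.  Every
 * BD points into the shared udev->l2_buf_map buffer, and the final BD
 * in the page is chained back to the base of the ring.
 */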
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct bnx2_tx_bd *txbd;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct status_block *s_blk = cp->status_blk.gen;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		tx_cid = TX_TSS_CID + sb_id - 1;
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		cnic_init_context(dev, tx_cid);
		cnic_init_context(dev, tx_cid + 1);

		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = udev->l2_ring;

	buf_map = udev->l2_buf_map;
	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}

static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct bnx2_rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk.gen;
	dma_addr_t ring_map = udev->l2_ring_map;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (*cp->rx_cons_ptr == 0 && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}

static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}

static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}

static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (CNIC_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (CNIC_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		&sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			&msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = &msblk->status_idx;
		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;

	return 0;
}

static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}

static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);

	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		  HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
		 offsetof(struct hc_status_block_data_e1x, index_data) +
		 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
		 offsetof(struct hc_index_data, timeout), 64 / 4);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, CNIC_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_parse_bd_e1x *pbd_e1x =
			&((txbd + 1)->parse_bd_e1x);
		struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

		if (BNX2X_CHIP_IS_E2_PLUS(bp))
			pbd_e2->parsing_data = (UNICAST_ADDRESS <<
				ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
		else
			pbd_e1x->global_data = (UNICAST_ADDRESS <<
				ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		data->general.statistics_zero_flg = 1;
		data->general.statistics_en_flg = 1;
		data->general.statistics_counter_id = cli;
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}

static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				CNIC_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;
	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->general.func_id = bp->pfid;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;

	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
	data->rx.outer_vlan_removal_enable_flg = 1;
	data->rx.silent_vlan_removal_flg = 1;
	data->rx.silent_vlan_value = 0;
	data->rx.silent_vlan_mask = 0xffff;

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}

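/* Wire the kernel completion queues to the status block: KCQ1 carries
 * the iSCSI event queue on all chips, while KCQ2 carries the FCoE event
 * queue and exists only on E2 and later chips.
 */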
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
				   USTORM_FCOE_EQ_PROD_OFFSET(pfid);
		cp->kcq2.sw_prod_idx = 0;
		cp->kcq2.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
		cp->kcq2.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}
}

static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int ret;
	u32 pfid;

	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
	cp->func = bp->pf_num;

	pfid = bp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid, 0);

	if (ret)
		return -ENOMEM;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
				       cp->fcoe_start_cid, 0);

		if (ret)
			return -ENOMEM;
	}

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cnic_init_bnx2x_kcq(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		 HC_INDEX_ISCSI_EQ_CONS);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
	return 0;
}

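/* Bring up the iSCSI L2 rings.  On bnx2x this posts a CLIENT_SETUP
 * ramrod and then polls CNIC_LCL_FL_L2_WAIT, which the completion path
 * clears, for up to roughly 10 ms before reporting a failure.
 */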
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i, *cid_ptr;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2_PLUS(bp) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;
		cid_ptr = udev->l2_buf + 12;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		data->general.fp_hsi_ver = ETH_FP_HSI_VERSION;

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			cid, ETH_CONNECTION_TYPE, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
		*cid_ptr = cid >> 4;
		*(cid_ptr + 1) = cid * bp->db_size;
		*(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
	}
}

static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	void *rx_ring;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			cid, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			cid, NONE_CONNECTION_TYPE, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
	memset(rx_ring, 0, CNIC_PAGE_SIZE);
}

static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	/* Read iSCSI config again.  On some bnx2x devices, the iSCSI config
	 * can change after firmware is downloaded.
	 */
	dev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		dev->max_iscsi_conn = 0;

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
		cp->stop_hw(dev);
	else
		cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}


static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
	u32 sb_id = cp->status_blk_num;
	u32 idx_off, syn_off;

	cnic_free_irq(dev);

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		idx_off = offsetof(struct hc_status_block_e2, index_values) +
			  (hc_index * sizeof(u16));

		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
	} else {
		idx_off = offsetof(struct hc_status_block_e1x, index_values) +
			  (hc_index * sizeof(u16));

		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
	}
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
		  idx_off, 0);

	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

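/* Tear-down order matters here: wait for the UIO device to be closed,
 * shut down the rings and the connection manager, clear CNIC_F_CNIC_UP,
 * and only then drop the L4 ulp_ops pointer and synchronize RCU before
 * freeing the hardware resources.
 */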
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		cp->stop_cm(dev);
		cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
				struct cnic_fc_npiv_tbl *npiv_tbl)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
		return -EINVAL;

	ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
	return ret;
}

static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL)
		return NULL;

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
	cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
	atomic_set(&cdev->ref_count, 0);

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = (bp->cnic_probe)(dev);

	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
	    (pdev->revision < 0x10)) {
		pci_dev_put(pdev);
		goto cnic_err;
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = bp->cnic_probe(dev);

	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (CNIC_SUPPORTS_FCOE(bp)) {
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
	}

	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
		cp->arm_int = cnic_arm_bnx2x_e2_msix;
	} else {
		cp->ack_int = cnic_ack_bnx2x_msix;
		cp->arm_int = cnic_arm_bnx2x_msix;
	}
	cp->close_conn = cnic_close_bnx2x_conn;
	return cdev;
}

static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

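/* Fan a netdev event out to every registered ULP.  ULP_F_CALL_PENDING
 * brackets each indicate_netevent() upcall so that an unregistering ULP
 * can wait for in-flight calls to drain.
 */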
5647static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5648 u16 vlan_id)
5649{
5650 int if_type;
5651
5652 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5653 struct cnic_ulp_ops *ulp_ops;
5654 void *ctx;
5655
5656 mutex_lock(&cnic_lock);
5657 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
5658 lockdep_is_held(&cnic_lock));
5659 if (!ulp_ops || !ulp_ops->indicate_netevent) {
5660 mutex_unlock(&cnic_lock);
5661 continue;
5662 }
5663
5664 ctx = cp->ulp_handle[if_type];
5665
5666 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5667 mutex_unlock(&cnic_lock);
5668
5669 ulp_ops->indicate_netevent(ctx, event, vlan_id);
5670
5671 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5672 }
5673}
5674
5675/* netdev event handler */
5676static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5677 void *ptr)
5678{
5679 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
5680 struct cnic_dev *dev;
5681 int new_dev = 0;
5682
5683 dev = cnic_from_netdev(netdev);
5684
5685 if (!dev && event == NETDEV_REGISTER) {
5686 /* Check for the hot-plug device */
5687 dev = is_cnic_dev(netdev);
5688 if (dev) {
5689 new_dev = 1;
5690 cnic_hold(dev);
5691 }
5692 }
5693 if (dev) {
5694 struct cnic_local *cp = dev->cnic_priv;
5695
5696 if (new_dev)
5697 cnic_ulp_init(dev);
5698 else if (event == NETDEV_UNREGISTER)
5699 cnic_ulp_exit(dev);
5700
5701 if (event == NETDEV_UP) {
5702 if (cnic_register_netdev(dev) != 0) {
5703 cnic_put(dev);
5704 goto done;
5705 }
5706 if (!cnic_start_hw(dev))
5707 cnic_ulp_start(dev);
5708 }
5709
5710 cnic_rcv_netevent(cp, event, 0);
5711
5712 if (event == NETDEV_GOING_DOWN) {
5713 cnic_ulp_stop(dev);
5714 cnic_stop_hw(dev);
5715 cnic_unregister_netdev(dev);
5716 } else if (event == NETDEV_UNREGISTER) {
5717 write_lock(&cnic_dev_lock);
5718 list_del_init(&dev->list);
5719 write_unlock(&cnic_dev_lock);
5720
5721 cnic_put(dev);
5722 cnic_free_dev(dev);
5723 goto done;
5724 }
5725 cnic_put(dev);
5726 } else {
5727 struct net_device *realdev;
5728 u16 vid;
5729
5730 vid = cnic_get_vlan(netdev, &realdev);
5731 if (realdev) {
5732 dev = cnic_from_netdev(realdev);
5733 if (dev) {
5734 vid |= VLAN_CFI_MASK; /* make non-zero */
5735 cnic_rcv_netevent(dev->cnic_priv, event, vid);
5736 cnic_put(dev);
5737 }
5738 }
5739 }
5740done:
5741 return NOTIFY_DONE;
5742}
5743
5744static struct notifier_block cnic_netdev_notifier = {
5745 .notifier_call = cnic_netdev_event
5746};
5747
static void cnic_release(void)
{
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

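/* Register the netdev notifier before creating the workqueue so that
 * hot-plugged devices are seen as soon as the module loads; cnic_wq is
 * the single-threaded queue used for delayed connection cleanup.
 */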
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);
1/* cnic.c: QLogic CNIC core network driver.
2 *
3 * Copyright (c) 2006-2014 Broadcom Corporation
4 * Copyright (c) 2014-2015 QLogic Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
11 * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
12 * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/module.h>
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/list.h>
22#include <linux/slab.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/uio_driver.h>
27#include <linux/in.h>
28#include <linux/dma-mapping.h>
29#include <linux/delay.h>
30#include <linux/ethtool.h>
31#include <linux/if_vlan.h>
32#include <linux/prefetch.h>
33#include <linux/random.h>
34#include <linux/workqueue.h>
35#if IS_ENABLED(CONFIG_VLAN_8021Q)
36#define BCM_VLAN 1
37#endif
38#include <net/ip.h>
39#include <net/tcp.h>
40#include <net/route.h>
41#include <net/ipv6.h>
42#include <net/ip6_route.h>
43#include <net/ip6_checksum.h>
44#include <scsi/iscsi_if.h>
45
46#define BCM_CNIC 1
47#include "cnic_if.h"
48#include "bnx2.h"
49#include "bnx2x/bnx2x.h"
50#include "bnx2x/bnx2x_reg.h"
51#include "bnx2x/bnx2x_fw_defs.h"
52#include "bnx2x/bnx2x_hsi.h"
53#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
54#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
55#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
56#include "cnic.h"
57#include "cnic_defs.h"
58
59#define CNIC_MODULE_NAME "cnic"
60
61static char version[] =
62 "QLogic " CNIC_MODULE_NAME "Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
63
64MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
65 "Chen (zongxi@broadcom.com");
66MODULE_DESCRIPTION("QLogic cnic Driver");
67MODULE_LICENSE("GPL");
68MODULE_VERSION(CNIC_MODULE_VERSION);
69
70/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
71static LIST_HEAD(cnic_dev_list);
72static LIST_HEAD(cnic_udev_list);
73static DEFINE_RWLOCK(cnic_dev_lock);
74static DEFINE_MUTEX(cnic_lock);
75
76static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
77
78/* helper function, assuming cnic_lock is held */
79static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
80{
81 return rcu_dereference_protected(cnic_ulp_tbl[type],
82 lockdep_is_held(&cnic_lock));
83}
84
85static int cnic_service_bnx2(void *, void *);
86static int cnic_service_bnx2x(void *, void *);
87static int cnic_ctl(void *, struct cnic_ctl_info *);
88
89static struct cnic_ops cnic_bnx2_ops = {
90 .cnic_owner = THIS_MODULE,
91 .cnic_handler = cnic_service_bnx2,
92 .cnic_ctl = cnic_ctl,
93};
94
95static struct cnic_ops cnic_bnx2x_ops = {
96 .cnic_owner = THIS_MODULE,
97 .cnic_handler = cnic_service_bnx2x,
98 .cnic_ctl = cnic_ctl,
99};
100
101static struct workqueue_struct *cnic_wq;
102
103static void cnic_shutdown_rings(struct cnic_dev *);
104static void cnic_init_rings(struct cnic_dev *);
105static int cnic_cm_set_pg(struct cnic_sock *);
106
107static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
108{
109 struct cnic_uio_dev *udev = uinfo->priv;
110 struct cnic_dev *dev;
111
112 if (!capable(CAP_NET_ADMIN))
113 return -EPERM;
114
115 if (udev->uio_dev != -1)
116 return -EBUSY;
117
118 rtnl_lock();
119 dev = udev->dev;
120
121 if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
122 rtnl_unlock();
123 return -ENODEV;
124 }
125
126 udev->uio_dev = iminor(inode);
127
128 cnic_shutdown_rings(dev);
129 cnic_init_rings(dev);
130 rtnl_unlock();
131
132 return 0;
133}
134
135static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
136{
137 struct cnic_uio_dev *udev = uinfo->priv;
138
139 udev->uio_dev = -1;
140 return 0;
141}
142
143static inline void cnic_hold(struct cnic_dev *dev)
144{
145 atomic_inc(&dev->ref_count);
146}
147
148static inline void cnic_put(struct cnic_dev *dev)
149{
150 atomic_dec(&dev->ref_count);
151}
152
153static inline void csk_hold(struct cnic_sock *csk)
154{
155 atomic_inc(&csk->ref_count);
156}
157
158static inline void csk_put(struct cnic_sock *csk)
159{
160 atomic_dec(&csk->ref_count);
161}
162
163static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
164{
165 struct cnic_dev *cdev;
166
167 read_lock(&cnic_dev_lock);
168 list_for_each_entry(cdev, &cnic_dev_list, list) {
169 if (netdev == cdev->netdev) {
170 cnic_hold(cdev);
171 read_unlock(&cnic_dev_lock);
172 return cdev;
173 }
174 }
175 read_unlock(&cnic_dev_lock);
176 return NULL;
177}
178
179static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
180{
181 atomic_inc(&ulp_ops->ref_count);
182}
183
184static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
185{
186 atomic_dec(&ulp_ops->ref_count);
187}
188
189static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
190{
191 struct cnic_local *cp = dev->cnic_priv;
192 struct cnic_eth_dev *ethdev = cp->ethdev;
193 struct drv_ctl_info info;
194 struct drv_ctl_io *io = &info.data.io;
195
196 memset(&info, 0, sizeof(struct drv_ctl_info));
197 info.cmd = DRV_CTL_CTX_WR_CMD;
198 io->cid_addr = cid_addr;
199 io->offset = off;
200 io->data = val;
201 ethdev->drv_ctl(dev->netdev, &info);
202}
203
204static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
205{
206 struct cnic_local *cp = dev->cnic_priv;
207 struct cnic_eth_dev *ethdev = cp->ethdev;
208 struct drv_ctl_info info;
209 struct drv_ctl_io *io = &info.data.io;
210
211 memset(&info, 0, sizeof(struct drv_ctl_info));
212 info.cmd = DRV_CTL_CTXTBL_WR_CMD;
213 io->offset = off;
214 io->dma_addr = addr;
215 ethdev->drv_ctl(dev->netdev, &info);
216}
217
218static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
219{
220 struct cnic_local *cp = dev->cnic_priv;
221 struct cnic_eth_dev *ethdev = cp->ethdev;
222 struct drv_ctl_info info;
223 struct drv_ctl_l2_ring *ring = &info.data.ring;
224
225 memset(&info, 0, sizeof(struct drv_ctl_info));
226 if (start)
227 info.cmd = DRV_CTL_START_L2_CMD;
228 else
229 info.cmd = DRV_CTL_STOP_L2_CMD;
230
231 ring->cid = cid;
232 ring->client_id = cl_id;
233 ethdev->drv_ctl(dev->netdev, &info);
234}
235
236static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
237{
238 struct cnic_local *cp = dev->cnic_priv;
239 struct cnic_eth_dev *ethdev = cp->ethdev;
240 struct drv_ctl_info info;
241 struct drv_ctl_io *io = &info.data.io;
242
243 memset(&info, 0, sizeof(struct drv_ctl_info));
244 info.cmd = DRV_CTL_IO_WR_CMD;
245 io->offset = off;
246 io->data = val;
247 ethdev->drv_ctl(dev->netdev, &info);
248}
249
250static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
251{
252 struct cnic_local *cp = dev->cnic_priv;
253 struct cnic_eth_dev *ethdev = cp->ethdev;
254 struct drv_ctl_info info;
255 struct drv_ctl_io *io = &info.data.io;
256
257 memset(&info, 0, sizeof(struct drv_ctl_info));
258 info.cmd = DRV_CTL_IO_RD_CMD;
259 io->offset = off;
260 ethdev->drv_ctl(dev->netdev, &info);
261 return io->data;
262}
263
264static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
265{
266 struct cnic_local *cp = dev->cnic_priv;
267 struct cnic_eth_dev *ethdev = cp->ethdev;
268 struct drv_ctl_info info;
269 struct fcoe_capabilities *fcoe_cap =
270 &info.data.register_data.fcoe_features;
271
272 memset(&info, 0, sizeof(struct drv_ctl_info));
273 if (reg) {
274 info.cmd = DRV_CTL_ULP_REGISTER_CMD;
275 if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
276 memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
277 } else {
278 info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
279 }
280
281 info.data.ulp_type = ulp_type;
282 info.drv_state = state;
283 ethdev->drv_ctl(dev->netdev, &info);
284}
285
286static int cnic_in_use(struct cnic_sock *csk)
287{
288 return test_bit(SK_F_INUSE, &csk->flags);
289}
290
291static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
292{
293 struct cnic_local *cp = dev->cnic_priv;
294 struct cnic_eth_dev *ethdev = cp->ethdev;
295 struct drv_ctl_info info;
296
297 memset(&info, 0, sizeof(struct drv_ctl_info));
298 info.cmd = cmd;
299 info.data.credit.credit_count = count;
300 ethdev->drv_ctl(dev->netdev, &info);
301}
302
303static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
304{
305 u32 i;
306
307 if (!cp->ctx_tbl)
308 return -EINVAL;
309
310 for (i = 0; i < cp->max_cid_space; i++) {
311 if (cp->ctx_tbl[i].cid == cid) {
312 *l5_cid = i;
313 return 0;
314 }
315 }
316 return -EINVAL;
317}
318
319static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
320 struct cnic_sock *csk)
321{
322 struct iscsi_path path_req;
323 char *buf = NULL;
324 u16 len = 0;
325 u32 msg_type = ISCSI_KEVENT_IF_DOWN;
326 struct cnic_ulp_ops *ulp_ops;
327 struct cnic_uio_dev *udev = cp->udev;
328 int rc = 0, retry = 0;
329
330 if (!udev || udev->uio_dev == -1)
331 return -ENODEV;
332
333 if (csk) {
334 len = sizeof(path_req);
335 buf = (char *) &path_req;
336 memset(&path_req, 0, len);
337
338 msg_type = ISCSI_KEVENT_PATH_REQ;
339 path_req.handle = (u64) csk->l5_cid;
340 if (test_bit(SK_F_IPV6, &csk->flags)) {
341 memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
342 sizeof(struct in6_addr));
343 path_req.ip_addr_len = 16;
344 } else {
345 memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
346 sizeof(struct in_addr));
347 path_req.ip_addr_len = 4;
348 }
349 path_req.vlan_id = csk->vlan_id;
350 path_req.pmtu = csk->mtu;
351 }
352
353 while (retry < 3) {
354 rc = 0;
355 rcu_read_lock();
356 ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
357 if (ulp_ops)
358 rc = ulp_ops->iscsi_nl_send_msg(
359 cp->ulp_handle[CNIC_ULP_ISCSI],
360 msg_type, buf, len);
361 rcu_read_unlock();
362 if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
363 break;
364
365 msleep(100);
366 retry++;
367 }
368 return rc;
369}
370
371static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
372
373static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
374 char *buf, u16 len)
375{
376 int rc = -EINVAL;
377
378 switch (msg_type) {
379 case ISCSI_UEVENT_PATH_UPDATE: {
380 struct cnic_local *cp;
381 u32 l5_cid;
382 struct cnic_sock *csk;
383 struct iscsi_path *path_resp;
384
385 if (len < sizeof(*path_resp))
386 break;
387
388 path_resp = (struct iscsi_path *) buf;
389 cp = dev->cnic_priv;
390 l5_cid = (u32) path_resp->handle;
391 if (l5_cid >= MAX_CM_SK_TBL_SZ)
392 break;
393
394 if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
395 rc = -ENODEV;
396 break;
397 }
398 csk = &cp->csk_tbl[l5_cid];
399 csk_hold(csk);
400 if (cnic_in_use(csk) &&
401 test_bit(SK_F_CONNECT_START, &csk->flags)) {
402
403 csk->vlan_id = path_resp->vlan_id;
404
405 memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
406 if (test_bit(SK_F_IPV6, &csk->flags))
407 memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
408 sizeof(struct in6_addr));
409 else
410 memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
411 sizeof(struct in_addr));
412
413 if (is_valid_ether_addr(csk->ha)) {
414 cnic_cm_set_pg(csk);
415 } else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
416 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
417
418 cnic_cm_upcall(cp, csk,
419 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
420 clear_bit(SK_F_CONNECT_START, &csk->flags);
421 }
422 }
423 csk_put(csk);
424 rc = 0;
425 }
426 }
427
428 return rc;
429}
430
431static int cnic_offld_prep(struct cnic_sock *csk)
432{
433 if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
434 return 0;
435
436 if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
437 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
438 return 0;
439 }
440
441 return 1;
442}
443
444static int cnic_close_prep(struct cnic_sock *csk)
445{
446 clear_bit(SK_F_CONNECT_START, &csk->flags);
447 smp_mb__after_atomic();
448
449 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
450 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
451 msleep(1);
452
453 return 1;
454 }
455 return 0;
456}
457
458static int cnic_abort_prep(struct cnic_sock *csk)
459{
460 clear_bit(SK_F_CONNECT_START, &csk->flags);
461 smp_mb__after_atomic();
462
463 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
464 msleep(1);
465
466 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
467 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
468 return 1;
469 }
470
471 return 0;
472}
473
474int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
475{
476 struct cnic_dev *dev;
477
478 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
479 pr_err("%s: Bad type %d\n", __func__, ulp_type);
480 return -EINVAL;
481 }
482 mutex_lock(&cnic_lock);
483 if (cnic_ulp_tbl_prot(ulp_type)) {
484 pr_err("%s: Type %d has already been registered\n",
485 __func__, ulp_type);
486 mutex_unlock(&cnic_lock);
487 return -EBUSY;
488 }
489
490 read_lock(&cnic_dev_lock);
491 list_for_each_entry(dev, &cnic_dev_list, list) {
492 struct cnic_local *cp = dev->cnic_priv;
493
494 clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
495 }
496 read_unlock(&cnic_dev_lock);
497
498 atomic_set(&ulp_ops->ref_count, 0);
499 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
500 mutex_unlock(&cnic_lock);
501
502 /* Prevent race conditions with netdev_event */
503 rtnl_lock();
504 list_for_each_entry(dev, &cnic_dev_list, list) {
505 struct cnic_local *cp = dev->cnic_priv;
506
507 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
508 ulp_ops->cnic_init(dev);
509 }
510 rtnl_unlock();
511
512 return 0;
513}
514
515int cnic_unregister_driver(int ulp_type)
516{
517 struct cnic_dev *dev;
518 struct cnic_ulp_ops *ulp_ops;
519 int i = 0;
520
521 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
522 pr_err("%s: Bad type %d\n", __func__, ulp_type);
523 return -EINVAL;
524 }
525 mutex_lock(&cnic_lock);
526 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
527 if (!ulp_ops) {
528 pr_err("%s: Type %d has not been registered\n",
529 __func__, ulp_type);
530 goto out_unlock;
531 }
532 read_lock(&cnic_dev_lock);
533 list_for_each_entry(dev, &cnic_dev_list, list) {
534 struct cnic_local *cp = dev->cnic_priv;
535
536 if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
537 pr_err("%s: Type %d still has devices registered\n",
538 __func__, ulp_type);
539 read_unlock(&cnic_dev_lock);
540 goto out_unlock;
541 }
542 }
543 read_unlock(&cnic_dev_lock);
544
545 RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
546
547 mutex_unlock(&cnic_lock);
548 synchronize_rcu();
549 while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
550 msleep(100);
551 i++;
552 }
553
554 if (atomic_read(&ulp_ops->ref_count) != 0)
555 pr_warn("%s: Failed waiting for ref count to go to zero\n",
556 __func__);
557 return 0;
558
559out_unlock:
560 mutex_unlock(&cnic_lock);
561 return -EINVAL;
562}
563
564static int cnic_start_hw(struct cnic_dev *);
565static void cnic_stop_hw(struct cnic_dev *);
566
567static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
568 void *ulp_ctx)
569{
570 struct cnic_local *cp = dev->cnic_priv;
571 struct cnic_ulp_ops *ulp_ops;
572
573 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
574 pr_err("%s: Bad type %d\n", __func__, ulp_type);
575 return -EINVAL;
576 }
577 mutex_lock(&cnic_lock);
578 if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
579 pr_err("%s: Driver with type %d has not been registered\n",
580 __func__, ulp_type);
581 mutex_unlock(&cnic_lock);
582 return -EAGAIN;
583 }
584 if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
585 pr_err("%s: Type %d has already been registered to this device\n",
586 __func__, ulp_type);
587 mutex_unlock(&cnic_lock);
588 return -EBUSY;
589 }
590
591 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
592 cp->ulp_handle[ulp_type] = ulp_ctx;
593 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
594 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
595 cnic_hold(dev);
596
597 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
598 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
599 ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
600
601 mutex_unlock(&cnic_lock);
602
603 cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
604
605 return 0;
606
607}
608EXPORT_SYMBOL(cnic_register_driver);
609
610static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
611{
612 struct cnic_local *cp = dev->cnic_priv;
613 int i = 0;
614
615 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
616 pr_err("%s: Bad type %d\n", __func__, ulp_type);
617 return -EINVAL;
618 }
619
620 if (ulp_type == CNIC_ULP_ISCSI)
621 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
622
623 mutex_lock(&cnic_lock);
624 if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
625 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
626 cnic_put(dev);
627 } else {
628 pr_err("%s: device not registered to this ulp type %d\n",
629 __func__, ulp_type);
630 mutex_unlock(&cnic_lock);
631 return -EINVAL;
632 }
633 mutex_unlock(&cnic_lock);
634
635 if (ulp_type == CNIC_ULP_FCOE)
636 dev->fcoe_cap = NULL;
637
638 synchronize_rcu();
639
640 while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
641 i < 20) {
642 msleep(100);
643 i++;
644 }
645 if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
646 netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
647
648 if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
649 cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
650 else
651 cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
652
653 return 0;
654}
655EXPORT_SYMBOL(cnic_unregister_driver);
656
657static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
658 u32 next)
659{
660 id_tbl->start = start_id;
661 id_tbl->max = size;
662 id_tbl->next = next;
663 spin_lock_init(&id_tbl->lock);
664 id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
665 if (!id_tbl->table)
666 return -ENOMEM;
667
668 return 0;
669}
670
671static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
672{
673 bitmap_free(id_tbl->table);
674 id_tbl->table = NULL;
675}
676
677static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
678{
679 int ret = -1;
680
681 id -= id_tbl->start;
682 if (id >= id_tbl->max)
683 return ret;
684
685 spin_lock(&id_tbl->lock);
686 if (!test_bit(id, id_tbl->table)) {
687 set_bit(id, id_tbl->table);
688 ret = 0;
689 }
690 spin_unlock(&id_tbl->lock);
691 return ret;
692}
693
694/* Returns -1 if not successful */
695static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
696{
697 u32 id;
698
699 spin_lock(&id_tbl->lock);
700 id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
701 if (id >= id_tbl->max) {
702 id = -1;
703 if (id_tbl->next != 0) {
704 id = find_first_zero_bit(id_tbl->table, id_tbl->next);
705 if (id >= id_tbl->next)
706 id = -1;
707 }
708 }
709
710 if (id < id_tbl->max) {
711 set_bit(id, id_tbl->table);
712 id_tbl->next = (id + 1) & (id_tbl->max - 1);
713 id += id_tbl->start;
714 }
715
716 spin_unlock(&id_tbl->lock);
717
718 return id;
719}
720
721static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
722{
723 if (id == -1)
724 return;
725
726 id -= id_tbl->start;
727 if (id >= id_tbl->max)
728 return;
729
730 clear_bit(id, id_tbl->table);
731}
732
733static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
734{
735 int i;
736
737 if (!dma->pg_arr)
738 return;
739
740 for (i = 0; i < dma->num_pages; i++) {
741 if (dma->pg_arr[i]) {
742 dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
743 dma->pg_arr[i], dma->pg_map_arr[i]);
744 dma->pg_arr[i] = NULL;
745 }
746 }
747 if (dma->pgtbl) {
748 dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
749 dma->pgtbl, dma->pgtbl_map);
750 dma->pgtbl = NULL;
751 }
752 kfree(dma->pg_arr);
753 dma->pg_arr = NULL;
754 dma->num_pages = 0;
755}
756
757static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
758{
759 int i;
760 __le32 *page_table = (__le32 *) dma->pgtbl;
761
762 for (i = 0; i < dma->num_pages; i++) {
763 /* Each entry needs to be in big endian format. */
764 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
765 page_table++;
766 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
767 page_table++;
768 }
769}
770
771static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
772{
773 int i;
774 __le32 *page_table = (__le32 *) dma->pgtbl;
775
776 for (i = 0; i < dma->num_pages; i++) {
777 /* Each entry needs to be in little endian format. */
778 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
779 page_table++;
780 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
781 page_table++;
782 }
783}
784
785static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
786 int pages, int use_pg_tbl)
787{
788 int i, size;
789 struct cnic_local *cp = dev->cnic_priv;
790
791 size = pages * (sizeof(void *) + sizeof(dma_addr_t));
792 dma->pg_arr = kzalloc(size, GFP_ATOMIC);
793 if (dma->pg_arr == NULL)
794 return -ENOMEM;
795
796 dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
797 dma->num_pages = pages;
798
799 for (i = 0; i < pages; i++) {
800 dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
801 CNIC_PAGE_SIZE,
802 &dma->pg_map_arr[i],
803 GFP_ATOMIC);
804 if (dma->pg_arr[i] == NULL)
805 goto error;
806 }
807 if (!use_pg_tbl)
808 return 0;
809
810 dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
811 ~(CNIC_PAGE_SIZE - 1);
812 dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
813 &dma->pgtbl_map, GFP_ATOMIC);
814 if (dma->pgtbl == NULL)
815 goto error;
816
817 cp->setup_pgtbl(dev, dma);
818
819 return 0;
820
821error:
822 cnic_free_dma(dev, dma);
823 return -ENOMEM;
824}
825
826static void cnic_free_context(struct cnic_dev *dev)
827{
828 struct cnic_local *cp = dev->cnic_priv;
829 int i;
830
831 for (i = 0; i < cp->ctx_blks; i++) {
832 if (cp->ctx_arr[i].ctx) {
833 dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
834 cp->ctx_arr[i].ctx,
835 cp->ctx_arr[i].mapping);
836 cp->ctx_arr[i].ctx = NULL;
837 }
838 }
839}
840
841static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
842{
843 if (udev->l2_buf) {
844 dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
845 udev->l2_buf, udev->l2_buf_map);
846 udev->l2_buf = NULL;
847 }
848
849 if (udev->l2_ring) {
850 dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
851 udev->l2_ring, udev->l2_ring_map);
852 udev->l2_ring = NULL;
853 }
854
855}
856
857static void __cnic_free_uio(struct cnic_uio_dev *udev)
858{
859 uio_unregister_device(&udev->cnic_uinfo);
860
861 __cnic_free_uio_rings(udev);
862
863 pci_dev_put(udev->pdev);
864 kfree(udev);
865}
866
867static void cnic_free_uio(struct cnic_uio_dev *udev)
868{
869 if (!udev)
870 return;
871
872 write_lock(&cnic_dev_lock);
873 list_del_init(&udev->list);
874 write_unlock(&cnic_dev_lock);
875 __cnic_free_uio(udev);
876}
877
878static void cnic_free_resc(struct cnic_dev *dev)
879{
880 struct cnic_local *cp = dev->cnic_priv;
881 struct cnic_uio_dev *udev = cp->udev;
882
883 if (udev) {
884 udev->dev = NULL;
885 cp->udev = NULL;
886 if (udev->uio_dev == -1)
887 __cnic_free_uio_rings(udev);
888 }
889
890 cnic_free_context(dev);
891 kfree(cp->ctx_arr);
892 cp->ctx_arr = NULL;
893 cp->ctx_blks = 0;
894
895 cnic_free_dma(dev, &cp->gbl_buf_info);
896 cnic_free_dma(dev, &cp->kwq_info);
897 cnic_free_dma(dev, &cp->kwq_16_data_info);
898 cnic_free_dma(dev, &cp->kcq2.dma);
899 cnic_free_dma(dev, &cp->kcq1.dma);
900 kfree(cp->iscsi_tbl);
901 cp->iscsi_tbl = NULL;
902 kfree(cp->ctx_tbl);
903 cp->ctx_tbl = NULL;
904
905 cnic_free_id_tbl(&cp->fcoe_cid_tbl);
906 cnic_free_id_tbl(&cp->cid_tbl);
907}
908
909static int cnic_alloc_context(struct cnic_dev *dev)
910{
911 struct cnic_local *cp = dev->cnic_priv;
912
913 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
914 int i, k, arr_size;
915
916 cp->ctx_blk_size = CNIC_PAGE_SIZE;
917 cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
918 arr_size = BNX2_MAX_CID / cp->cids_per_blk *
919 sizeof(struct cnic_ctx);
920 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
921 if (cp->ctx_arr == NULL)
922 return -ENOMEM;
923
924 k = 0;
925 for (i = 0; i < 2; i++) {
926 u32 j, reg, off, lo, hi;
927
928 if (i == 0)
929 off = BNX2_PG_CTX_MAP;
930 else
931 off = BNX2_ISCSI_CTX_MAP;
932
933 reg = cnic_reg_rd_ind(dev, off);
934 lo = reg >> 16;
935 hi = reg & 0xffff;
936 for (j = lo; j < hi; j += cp->cids_per_blk, k++)
937 cp->ctx_arr[k].cid = j;
938 }
939
940 cp->ctx_blks = k;
941 if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
942 cp->ctx_blks = 0;
943 return -ENOMEM;
944 }
945
946 for (i = 0; i < cp->ctx_blks; i++) {
947 cp->ctx_arr[i].ctx =
948 dma_alloc_coherent(&dev->pcidev->dev,
949 CNIC_PAGE_SIZE,
950 &cp->ctx_arr[i].mapping,
951 GFP_KERNEL);
952 if (cp->ctx_arr[i].ctx == NULL)
953 return -ENOMEM;
954 }
955 }
956 return 0;
957}
958
959static u16 cnic_bnx2_next_idx(u16 idx)
960{
961 return idx + 1;
962}
963
964static u16 cnic_bnx2_hw_idx(u16 idx)
965{
966 return idx;
967}
968
969static u16 cnic_bnx2x_next_idx(u16 idx)
970{
971 idx++;
972 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
973 idx++;
974
975 return idx;
976}
977
978static u16 cnic_bnx2x_hw_idx(u16 idx)
979{
980 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
981 idx++;
982 return idx;
983}
984
985static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
986 bool use_pg_tbl)
987{
988 int err, i, use_page_tbl = 0;
989 struct kcqe **kcq;
990
991 if (use_pg_tbl)
992 use_page_tbl = 1;
993
994 err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
995 if (err)
996 return err;
997
998 kcq = (struct kcqe **) info->dma.pg_arr;
999 info->kcq = kcq;
1000
1001 info->next_idx = cnic_bnx2_next_idx;
1002 info->hw_idx = cnic_bnx2_hw_idx;
1003 if (use_pg_tbl)
1004 return 0;
1005
1006 info->next_idx = cnic_bnx2x_next_idx;
1007 info->hw_idx = cnic_bnx2x_hw_idx;
1008
1009 for (i = 0; i < KCQ_PAGE_CNT; i++) {
1010 struct bnx2x_bd_chain_next *next =
1011 (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
1012 int j = i + 1;
1013
1014 if (j >= KCQ_PAGE_CNT)
1015 j = 0;
1016 next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
1017 next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
1018 }
1019 return 0;
1020}
1021
1022static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
1023{
1024 struct cnic_local *cp = udev->dev->cnic_priv;
1025
1026 if (udev->l2_ring)
1027 return 0;
1028
1029 udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
1030 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1031 &udev->l2_ring_map, GFP_KERNEL);
1032 if (!udev->l2_ring)
1033 return -ENOMEM;
1034
1035 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
1036 udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
1037 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1038 &udev->l2_buf_map, GFP_KERNEL);
1039 if (!udev->l2_buf) {
1040 __cnic_free_uio_rings(udev);
1041 return -ENOMEM;
1042 }
1043
1044 return 0;
1045
1046}
1047
1048static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1049{
1050 struct cnic_local *cp = dev->cnic_priv;
1051 struct cnic_uio_dev *udev;
1052
1053 list_for_each_entry(udev, &cnic_udev_list, list) {
1054 if (udev->pdev == dev->pcidev) {
1055 udev->dev = dev;
1056 if (__cnic_alloc_uio_rings(udev, pages)) {
1057 udev->dev = NULL;
1058 return -ENOMEM;
1059 }
1060 cp->udev = udev;
1061 return 0;
1062 }
1063 }
1064
1065 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1066 if (!udev)
1067 return -ENOMEM;
1068
1069 udev->uio_dev = -1;
1070
1071 udev->dev = dev;
1072 udev->pdev = dev->pcidev;
1073
1074 if (__cnic_alloc_uio_rings(udev, pages))
1075 goto err_udev;
1076
1077 list_add(&udev->list, &cnic_udev_list);
1078
1079 pci_dev_get(udev->pdev);
1080
1081 cp->udev = udev;
1082
1083 return 0;
1084
1085 err_udev:
1086 kfree(udev);
1087 return -ENOMEM;
1088}
1089
1090static int cnic_init_uio(struct cnic_dev *dev)
1091{
1092 struct cnic_local *cp = dev->cnic_priv;
1093 struct cnic_uio_dev *udev = cp->udev;
1094 struct uio_info *uinfo;
1095 int ret = 0;
1096
1097 if (!udev)
1098 return -ENOMEM;
1099
1100 uinfo = &udev->cnic_uinfo;
1101
1102 uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
1103 uinfo->mem[0].internal_addr = dev->regview;
1104 uinfo->mem[0].memtype = UIO_MEM_PHYS;
1105
1106 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1107 uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
1108 TX_MAX_TSS_RINGS + 1);
1109 uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
1110 CNIC_PAGE_MASK;
1111 uinfo->mem[1].dma_addr = cp->status_blk_map;
1112 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
1113 uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE * 9);
1114 else
1115 uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE);
1116
1117 uinfo->name = "bnx2_cnic";
1118 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1119 uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
1120
1121 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
1122 CNIC_PAGE_MASK;
1123 uinfo->mem[1].dma_addr = cp->status_blk_map;
1124 uinfo->mem[1].size = PAGE_ALIGN(sizeof(*cp->bnx2x_def_status_blk));
1125
1126 uinfo->name = "bnx2x_cnic";
1127 }
1128
1129 uinfo->mem[1].dma_device = &dev->pcidev->dev;
1130 uinfo->mem[1].memtype = UIO_MEM_DMA_COHERENT;
1131
1132 uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
1133 uinfo->mem[2].dma_addr = udev->l2_ring_map;
1134 uinfo->mem[2].size = PAGE_ALIGN(udev->l2_ring_size);
1135 uinfo->mem[2].dma_device = &dev->pcidev->dev;
1136 uinfo->mem[2].memtype = UIO_MEM_DMA_COHERENT;
1137
1138 uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
1139 uinfo->mem[3].dma_addr = udev->l2_buf_map;
1140 uinfo->mem[3].size = PAGE_ALIGN(udev->l2_buf_size);
1141 uinfo->mem[3].dma_device = &dev->pcidev->dev;
1142 uinfo->mem[3].memtype = UIO_MEM_DMA_COHERENT;
1143
1144 uinfo->version = CNIC_MODULE_VERSION;
1145 uinfo->irq = UIO_IRQ_CUSTOM;
1146
1147 uinfo->open = cnic_uio_open;
1148 uinfo->release = cnic_uio_close;
1149
1150 if (udev->uio_dev == -1) {
1151 if (!uinfo->priv) {
1152 uinfo->priv = udev;
1153
1154 ret = uio_register_device(&udev->pdev->dev, uinfo);
1155 }
1156 } else {
1157 cnic_init_rings(dev);
1158 }
1159
1160 return ret;
1161}
1162
1163static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
1164{
1165 struct cnic_local *cp = dev->cnic_priv;
1166 int ret;
1167
1168 ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
1169 if (ret)
1170 goto error;
1171 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
1172
1173 ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
1174 if (ret)
1175 goto error;
1176
1177 ret = cnic_alloc_context(dev);
1178 if (ret)
1179 goto error;
1180
1181 ret = cnic_alloc_uio_rings(dev, 2);
1182 if (ret)
1183 goto error;
1184
1185 ret = cnic_init_uio(dev);
1186 if (ret)
1187 goto error;
1188
1189 return 0;
1190
1191error:
1192 cnic_free_resc(dev);
1193 return ret;
1194}
1195
1196static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1197{
1198 struct cnic_local *cp = dev->cnic_priv;
1199 struct bnx2x *bp = netdev_priv(dev->netdev);
1200 int ctx_blk_size = cp->ethdev->ctx_blk_size;
1201 int total_mem, blks, i;
1202
1203 total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
1204 blks = total_mem / ctx_blk_size;
1205 if (total_mem % ctx_blk_size)
1206 blks++;
1207
1208 if (blks > cp->ethdev->ctx_tbl_len)
1209 return -ENOMEM;
1210
1211 cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
1212 if (cp->ctx_arr == NULL)
1213 return -ENOMEM;
1214
1215 cp->ctx_blks = blks;
1216 cp->ctx_blk_size = ctx_blk_size;
1217 if (!CHIP_IS_E1(bp))
1218 cp->ctx_align = 0;
1219 else
1220 cp->ctx_align = ctx_blk_size;
1221
1222 cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
1223
1224 for (i = 0; i < blks; i++) {
1225 cp->ctx_arr[i].ctx =
1226 dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
1227 &cp->ctx_arr[i].mapping,
1228 GFP_KERNEL);
1229 if (cp->ctx_arr[i].ctx == NULL)
1230 return -ENOMEM;
1231
1232 if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
1233 if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
1234 cnic_free_context(dev);
1235 cp->ctx_blk_size += cp->ctx_align;
1236 i = -1;
1237 continue;
1238 }
1239 }
1240 }
1241 return 0;
1242}
1243
1244static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1245{
1246 struct cnic_local *cp = dev->cnic_priv;
1247 struct bnx2x *bp = netdev_priv(dev->netdev);
1248 struct cnic_eth_dev *ethdev = cp->ethdev;
1249 u32 start_cid = ethdev->starting_cid;
1250 int i, j, n, ret, pages;
1251 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1252
1253 cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1254 cp->iscsi_start_cid = start_cid;
1255 cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
1256
1257 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
1258 cp->max_cid_space += dev->max_fcoe_conn;
1259 cp->fcoe_init_cid = ethdev->fcoe_init_cid;
1260 if (!cp->fcoe_init_cid)
1261 cp->fcoe_init_cid = 0x10;
1262 }
1263
1264 cp->iscsi_tbl = kcalloc(MAX_ISCSI_TBL_SZ, sizeof(struct cnic_iscsi),
1265 GFP_KERNEL);
1266 if (!cp->iscsi_tbl)
1267 goto error;
1268
1269 cp->ctx_tbl = kcalloc(cp->max_cid_space, sizeof(struct cnic_context),
1270 GFP_KERNEL);
1271 if (!cp->ctx_tbl)
1272 goto error;
1273
1274 for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1275 cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
1276 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1277 }
1278
1279 for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
1280 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
1281
1282 pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1283 CNIC_PAGE_SIZE;
1284
1285 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1286 if (ret)
1287 goto error;
1288
1289 n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1290 for (i = 0, j = 0; i < cp->max_cid_space; i++) {
1291 long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1292
1293 cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
1294 cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
1295 off;
1296
1297 if ((i % n) == (n - 1))
1298 j++;
1299 }
1300
1301 ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
1302 if (ret)
1303 goto error;
1304
1305 if (CNIC_SUPPORTS_FCOE(bp)) {
1306 ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1307 if (ret)
1308 goto error;
1309 }
1310
1311 pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
1312 ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1313 if (ret)
1314 goto error;
1315
1316 ret = cnic_alloc_bnx2x_context(dev);
1317 if (ret)
1318 goto error;
1319
1320 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
1321 return 0;
1322
1323 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1324 cp->status_blk_map = cp->ethdev->irq_arr[1].status_blk_map;
1325
1326 cp->l2_rx_ring_size = 15;
1327
1328 ret = cnic_alloc_uio_rings(dev, 4);
1329 if (ret)
1330 goto error;
1331
1332 ret = cnic_init_uio(dev);
1333 if (ret)
1334 goto error;
1335
1336 return 0;
1337
1338error:
1339 cnic_free_resc(dev);
1340 return -ENOMEM;
1341}
1342
1343static inline u32 cnic_kwq_avail(struct cnic_local *cp)
1344{
1345 return cp->max_kwq_idx -
1346 ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
1347}
1348
1349static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1350 u32 num_wqes)
1351{
1352 struct cnic_local *cp = dev->cnic_priv;
1353 struct kwqe *prod_qe;
1354 u16 prod, sw_prod, i;
1355
1356 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1357 return -EAGAIN; /* bnx2 is down */
1358
1359 spin_lock_bh(&cp->cnic_ulp_lock);
1360 if (num_wqes > cnic_kwq_avail(cp) &&
1361 !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
1362 spin_unlock_bh(&cp->cnic_ulp_lock);
1363 return -EAGAIN;
1364 }
1365
1366 clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
1367
1368 prod = cp->kwq_prod_idx;
1369 sw_prod = prod & MAX_KWQ_IDX;
1370 for (i = 0; i < num_wqes; i++) {
1371 prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
1372 memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
1373 prod++;
1374 sw_prod = prod & MAX_KWQ_IDX;
1375 }
1376 cp->kwq_prod_idx = prod;
1377
1378 CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
1379
1380 spin_unlock_bh(&cp->cnic_ulp_lock);
1381 return 0;
1382}
1383
1384static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
1385 union l5cm_specific_data *l5_data)
1386{
1387 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1388 dma_addr_t map;
1389
1390 map = ctx->kwqe_data_mapping;
1391 l5_data->phy_address.lo = (u64) map & 0xffffffff;
1392 l5_data->phy_address.hi = (u64) map >> 32;
1393 return ctx->kwqe_data;
1394}
1395
1396static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1397 u32 type, union l5cm_specific_data *l5_data)
1398{
1399 struct cnic_local *cp = dev->cnic_priv;
1400 struct bnx2x *bp = netdev_priv(dev->netdev);
1401 struct l5cm_spe kwqe;
1402 struct kwqe_16 *kwq[1];
1403 u16 type_16;
1404 int ret;
1405
1406 kwqe.hdr.conn_and_cmd_data =
1407 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1408 BNX2X_HW_CID(bp, cid)));
1409
1410 type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
1411 type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1412 SPE_HDR_FUNCTION_ID;
1413
1414 kwqe.hdr.type = cpu_to_le16(type_16);
1415 kwqe.hdr.reserved1 = 0;
1416 kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1417 kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1418
1419 kwq[0] = (struct kwqe_16 *) &kwqe;
1420
1421 spin_lock_bh(&cp->cnic_ulp_lock);
1422 ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1423 spin_unlock_bh(&cp->cnic_ulp_lock);
1424
1425 if (ret == 1)
1426 return 0;
1427
1428 return ret;
1429}
1430
1431static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1432 struct kcqe *cqes[], u32 num_cqes)
1433{
1434 struct cnic_local *cp = dev->cnic_priv;
1435 struct cnic_ulp_ops *ulp_ops;
1436
1437 rcu_read_lock();
1438 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1439 if (likely(ulp_ops)) {
1440 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
1441 cqes, num_cqes);
1442 }
1443 rcu_read_unlock();
1444}
1445
1446static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
1447 int en_tcp_dack)
1448{
1449 struct bnx2x *bp = netdev_priv(dev->netdev);
1450 u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
1451 u16 tstorm_flags = 0;
1452
1453 if (time_stamps) {
1454 xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1455 tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1456 }
1457 if (en_tcp_dack)
1458 tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
1459
1460 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1461 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
1462
1463 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1464 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
1465}
1466
1467static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1468{
1469 struct cnic_local *cp = dev->cnic_priv;
1470 struct bnx2x *bp = netdev_priv(dev->netdev);
1471 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1472 int hq_bds, pages;
1473 u32 pfid = bp->pfid;
1474
1475 cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1476 cp->num_ccells = req1->num_ccells_per_conn;
1477 cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
1478 cp->num_iscsi_tasks;
1479 cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1480 BNX2X_ISCSI_R2TQE_SIZE;
1481 cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1482 pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1483 hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1484 cp->num_cqs = req1->num_cqs;
1485
1486 if (!dev->max_iscsi_conn)
1487 return 0;
1488
1489 /* init Tstorm RAM */
1490 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1491 req1->rq_num_wqes);
1492 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1493 CNIC_PAGE_SIZE);
1494 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1495 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1496 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1497 TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1498 req1->num_tasks_per_conn);
1499
1500 /* init Ustorm RAM */
1501 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1502 USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
1503 req1->rq_buffer_size);
1504 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1505 CNIC_PAGE_SIZE);
1506 CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1507 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1508 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1509 USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1510 req1->num_tasks_per_conn);
1511 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1512 req1->rq_num_wqes);
1513 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1514 req1->cq_num_wqes);
1515 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1516 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1517
1518 /* init Xstorm RAM */
1519 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1520 CNIC_PAGE_SIZE);
1521 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1522 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1523 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1524 XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1525 req1->num_tasks_per_conn);
1526 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1527 hq_bds);
1528 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1529 req1->num_tasks_per_conn);
1530 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1531 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1532
1533 /* init Cstorm RAM */
1534 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1535 CNIC_PAGE_SIZE);
1536 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1537 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1538 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1539 CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1540 req1->num_tasks_per_conn);
1541 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1542 req1->cq_num_wqes);
1543 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1544 hq_bds);
1545
1546 cnic_bnx2x_set_tcp_options(dev,
1547 req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
1548 req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
1549
1550 return 0;
1551}
1552
1553static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1554{
1555 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1556 struct bnx2x *bp = netdev_priv(dev->netdev);
1557 u32 pfid = bp->pfid;
1558 struct iscsi_kcqe kcqe;
1559 struct kcqe *cqes[1];
1560
1561 memset(&kcqe, 0, sizeof(kcqe));
1562 if (!dev->max_iscsi_conn) {
1563 kcqe.completion_status =
1564 ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1565 goto done;
1566 }
1567
1568 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1569 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1570 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1571 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1572 req2->error_bit_map[1]);
1573
1574 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1575 USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1576 CNIC_WR(dev, BAR_USTRORM_INTMEM +
1577 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1578 CNIC_WR(dev, BAR_USTRORM_INTMEM +
1579 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1580 req2->error_bit_map[1]);
1581
1582 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1583 CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1584
1585 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1586
1587done:
1588 kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1589 cqes[0] = (struct kcqe *) &kcqe;
1590 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1591
1592 return 0;
1593}
1594
1595static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1596{
1597 struct cnic_local *cp = dev->cnic_priv;
1598 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1599
1600 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1601 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1602
1603 cnic_free_dma(dev, &iscsi->hq_info);
1604 cnic_free_dma(dev, &iscsi->r2tq_info);
1605 cnic_free_dma(dev, &iscsi->task_array_info);
1606 cnic_free_id(&cp->cid_tbl, ctx->cid);
1607 } else {
1608 cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
1609 }
1610
1611 ctx->cid = 0;
1612}
1613
1614static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1615{
1616 u32 cid;
1617 int ret, pages;
1618 struct cnic_local *cp = dev->cnic_priv;
1619 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1620 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1621
1622 if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1623 cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1624 if (cid == -1) {
1625 ret = -ENOMEM;
1626 goto error;
1627 }
1628 ctx->cid = cid;
1629 return 0;
1630 }
1631
1632 cid = cnic_alloc_new_id(&cp->cid_tbl);
1633 if (cid == -1) {
1634 ret = -ENOMEM;
1635 goto error;
1636 }
1637
1638 ctx->cid = cid;
1639 pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
1640
1641 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1642 if (ret)
1643 goto error;
1644
1645 pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
1646 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1647 if (ret)
1648 goto error;
1649
1650 pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1651 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1652 if (ret)
1653 goto error;
1654
1655 return 0;
1656
1657error:
1658 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1659 return ret;
1660}
1661
1662static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1663 struct regpair *ctx_addr)
1664{
1665 struct cnic_local *cp = dev->cnic_priv;
1666 struct cnic_eth_dev *ethdev = cp->ethdev;
1667 int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1668 int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1669 unsigned long align_off = 0;
1670 dma_addr_t ctx_map;
1671 void *ctx;
1672
1673 if (cp->ctx_align) {
1674 unsigned long mask = cp->ctx_align - 1;
1675
1676 if (cp->ctx_arr[blk].mapping & mask)
1677 align_off = cp->ctx_align -
1678 (cp->ctx_arr[blk].mapping & mask);
1679 }
1680 ctx_map = cp->ctx_arr[blk].mapping + align_off +
1681 (off * BNX2X_CONTEXT_MEM_SIZE);
1682 ctx = cp->ctx_arr[blk].ctx + align_off +
1683 (off * BNX2X_CONTEXT_MEM_SIZE);
1684 if (init)
1685 memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1686
1687 ctx_addr->lo = ctx_map & 0xffffffff;
1688 ctx_addr->hi = (u64) ctx_map >> 32;
1689 return ctx;
1690}
1691
1692static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1693 u32 num)
1694{
1695 struct cnic_local *cp = dev->cnic_priv;
1696 struct bnx2x *bp = netdev_priv(dev->netdev);
1697 struct iscsi_kwqe_conn_offload1 *req1 =
1698 (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1699 struct iscsi_kwqe_conn_offload2 *req2 =
1700 (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1701 struct iscsi_kwqe_conn_offload3 *req3;
1702 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1703 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1704 u32 cid = ctx->cid;
1705 u32 hw_cid = BNX2X_HW_CID(bp, cid);
1706 struct iscsi_context *ictx;
1707 struct regpair context_addr;
1708 int i, j, n = 2, n_max;
1709 u8 port = BP_PORT(bp);
1710
1711 ctx->ctx_flags = 0;
1712 if (!req2->num_additional_wqes)
1713 return -EINVAL;
1714
1715 n_max = req2->num_additional_wqes + 2;
1716
1717 ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1718 if (ictx == NULL)
1719 return -ENOMEM;
1720
1721 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1722
1723 ictx->xstorm_ag_context.hq_prod = 1;
1724
1725 ictx->xstorm_st_context.iscsi.first_burst_length =
1726 ISCSI_DEF_FIRST_BURST_LEN;
1727 ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1728 ISCSI_DEF_MAX_RECV_SEG_LEN;
1729 ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1730 req1->sq_page_table_addr_lo;
1731 ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1732 req1->sq_page_table_addr_hi;
1733 ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1734 ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1735 ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1736 iscsi->hq_info.pgtbl_map & 0xffffffff;
1737 ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1738 (u64) iscsi->hq_info.pgtbl_map >> 32;
1739 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1740 iscsi->hq_info.pgtbl[0];
1741 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1742 iscsi->hq_info.pgtbl[1];
1743 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1744 iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1745 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1746 (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1747 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1748 iscsi->r2tq_info.pgtbl[0];
1749 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1750 iscsi->r2tq_info.pgtbl[1];
1751 ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1752 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1753 ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1754 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1755 ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1756 BNX2X_ISCSI_PBL_NOT_CACHED;
1757 ictx->xstorm_st_context.iscsi.flags.flags |=
1758 XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1759 ictx->xstorm_st_context.iscsi.flags.flags |=
1760 XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
1761 ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1762 ETH_P_8021Q;
1763 if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
1764 bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
1765
1766 port = 0;
1767 }
1768 ictx->xstorm_st_context.common.flags =
1769 1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
1770 ictx->xstorm_st_context.common.flags =
1771 port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
1772
1773 ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1774 /* TSTORM requires the base address of RQ DB & not PTE */
1775 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1776 req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
1777 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1778 req2->rq_page_table_addr_hi;
1779 ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1780 ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1781 ictx->tstorm_st_context.tcp.flags2 |=
1782 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1783 ictx->tstorm_st_context.tcp.ooo_support_mode =
1784 TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
1785
1786 ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1787
1788 ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1789 req2->rq_page_table_addr_lo;
1790 ictx->ustorm_st_context.ring.rq.pbl_base.hi =
1791 req2->rq_page_table_addr_hi;
1792 ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1793 ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1794 ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1795 iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1796 ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1797 (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1798 ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1799 iscsi->r2tq_info.pgtbl[0];
1800 ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1801 iscsi->r2tq_info.pgtbl[1];
1802 ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1803 req1->cq_page_table_addr_lo;
1804 ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1805 req1->cq_page_table_addr_hi;
1806 ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1807 ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1808 ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1809 ictx->ustorm_st_context.task_pbe_cache_index =
1810 BNX2X_ISCSI_PBL_NOT_CACHED;
1811 ictx->ustorm_st_context.task_pdu_cache_index =
1812 BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1813
1814 for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1815 if (j == 3) {
1816 if (n >= n_max)
1817 break;
1818 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1819 j = 0;
1820 }
1821 ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1822 ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1823 req3->qp_first_pte[j].hi;
1824 ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1825 req3->qp_first_pte[j].lo;
1826 }
1827
1828 ictx->ustorm_st_context.task_pbl_base.lo =
1829 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1830 ictx->ustorm_st_context.task_pbl_base.hi =
1831 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1832 ictx->ustorm_st_context.tce_phy_addr.lo =
1833 iscsi->task_array_info.pgtbl[0];
1834 ictx->ustorm_st_context.tce_phy_addr.hi =
1835 iscsi->task_array_info.pgtbl[1];
1836 ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1837 ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1838 ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1839 ictx->ustorm_st_context.negotiated_rx_and_flags |=
1840 ISCSI_DEF_MAX_BURST_LEN;
1841 ictx->ustorm_st_context.negotiated_rx |=
1842 ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1843 USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1844
1845 ictx->cstorm_st_context.hq_pbl_base.lo =
1846 iscsi->hq_info.pgtbl_map & 0xffffffff;
1847 ictx->cstorm_st_context.hq_pbl_base.hi =
1848 (u64) iscsi->hq_info.pgtbl_map >> 32;
1849 ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1850 ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1851 ictx->cstorm_st_context.task_pbl_base.lo =
1852 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1853 ictx->cstorm_st_context.task_pbl_base.hi =
1854 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1855 /* CSTORM and USTORM initialization is different, CSTORM requires
1856 * CQ DB base & not PTE addr */
1857 ictx->cstorm_st_context.cq_db_base.lo =
1858 req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
1859 ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1860 ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1861 ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1862 for (i = 0; i < cp->num_cqs; i++) {
1863 ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1864 ISCSI_INITIAL_SN;
1865 ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1866 ISCSI_INITIAL_SN;
1867 }
1868
1869 ictx->xstorm_ag_context.cdu_reserved =
1870 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1871 ISCSI_CONNECTION_TYPE);
1872 ictx->ustorm_ag_context.cdu_usage =
1873 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1874 ISCSI_CONNECTION_TYPE);
1875 return 0;
1876
1877}
1878
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
				  req->context_id, ISCSI_CONNECTION_TYPE,
				  &l5_data);
	return ret;
}

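/* Issue a CFC delete ramrod for the connection and wait (bounded by
 * CNIC_RAMROD_TMO) for the completion handler to set ctx->wait_cond.
 * Returns -EBUSY if the firmware reported a CID error.
 */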
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(bp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return 0;
}

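/* Destroy an offloaded iSCSI connection.  If the connection was
 * offloaded less than two seconds ago, the CFC delete is deferred to
 * the delete_task workqueue; otherwise the ramrod is issued here.  A
 * DESTROY_CONN KCQE is returned to the ULP in either case.
 */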
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

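/* Fill in the per-connection XSTORM/TSTORM buffers for a TCP connect
 * request: context address, receive buffer and Nagle parameters, the
 * keep-alive settings, and a pseudo-header checksum computed over the
 * four-word address copies (csum_ipv6_magic() covers both IP versions
 * here, assuming the unused upper words are zero for IPv4).
 */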
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->max_rt_time = 0xffffffff;
}

static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[0]);
}

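/* Build and submit a TCP_CONNECT ramrod.  The request consumes two
 * KWQEs for IPv4 and three for IPv6 (CONNECT2 carries the upper
 * address words); *work reports how many WQEs were consumed.
 */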
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
				  kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	*work = 3;
	return ret;
}

static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret = 0;
	u32 cid = -1, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct fcoe_kwqe_conn_offload1 *req1;
	struct fcoe_kwqe_conn_offload2 *req2;
	struct fcoe_kwqe_conn_offload3 *req3;
	struct fcoe_kwqe_conn_offload4 *req4;
	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
	struct cnic_context *ctx;
	struct fcoe_context *fctx;
	struct regpair ctx_addr;
	union l5cm_specific_data l5_data;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	if (num < 4) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];

	*work = 4;

	l5_cid = req1->fcoe_conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		goto err_reply;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto err_reply;

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		ret = 0;
		goto err_reply;
	}
	cid = ctx->cid;

	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
	if (fctx) {
		u32 hw_cid = BNX2X_HW_CID(bp, cid);
		u32 val;

		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->xstorm_ag_context.cdu_reserved = val;
		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->ustorm_ag_context.cdu_usage = val;
	}
	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_offload size too big\n");
		goto err_reply;
	}
	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_offload)
		goto err_reply;

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));

	cid = BNX2X_HW_CID(bp, cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;

err_reply:
	if (cid != -1)
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_enable size too big\n");
		return -ENOMEM;
	}
	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_enable)
		return -ENOMEM;

	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_disable size too big\n");
		return -ENOMEM;
	}
	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_disable)
		return -ENOMEM;

	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_destroy *req;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (ctx->wait_cond)
			kcqe.completion_status = 0;
	}

	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));

	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
	kcqe.fcoe_conn_id = req->conn_id;
	kcqe.fcoe_conn_context_id = cid;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 i;

	for (i = start_cid; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];
		int j;

		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			msleep(10);

		for (j = 0; j < 5; j++) {
			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
				break;
			msleep(20);
		}

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			netdev_warn(dev->netdev, "CID %x not deleted\n",
				    ctx->cid);
	}
}

static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;
	u32 cid;

	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);

	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

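/* A KWQE failed, most likely due to a bnx2x parity error.  Synthesize
 * the matching completion KCQE with a PARITY_ERROR status and deliver
 * it to the owning ULP so that cleanup and reset recovery can proceed
 * without waiting for a firmware event that will never arrive.
 */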
static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kcqe kcqe;
	struct kcqe *cqes[1];
	u32 cid;
	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
	u32 kcqe_op;
	int ulp_type;

	cid = kwqe->kwqe_info0;
	memset(&kcqe, 0, sizeof(kcqe));

	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
		u32 l5_cid = 0;

		ulp_type = CNIC_ULP_FCOE;
		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
			struct fcoe_kwqe_conn_enable_disable *req;

			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
			cid = req->context_id;
			l5_cid = req->conn_id;
		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
		} else {
			return;
		}
		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
		kcqe.kcqe_info2 = cid;
		kcqe.kcqe_info0 = l5_cid;

	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
		ulp_type = CNIC_ULP_ISCSI;
		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
			cid = kwqe->kwqe_info1;

		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
		kcqe.kcqe_info2 = cid;
		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);

	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;

		ulp_type = CNIC_ULP_L4;
		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		else
			return;

		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
				    KCQE_FLAGS_LAYER_MASK_L4;
		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
		l4kcqe->cid = cid;
		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
	} else {
		return;
	}

	cqes[0] = &kcqe;
	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
}

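/* Dispatch loop for iSCSI and L4 KWQEs on bnx2x devices.  Multi-WQE
 * requests report their consumption through *work; failed KWQEs are
 * logged and, on -EIO/-EAGAIN, completed back to the ULP with an
 * error status via cnic_bnx2x_kwqe_err().
 */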
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
					 struct kwqe *wqes[], u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0) {
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			/* Possibly bnx2x parity error, send completion
			 * to ulp drivers with error code to speed up
			 * cleanup and reset recovery.
			 */
			if (ret == -EIO || ret == -EAGAIN)
				cnic_bnx2x_kwqe_err(dev, kwqe);
		}
		i += work;
	}
	return 0;
}

static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
					struct kwqe *wqes[], u32 num_wqes)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
		return -EINVAL;

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case FCOE_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_ENABLE_CONN:
			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DISABLE_CONN:
			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY:
			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_STAT:
			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0) {
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			/* Possibly bnx2x parity error, send completion
			 * to ulp drivers with error code to speed up
			 * cleanup and reset recovery.
			 */
			if (ret == -EIO || ret == -EAGAIN)
				cnic_bnx2x_kwqe_err(dev, kwqe);
		}
		i += work;
	}
	return 0;
}

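/* Top-level bnx2x KWQE entry point: route the whole batch to the
 * iSCSI/L4 or FCoE handler based on the layer code of the first WQE.
 */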
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int ret = -EINVAL;
	u32 layer_code;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!num_wqes)
		return 0;

	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
	switch (layer_code) {
	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
	case KWQE_FLAGS_LAYER_MASK_L4:
	case KWQE_FLAGS_LAYER_MASK_L2:
		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
		break;

	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
		break;
	}
	return ret;
}

static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
{
	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
		return KCQE_FLAGS_LAYER_MASK_L4;

	return opflag & KCQE_FLAGS_LAYER_MASK;
}

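/* Deliver completed KCQEs to the ULP drivers.  Consecutive KCQEs of
 * the same layer are batched into a single indicate_kcqes() call, and
 * ramrod completions are counted so the SPQ credits can be returned
 * in one cnic_spq_completion() call at the end.
 */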
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
			ulp_type = CNIC_ULP_FCOE;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}

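/* Collect completed KCQEs from the ring into cp->completed_kcq[],
 * stopping at the hardware producer index or MAX_COMPLETED_KCQE.
 * Entries flagged KCQE_FLAGS_NEXT appear to continue into the next
 * entry, so only complete groups are counted and sw_prod_idx is left
 * on a group boundary.
 */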
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = info->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = info->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}

static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp++;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}

static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons, tx_cons;
	int comp = 0;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	rx_cons = *cp->rx_cons_ptr;
	tx_cons = *cp->tx_cons_ptr;
	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		if (cp->udev)
			uio_event_notify(&cp->udev->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}

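/* Service the bnx2 KCQ until it is empty, re-reading the status block
 * index after each pass, then update the hardware producer index and
 * check the L2 rings.  Returns the last status block index seen.
 */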
static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading other fields */
	rmb();
	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		status_idx = (u16) *cp->kcq1.status_idx_ptr;
		/* status block index must be read first */
		rmb();
		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}

static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		struct status_block *sblk = status_blk;

		return sblk->status_idx;
	}

	return cnic_service_bnx2_queues(dev);
}

static void cnic_service_bnx2_msix(struct work_struct *work)
{
	struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
	struct cnic_dev *dev = cp->dev;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_doirq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;

		prefetch(cp->status_blk.gen);
		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

		queue_work(system_bh_wq, &cp->cnic_irq_bh_work);
	}
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}

static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
			    u16 index, u8 op, u8 update)
{
	struct igu_regular cmd_data;
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;

	cmd_data.sb_id_and_flags =
		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		(update << IGU_REGULAR_BUPDATE_SHIFT) |
		(op << IGU_REGULAR_ENABLE_INT_SHIFT);

	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}

static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
			IGU_INT_DISABLE, 0);
}

static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
			   IGU_INT_ENABLE, 1);
}

static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
			IGU_INT_ENABLE, 1);
}

static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	u32 last_status = *info->status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading the KCQ */
	rmb();
	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();

		last_status = *info->status_idx_ptr;
		/* status block index must be read before reading the KCQ */
		rmb();
	}
	return last_status;
}

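/* Bottom-half work for bnx2x interrupts: drain kcq1 (and kcq2 on
 * FCoE-capable chips), re-checking for new entries until the status
 * index is stable, then re-arm the IGU with the final index.
 */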
static void cnic_service_bnx2x_bh_work(struct work_struct *work)
{
	struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
	struct cnic_dev *dev = cp->dev;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 status_idx, new_status_idx;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	while (1) {
		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

		CNIC_WR16(dev, cp->kcq1.io_addr,
			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);

		if (!CNIC_SUPPORTS_FCOE(bp)) {
			cp->arm_int(dev, status_idx);
			break;
		}

		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);

		if (new_status_idx != status_idx)
			continue;

		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
			  MAX_KCQ_IDX);

		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
				status_idx, IGU_INT_ENABLE, 1);

		break;
	}
}

static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;

	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		cnic_doirq(dev);

	cnic_chk_pkt_rings(cp);

	return 0;
}

static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
{
	struct cnic_ulp_ops *ulp_ops;

	if (if_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
					    lockdep_is_held(&cnic_lock));
	if (!ulp_ops) {
		mutex_unlock(&cnic_lock);
		return;
	}
	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	mutex_unlock(&cnic_lock);

	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}

static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
		cnic_ulp_stop_one(cp, if_type);
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						    lockdep_is_held(&cnic_lock));
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;
	int rc;

	mutex_lock(&cnic_lock);
	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
					    lockdep_is_held(&cnic_lock));
	if (ulp_ops && ulp_ops->cnic_get_stats)
		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
	else
		rc = -ENODEV;
	mutex_unlock(&cnic_lock);
	return rc;
}

static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;
	int ulp_type = CNIC_ULP_ISCSI;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_STOP_ISCSI_CMD: {
		struct cnic_local *cp = dev->cnic_priv;
		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
		break;
	}
	case CNIC_CTL_COMPLETION_CMD: {
		struct cnic_ctl_completion *comp = &info->data.comp;
		u32 cid = BNX2X_SW_CID(comp->cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
			break;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			if (unlikely(comp->error)) {
				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
				netdev_err(dev->netdev,
					   "CID %x CFC delete comp error %x\n",
					   cid, comp->error);
			}

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	case CNIC_CTL_FCOE_STATS_GET_CMD:
		ulp_type = CNIC_ULP_FCOE;
		fallthrough;
	case CNIC_CTL_ISCSI_STATS_GET_CMD:
		cnic_hold(dev);
		cnic_copy_ulp_stats(dev, ulp_type);
		cnic_put(dev);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}

static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_start = DEF_IPID_START;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

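/* Build the CONNECT1/2/3 KWQE chain for a connection request.  IPv6
 * adds the CONNECT2 WQE for the upper address words, and the MSS is
 * derived from the path MTU minus the IP and TCP header sizes.
 */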
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}

static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

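/* Claim a socket entry from csk_tbl for the given L5 CID and seed it
 * with default TCP parameters.  Fails with -EAGAIN while the previous
 * user is still referenced or offloaded, and -EBUSY if the entry is
 * already marked in use.
 */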
static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	if (cp->ctx_tbl) {
		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			return -EAGAIN;
	}

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;
	csk1->tcp_flags = 0;

	*csk = csk1;
	return 0;
}

static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_atomic();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}

static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (is_vlan_dev(dev)) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct rtable *rt;

	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0,
			     RT_SCOPE_UNIVERSE);
	if (!IS_ERR(rt)) {
		*dst = &rt->dst;
		return 0;
	}
	return PTR_ERR(rt);
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_addr->sin6_addr;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl6);
	if ((*dst)->error) {
		dst_release(*dst);
		*dst = NULL;
		return -ENETUNREACH;
	} else
		return 0;
#endif

	return -ENETUNREACH;
}

static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}

static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}

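/* Resolve the route for a connect request and pick the source port.
 * The VLAN and MTU are taken from the route only when it points back
 * at our own netdev.  A caller-supplied port in the CNIC_LOCAL_PORT
 * range is reserved in csk_port_tbl; otherwise a free port is
 * allocated.
 */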
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, rc = 0;
	struct dst_entry *dst = NULL;
	struct net_device *realdev;
	__be16 local_port;
	u32 port_id;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
		set_bit(SK_F_IPV6, &csk->flags);
		cnic_get_v6_route(&saddr->remote.v6, &dst);

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;

	} else {
		cnic_get_v4_route(&saddr->remote.v4, &dst);

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = 0;
	csk->mtu = dev->netdev->mtu;
	if (dst && dst->dev) {
		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
		if (realdev == dev->netdev) {
			csk->vlan_id = vlan;
			csk->mtu = dst_mtu(dst);
		}
	}

	port_id = be16_to_cpu(local_port);
	if (port_id >= CNIC_LOCAL_PORT_MIN &&
	    port_id < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
			port_id = 0;
	} else
		port_id = 0;

	if (!port_id) {
		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (port_id == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
		local_port = cpu_to_be16(port_id);
	}
	csk->src_port = local_port;

err_out:
	dst_release(dst);
	return rc;
}

static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}

static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	int err = 0;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return -EOPNOTSUPP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}

3871static int cnic_cm_abort(struct cnic_sock *csk)
3872{
3873 struct cnic_local *cp = csk->dev->cnic_priv;
3874 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3875
3876 if (!cnic_in_use(csk))
3877 return -EINVAL;
3878
3879 if (cnic_abort_prep(csk))
3880 return cnic_cm_abort_req(csk);
3881
 3882	/* Getting here means that the connect was never started, or it
 3883	 * did not succeed, or the connection has been reset by the target.
 3884	 */
3885
3886 cp->close_conn(csk, opcode);
3887 if (csk->state != opcode) {
3888 /* Wait for remote reset sequence to complete */
3889 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3890 msleep(1);
3891
3892 return -EALREADY;
3893 }
3894
3895 return 0;
3896}
3897
3898static int cnic_cm_close(struct cnic_sock *csk)
3899{
3900 if (!cnic_in_use(csk))
3901 return -EINVAL;
3902
3903 if (cnic_close_prep(csk)) {
3904 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3905 return cnic_cm_close_req(csk);
3906 } else {
3907 /* Wait for remote reset sequence to complete */
3908 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3909 msleep(1);
3910
3911 return -EALREADY;
3912 }
3913 return 0;
3914}
3915
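/* Deliver an L4 completion event to the ULP registered for this
 * socket's type.  Runs under rcu_read_lock() so that the ops structure
 * can be unregistered safely from another context.
 */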
3916static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3917 u8 opcode)
3918{
3919 struct cnic_ulp_ops *ulp_ops;
3920 int ulp_type = csk->ulp_type;
3921
3922 rcu_read_lock();
3923 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3924 if (ulp_ops) {
3925 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3926 ulp_ops->cm_connect_complete(csk);
3927 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3928 ulp_ops->cm_close_complete(csk);
3929 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3930 ulp_ops->cm_remote_abort(csk);
3931 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3932 ulp_ops->cm_abort_complete(csk);
3933 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3934 ulp_ops->cm_remote_close(csk);
3935 }
3936 rcu_read_unlock();
3937}
3938
3939static int cnic_cm_set_pg(struct cnic_sock *csk)
3940{
3941 if (cnic_offld_prep(csk)) {
3942 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3943 cnic_cm_update_pg(csk);
3944 else
3945 cnic_cm_offload_pg(csk);
3946 }
3947 return 0;
3948}
3949
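/* Handle OFFLOAD_PG/UPDATE_PG completions.  On a successful offload the
 * PG context id is saved and the TCP connect request is issued; a
 * CTX_ALLOC_FAIL status is reported to the ULP as a failed
 * CONNECT_COMPLETE.
 */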
3950static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3951{
3952 struct cnic_local *cp = dev->cnic_priv;
3953 u32 l5_cid = kcqe->pg_host_opaque;
3954 u8 opcode = kcqe->op_code;
3955 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3956
3957 csk_hold(csk);
3958 if (!cnic_in_use(csk))
3959 goto done;
3960
3961 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3962 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3963 goto done;
3964 }
3965 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3966 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3967 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3968 cnic_cm_upcall(cp, csk,
3969 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3970 goto done;
3971 }
3972
3973 csk->pg_cid = kcqe->pg_cid;
3974 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3975 cnic_cm_conn_req(csk);
3976
3977done:
3978 csk_put(csk);
3979}
3980
3981static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3982{
3983 struct cnic_local *cp = dev->cnic_priv;
3984 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3985 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3986 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3987
3988 ctx->timestamp = jiffies;
3989 ctx->wait_cond = 1;
3990 wake_up(&ctx->waitq);
3991}
3992
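/* Demultiplex one L4/L5 KCQE.  FCoE terminate and PG completions are
 * special-cased; every other opcode is matched to a cnic_sock by
 * connection id (opcodes with bit 7 set carry the id in the cid field)
 * and drives the connect/close/abort state machine below.
 */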
3993static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3994{
3995 struct cnic_local *cp = dev->cnic_priv;
3996 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3997 u8 opcode = l4kcqe->op_code;
3998 u32 l5_cid;
3999 struct cnic_sock *csk;
4000
4001 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
4002 cnic_process_fcoe_term_conn(dev, kcqe);
4003 return;
4004 }
4005 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
4006 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
4007 cnic_cm_process_offld_pg(dev, l4kcqe);
4008 return;
4009 }
4010
4011 l5_cid = l4kcqe->conn_id;
4012 if (opcode & 0x80)
4013 l5_cid = l4kcqe->cid;
4014 if (l5_cid >= MAX_CM_SK_TBL_SZ)
4015 return;
4016
4017 csk = &cp->csk_tbl[l5_cid];
4018 csk_hold(csk);
4019
4020 if (!cnic_in_use(csk)) {
4021 csk_put(csk);
4022 return;
4023 }
4024
4025 switch (opcode) {
4026 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
4027 if (l4kcqe->status != 0) {
4028 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4029 cnic_cm_upcall(cp, csk,
4030 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
4031 }
4032 break;
4033 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
4034 if (l4kcqe->status == 0)
4035 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
4036 else if (l4kcqe->status ==
4037 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4038 set_bit(SK_F_HW_ERR, &csk->flags);
4039
4040 smp_mb__before_atomic();
4041 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4042 cnic_cm_upcall(cp, csk, opcode);
4043 break;
4044
4045 case L5CM_RAMROD_CMD_ID_CLOSE: {
4046 struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
4047
4048 if (l4kcqe->status == 0 && l5kcqe->completion_status == 0)
4049 break;
4050
4051 netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4052 l4kcqe->status, l5kcqe->completion_status);
4053 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4054 }
4055 fallthrough;
4056 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4057 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4058 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4059 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4060 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4061 if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4062 set_bit(SK_F_HW_ERR, &csk->flags);
4063
4064 cp->close_conn(csk, opcode);
4065 break;
4066
4067 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
4068 /* after we already sent CLOSE_REQ */
4069 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4070 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
4071 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
4072 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
4073 else
4074 cnic_cm_upcall(cp, csk, opcode);
4075 break;
4076 }
4077 csk_put(csk);
4078}
4079
4080static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
4081{
4082 struct cnic_dev *dev = data;
4083 int i;
4084
4085 for (i = 0; i < num; i++)
4086 cnic_cm_process_kcqe(dev, kcqe[i]);
4087}
4088
4089static struct cnic_ulp_ops cm_ulp_ops = {
4090 .indicate_kcqes = cnic_cm_indicate_kcqe,
4091};
4092
4093static void cnic_cm_free_mem(struct cnic_dev *dev)
4094{
4095 struct cnic_local *cp = dev->cnic_priv;
4096
4097 kvfree(cp->csk_tbl);
4098 cp->csk_tbl = NULL;
4099 cnic_free_id_tbl(&cp->csk_port_tbl);
4100}
4101
4102static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4103{
4104 struct cnic_local *cp = dev->cnic_priv;
4105 u32 port_id;
4106 int i;
4107
4108 cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
4109 GFP_KERNEL);
4110 if (!cp->csk_tbl)
4111 return -ENOMEM;
4112
4113 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
4114 atomic_set(&cp->csk_tbl[i].ref_count, 0);
4115
4116 port_id = get_random_u32_below(CNIC_LOCAL_PORT_RANGE);
4117 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
4118 CNIC_LOCAL_PORT_MIN, port_id)) {
4119 cnic_cm_free_mem(dev);
4120 return -ENOMEM;
4121 }
4122 return 0;
4123}
4124
4125static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4126{
4127 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4128 /* Unsolicited RESET_COMP or RESET_RECEIVED */
4129 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4130 csk->state = opcode;
4131 }
4132
 4133	/* Accept the event if:
 4134	 * 1. the opcode matches the expected event in csk->state, or
 4135	 * 2. the expected event is CLOSE_COMP or RESET_COMP (these accept
 4136	 *    any event), or
 4137	 * 3. the expected event is 0, meaning the connection was never
 4138	 *    established; we then accept the opcode from cm_abort. */
4139 if (opcode == csk->state || csk->state == 0 ||
4140 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4141 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4142 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4143 if (csk->state == 0)
4144 csk->state = opcode;
4145 return 1;
4146 }
4147 }
4148 return 0;
4149}
4150
4151static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4152{
4153 struct cnic_dev *dev = csk->dev;
4154 struct cnic_local *cp = dev->cnic_priv;
4155
4156 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4157 cnic_cm_upcall(cp, csk, opcode);
4158 return;
4159 }
4160
4161 clear_bit(SK_F_CONNECT_START, &csk->flags);
4162 cnic_close_conn(csk);
4163 csk->state = opcode;
4164 cnic_cm_upcall(cp, csk, opcode);
4165}
4166
4167static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4168{
4169}
4170
4171static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4172{
4173 u32 seed;
4174
4175 seed = get_random_u32();
4176 cnic_ctx_wr(dev, 45, 0, seed);
4177 return 0;
4178}
4179
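/* Step the bnx2x close sequence.  A normal teardown is driven by the
 * chain of ramrod completions: RESET/CLOSE completion ->
 * SEARCHER_DELETE -> TERMINATE_OFFLOAD; a HW error or a connection
 * without an offloaded PG context completes the close immediately.
 */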
4180static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4181{
4182 struct cnic_dev *dev = csk->dev;
4183 struct cnic_local *cp = dev->cnic_priv;
4184 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4185 union l5cm_specific_data l5_data;
4186 u32 cmd = 0;
4187 int close_complete = 0;
4188
4189 switch (opcode) {
4190 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4191 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4192 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4193 if (cnic_ready_to_close(csk, opcode)) {
4194 if (test_bit(SK_F_HW_ERR, &csk->flags))
4195 close_complete = 1;
4196 else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4197 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4198 else
4199 close_complete = 1;
4200 }
4201 break;
4202 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4203 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4204 break;
4205 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4206 close_complete = 1;
4207 break;
4208 }
4209 if (cmd) {
4210 memset(&l5_data, 0, sizeof(l5_data));
4211
4212 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4213 &l5_data);
4214 } else if (close_complete) {
4215 ctx->timestamp = jiffies;
4216 cnic_close_conn(csk);
4217 cnic_cm_upcall(cp, csk, csk->state);
4218 }
4219}
4220
4221static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4222{
4223 struct cnic_local *cp = dev->cnic_priv;
4224
4225 if (!cp->ctx_tbl)
4226 return;
4227
4228 if (!netif_running(dev->netdev))
4229 return;
4230
4231 cnic_bnx2x_delete_wait(dev, 0);
4232
4233 cancel_delayed_work(&cp->delete_task);
4234 flush_workqueue(cnic_wq);
4235
4236 if (atomic_read(&cp->iscsi_conn) != 0)
4237 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4238 atomic_read(&cp->iscsi_conn));
4239}
4240
4241static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4242{
4243 struct bnx2x *bp = netdev_priv(dev->netdev);
4244 u32 pfid = bp->pfid;
4245 u32 port = BP_PORT(bp);
4246
4247 cnic_init_bnx2x_mac(dev);
4248 cnic_bnx2x_set_tcp_options(dev, 0, 1);
4249
4250 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4251 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4252
4253 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4254 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4255 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4256 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4257 DEF_MAX_DA_COUNT);
4258
4259 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4260 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4261 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4262 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4263 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4264 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4265 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4266 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4267
4268 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4269 DEF_MAX_CWND);
4270 return 0;
4271}
4272
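/* Delayed-work reaper for bnx2x connections.  Contexts flagged
 * CTX_FL_DELETE_WAIT are destroyed only after a ~2 second grace period
 * from their last activity; while any context is still inside its grace
 * period the work requeues itself after 10 ms.
 */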
4273static void cnic_delete_task(struct work_struct *work)
4274{
4275 struct cnic_local *cp;
4276 struct cnic_dev *dev;
4277 u32 i;
4278 int need_resched = 0;
4279
4280 cp = container_of(work, struct cnic_local, delete_task.work);
4281 dev = cp->dev;
4282
4283 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4284 struct drv_ctl_info info;
4285
4286 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4287
4288 memset(&info, 0, sizeof(struct drv_ctl_info));
4289 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4290 cp->ethdev->drv_ctl(dev->netdev, &info);
4291 }
4292
4293 for (i = 0; i < cp->max_cid_space; i++) {
4294 struct cnic_context *ctx = &cp->ctx_tbl[i];
4295 int err;
4296
4297 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4298 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4299 continue;
4300
4301 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4302 need_resched = 1;
4303 continue;
4304 }
4305
4306 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4307 continue;
4308
4309 err = cnic_bnx2x_destroy_ramrod(dev, i);
4310
4311 cnic_free_bnx2x_conn_resc(dev, i);
4312 if (!err) {
4313 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4314 atomic_dec(&cp->iscsi_conn);
4315
4316 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4317 }
4318 }
4319
4320 if (need_resched)
4321 queue_delayed_work(cnic_wq, &cp->delete_task,
4322 msecs_to_jiffies(10));
4323
4324}
4325
4326static int cnic_cm_open(struct cnic_dev *dev)
4327{
4328 struct cnic_local *cp = dev->cnic_priv;
4329 int err;
4330
4331 err = cnic_cm_alloc_mem(dev);
4332 if (err)
4333 return err;
4334
4335 err = cp->start_cm(dev);
4336
4337 if (err)
4338 goto err_out;
4339
4340 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4341
4342 dev->cm_create = cnic_cm_create;
4343 dev->cm_destroy = cnic_cm_destroy;
4344 dev->cm_connect = cnic_cm_connect;
4345 dev->cm_abort = cnic_cm_abort;
4346 dev->cm_close = cnic_cm_close;
4347 dev->cm_select_dev = cnic_cm_select_dev;
4348
4349 cp->ulp_handle[CNIC_ULP_L4] = dev;
4350 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4351 return 0;
4352
4353err_out:
4354 cnic_cm_free_mem(dev);
4355 return err;
4356}
4357
4358static int cnic_cm_shutdown(struct cnic_dev *dev)
4359{
4360 struct cnic_local *cp = dev->cnic_priv;
4361 int i;
4362
4363 if (!cp->csk_tbl)
4364 return 0;
4365
4366 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4367 struct cnic_sock *csk = &cp->csk_tbl[i];
4368
4369 clear_bit(SK_F_INUSE, &csk->flags);
4370 cnic_cm_cleanup(csk);
4371 }
4372 cnic_cm_free_mem(dev);
4373
4374 return 0;
4375}
4376
4377static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4378{
4379 u32 cid_addr;
4380 int i;
4381
4382 cid_addr = GET_CID_ADDR(cid);
4383
4384 for (i = 0; i < CTX_SIZE; i += 4)
4385 cnic_ctx_wr(dev, cid_addr, i, 0);
4386}
4387
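/* The 5709 keeps its context memory in host pages.  Program each page
 * table entry and poll (up to 10 times, 5 us apart) for the hardware to
 * ack the write; return -EBUSY if it never does.
 */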
4388static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4389{
4390 struct cnic_local *cp = dev->cnic_priv;
4391 int ret = 0, i;
4392 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4393
4394 if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4395 return 0;
4396
4397 for (i = 0; i < cp->ctx_blks; i++) {
4398 int j;
4399 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4400 u32 val;
4401
4402 memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
4403
4404 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4405 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4406 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4407 (u64) cp->ctx_arr[i].mapping >> 32);
4408 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4409 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4410 for (j = 0; j < 10; j++) {
4411
4412 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4413 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4414 break;
4415 udelay(5);
4416 }
4417 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4418 ret = -EBUSY;
4419 break;
4420 }
4421 }
4422 return ret;
4423}
4424
4425static void cnic_free_irq(struct cnic_dev *dev)
4426{
4427 struct cnic_local *cp = dev->cnic_priv;
4428 struct cnic_eth_dev *ethdev = cp->ethdev;
4429
4430 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4431 cp->disable_int_sync(dev);
4432 cancel_work_sync(&cp->cnic_irq_bh_work);
4433 free_irq(ethdev->irq_arr[0].vector, dev);
4434 }
4435}
4436
4437static int cnic_request_irq(struct cnic_dev *dev)
4438{
4439 struct cnic_local *cp = dev->cnic_priv;
4440 struct cnic_eth_dev *ethdev = cp->ethdev;
4441 int err;
4442
4443 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4444 if (err)
4445 disable_work_sync(&cp->cnic_irq_bh_work);
4446
4447 return err;
4448}
4449
4450static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4451{
4452 struct cnic_local *cp = dev->cnic_priv;
4453 struct cnic_eth_dev *ethdev = cp->ethdev;
4454
4455 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4456 int err, i = 0;
4457 int sblk_num = cp->status_blk_num;
4458 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4459 BNX2_HC_SB_CONFIG_1;
4460
4461 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4462
4463 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4464 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4465 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4466
4467 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4468 INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2_msix);
4469 err = cnic_request_irq(dev);
4470 if (err)
4471 return err;
4472
4473 while (cp->status_blk.bnx2->status_completion_producer_index &&
4474 i < 10) {
4475 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4476 1 << (11 + sblk_num));
4477 udelay(10);
4478 i++;
4479 barrier();
4480 }
4481 if (cp->status_blk.bnx2->status_completion_producer_index) {
4482 cnic_free_irq(dev);
4483 goto failed;
4484 }
4485
4486 } else {
4487 struct status_block *sblk = cp->status_blk.gen;
4488 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4489 int i = 0;
4490
4491 while (sblk->status_completion_producer_index && i < 10) {
4492 CNIC_WR(dev, BNX2_HC_COMMAND,
4493 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4494 udelay(10);
4495 i++;
4496 barrier();
4497 }
4498 if (sblk->status_completion_producer_index)
4499 goto failed;
4500
4501 }
4502 return 0;
4503
4504failed:
4505 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4506 return -EBUSY;
4507}
4508
4509static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4510{
4511 struct cnic_local *cp = dev->cnic_priv;
4512 struct cnic_eth_dev *ethdev = cp->ethdev;
4513
4514 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4515 return;
4516
4517 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4518 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4519}
4520
4521static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4522{
4523 struct cnic_local *cp = dev->cnic_priv;
4524 struct cnic_eth_dev *ethdev = cp->ethdev;
4525
4526 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4527 return;
4528
4529 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4530 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4531 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4532 synchronize_irq(ethdev->irq_arr[0].vector);
4533}
4534
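/* Set up the bnx2 L2 TX ring used by the UIO interface.  Every BD
 * points at the start of the shared udev->l2_buf buffer, and the final
 * descriptor slot is used as the chain pointer back to the ring's own
 * base page.
 */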
4535static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4536{
4537 struct cnic_local *cp = dev->cnic_priv;
4538 struct cnic_eth_dev *ethdev = cp->ethdev;
4539 struct cnic_uio_dev *udev = cp->udev;
4540 u32 cid_addr, tx_cid, sb_id;
4541 u32 val, offset0, offset1, offset2, offset3;
4542 int i;
4543 struct bnx2_tx_bd *txbd;
4544 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4545 struct status_block *s_blk = cp->status_blk.gen;
4546
4547 sb_id = cp->status_blk_num;
4548 tx_cid = 20;
4549 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4550 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4551 struct status_block_msix *sblk = cp->status_blk.bnx2;
4552
4553 tx_cid = TX_TSS_CID + sb_id - 1;
4554 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4555 (TX_TSS_CID << 7));
4556 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4557 }
4558 cp->tx_cons = *cp->tx_cons_ptr;
4559
4560 cid_addr = GET_CID_ADDR(tx_cid);
4561 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
4562 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4563
4564 for (i = 0; i < PHY_CTX_SIZE; i += 4)
4565 cnic_ctx_wr(dev, cid_addr2, i, 0);
4566
4567 offset0 = BNX2_L2CTX_TYPE_XI;
4568 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4569 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4570 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4571 } else {
4572 cnic_init_context(dev, tx_cid);
4573 cnic_init_context(dev, tx_cid + 1);
4574
4575 offset0 = BNX2_L2CTX_TYPE;
4576 offset1 = BNX2_L2CTX_CMD_TYPE;
4577 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4578 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4579 }
4580 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4581 cnic_ctx_wr(dev, cid_addr, offset0, val);
4582
4583 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4584 cnic_ctx_wr(dev, cid_addr, offset1, val);
4585
4586 txbd = udev->l2_ring;
4587
4588 buf_map = udev->l2_buf_map;
4589 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
4590 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4591 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4592 }
4593 val = (u64) ring_map >> 32;
4594 cnic_ctx_wr(dev, cid_addr, offset2, val);
4595 txbd->tx_bd_haddr_hi = val;
4596
4597 val = (u64) ring_map & 0xffffffff;
4598 cnic_ctx_wr(dev, cid_addr, offset3, val);
4599 txbd->tx_bd_haddr_lo = val;
4600}
4601
4602static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4603{
4604 struct cnic_local *cp = dev->cnic_priv;
4605 struct cnic_eth_dev *ethdev = cp->ethdev;
4606 struct cnic_uio_dev *udev = cp->udev;
4607 u32 cid_addr, sb_id, val, coal_reg, coal_val;
4608 int i;
4609 struct bnx2_rx_bd *rxbd;
4610 struct status_block *s_blk = cp->status_blk.gen;
4611 dma_addr_t ring_map = udev->l2_ring_map;
4612
4613 sb_id = cp->status_blk_num;
4614 cnic_init_context(dev, 2);
4615 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4616 coal_reg = BNX2_HC_COMMAND;
4617 coal_val = CNIC_RD(dev, coal_reg);
4618 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4619 struct status_block_msix *sblk = cp->status_blk.bnx2;
4620
4621 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4622 coal_reg = BNX2_HC_COALESCE_NOW;
4623 coal_val = 1 << (11 + sb_id);
4624 }
4625 i = 0;
 4626	while (*cp->rx_cons_ptr == 0 && i < 10) {
4627 CNIC_WR(dev, coal_reg, coal_val);
4628 udelay(10);
4629 i++;
4630 barrier();
4631 }
4632 cp->rx_cons = *cp->rx_cons_ptr;
4633
4634 cid_addr = GET_CID_ADDR(2);
4635 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4636 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4637 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4638
4639 if (sb_id == 0)
4640 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4641 else
4642 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4643 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4644
4645 rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
4646 for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
4647 dma_addr_t buf_map;
4648 int n = (i % cp->l2_rx_ring_size) + 1;
4649
4650 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4651 rxbd->rx_bd_len = cp->l2_single_buf_size;
4652 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4653 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4654 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4655 }
4656 val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
4657 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4658 rxbd->rx_bd_haddr_hi = val;
4659
4660 val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
4661 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4662 rxbd->rx_bd_haddr_lo = val;
4663
4664 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4665 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4666}
4667
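/* Quiesce the bnx2 L2 RX path by submitting a single L2 FLUSH kwqe. */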
4668static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4669{
4670 struct kwqe *wqes[1], l2kwqe;
4671
4672 memset(&l2kwqe, 0, sizeof(l2kwqe));
4673 wqes[0] = &l2kwqe;
4674 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4675 (L2_KWQE_OPCODE_VALUE_FLUSH <<
4676 KWQE_OPCODE_SHIFT) | 2;
4677 dev->submit_kwqes(dev, wqes, 1);
4678}
4679
4680static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4681{
4682 struct cnic_local *cp = dev->cnic_priv;
4683 u32 val;
4684
4685 val = cp->func << 2;
4686
4687 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4688
4689 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4690 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4691 dev->mac_addr[0] = (u8) (val >> 8);
4692 dev->mac_addr[1] = (u8) val;
4693
4694 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4695
4696 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4697 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4698 dev->mac_addr[2] = (u8) (val >> 24);
4699 dev->mac_addr[3] = (u8) (val >> 16);
4700 dev->mac_addr[4] = (u8) (val >> 8);
4701 dev->mac_addr[5] = (u8) val;
4702
4703 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4704
4705 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4706 if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4707 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4708
4709 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4710 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4711 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4712}
4713
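/* Bring up the bnx2 path: program the iSCSI MAC, set the MQ
 * kernel-bypass block size, initialize the KWQ/KCQ contexts and the L2
 * rings, then hook up the IRQ.  The CP/COM scratch doorbells are only
 * set once the kernel queue contexts are ready.
 */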
4714static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4715{
4716 struct cnic_local *cp = dev->cnic_priv;
4717 struct cnic_eth_dev *ethdev = cp->ethdev;
4718 struct status_block *sblk = cp->status_blk.gen;
4719 u32 val, kcq_cid_addr, kwq_cid_addr;
4720 int err;
4721
4722 cnic_set_bnx2_mac(dev);
4723
4724 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4725 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4726 if (CNIC_PAGE_BITS > 12)
4727 val |= (12 - 8) << 4;
4728 else
4729 val |= (CNIC_PAGE_BITS - 8) << 4;
4730
4731 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4732
4733 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4734 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4735 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4736
4737 err = cnic_setup_5709_context(dev, 1);
4738 if (err)
4739 return err;
4740
4741 cnic_init_context(dev, KWQ_CID);
4742 cnic_init_context(dev, KCQ_CID);
4743
4744 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4745 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4746
4747 cp->max_kwq_idx = MAX_KWQ_IDX;
4748 cp->kwq_prod_idx = 0;
4749 cp->kwq_con_idx = 0;
4750 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4751
4752 if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
4753 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4754 else
4755 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4756
4757 /* Initialize the kernel work queue context. */
4758 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4759 (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4760 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4761
4762 val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4763 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4764
4765 val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4766 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4767
4768 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4769 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4770
4771 val = (u32) cp->kwq_info.pgtbl_map;
4772 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4773
4774 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4775 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4776
4777 cp->kcq1.sw_prod_idx = 0;
4778 cp->kcq1.hw_prod_idx_ptr =
4779 &sblk->status_completion_producer_index;
4780
4781 cp->kcq1.status_idx_ptr = &sblk->status_idx;
4782
 4783	/* Initialize the kernel completion queue context. */
4784 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4785 (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4786 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4787
4788 val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4789 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4790
4791 val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4792 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4793
4794 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4795 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4796
4797 val = (u32) cp->kcq1.dma.pgtbl_map;
4798 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4799
4800 cp->int_num = 0;
4801 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4802 struct status_block_msix *msblk = cp->status_blk.bnx2;
4803 u32 sb_id = cp->status_blk_num;
4804 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4805
4806 cp->kcq1.hw_prod_idx_ptr =
4807 &msblk->status_completion_producer_index;
4808 cp->kcq1.status_idx_ptr = &msblk->status_idx;
4809 cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
4810 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4811 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4812 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4813 }
4814
 4815	/* Enable Command Scheduler notification when we write to the
 4816	 * host producer index of the kernel contexts. */
4817 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4818
4819 /* Enable Command Scheduler notification when we write to either
4820 * the Send Queue or Receive Queue producer indexes of the kernel
4821 * bypass contexts. */
4822 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4823 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4824
 4825	/* Notify COM when the driver posts an application buffer. */
4826 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4827
 4828	/* Set the CP and COM doorbells.  These two processors poll the
 4829	 * doorbell for a non-zero value before running.  This must be done
 4830	 * after setting up the kernel queue contexts. */
4831 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4832 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4833
4834 cnic_init_bnx2_tx_ring(dev);
4835 cnic_init_bnx2_rx_ring(dev);
4836
4837 err = cnic_init_bnx2_irq(dev);
4838 if (err) {
4839 netdev_err(dev->netdev, "cnic_init_irq failed\n");
4840 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4841 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4842 return err;
4843 }
4844
4845 ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
4846
4847 return 0;
4848}
4849
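/* Publish the context page addresses to the chip's context table,
 * rounding each DMA mapping up to cp->ctx_align when alignment is
 * required.
 */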
4850static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4851{
4852 struct cnic_local *cp = dev->cnic_priv;
4853 struct cnic_eth_dev *ethdev = cp->ethdev;
4854 u32 start_offset = ethdev->ctx_tbl_offset;
4855 int i;
4856
4857 for (i = 0; i < cp->ctx_blks; i++) {
4858 struct cnic_ctx *ctx = &cp->ctx_arr[i];
4859 dma_addr_t map = ctx->mapping;
4860
4861 if (cp->ctx_align) {
4862 unsigned long mask = cp->ctx_align - 1;
4863
4864 map = (map + mask) & ~mask;
4865 }
4866
4867 cnic_ctx_tbl_wr(dev, start_offset + i, map);
4868 }
4869}
4870
4871static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4872{
4873 struct cnic_local *cp = dev->cnic_priv;
4874 struct cnic_eth_dev *ethdev = cp->ethdev;
4875 int err = 0;
4876
4877 INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2x_bh_work);
4878 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4879 err = cnic_request_irq(dev);
4880
4881 return err;
4882}
4883
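/* Read-modify-write the flags word of one hc_index_data entry in
 * CSTORM internal memory, setting or clearing HC_INDEX_DATA_HC_ENABLED
 * according to @disable.
 */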
4884static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4885 u16 sb_id, u8 sb_index,
4886 u8 disable)
4887{
4888 struct bnx2x *bp = netdev_priv(dev->netdev);
4889
4890 u32 addr = BAR_CSTRORM_INTMEM +
4891 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4892 offsetof(struct hc_status_block_data_e1x, index_data) +
4893 sizeof(struct hc_index_data)*sb_index +
4894 offsetof(struct hc_index_data, flags);
4895 u16 flags = CNIC_RD16(dev, addr);
4896 /* clear and set */
4897 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4898 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4899 HC_INDEX_DATA_HC_ENABLED);
4900 CNIC_WR16(dev, addr, flags);
4901}
4902
4903static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4904{
4905 struct cnic_local *cp = dev->cnic_priv;
4906 struct bnx2x *bp = netdev_priv(dev->netdev);
4907 u8 sb_id = cp->status_blk_num;
4908
4909 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4910 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4911 offsetof(struct hc_status_block_data_e1x, index_data) +
4912 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4913 offsetof(struct hc_index_data, timeout), 64 / 4);
4914 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4915}
4916
4917static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4918{
4919}
4920
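/* Build the TX half of the client-init ramrod data.  BDs come in
 * start/parse/regular triplets, all pointing into the shared UIO
 * buffer; the final BD of the page links back to the ring base.
 */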
4921static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4922 struct client_init_ramrod_data *data)
4923{
4924 struct cnic_local *cp = dev->cnic_priv;
4925 struct bnx2x *bp = netdev_priv(dev->netdev);
4926 struct cnic_uio_dev *udev = cp->udev;
4927 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4928 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4929 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4930 int i;
4931 u32 cli = cp->ethdev->iscsi_l2_client_id;
4932 u32 val;
4933
4934 memset(txbd, 0, CNIC_PAGE_SIZE);
4935
4936 buf_map = udev->l2_buf_map;
4937 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4938 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4939 struct eth_tx_parse_bd_e1x *pbd_e1x =
4940 &((txbd + 1)->parse_bd_e1x);
4941 struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
4942 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4943
4944 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4945 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4946 reg_bd->addr_hi = start_bd->addr_hi;
4947 reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4948 start_bd->nbytes = cpu_to_le16(0x10);
4949 start_bd->nbd = cpu_to_le16(3);
4950 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4951 start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
4952 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4953
4954 if (BNX2X_CHIP_IS_E2_PLUS(bp))
4955 pbd_e2->parsing_data = (UNICAST_ADDRESS <<
4956 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
4957 else
4958 pbd_e1x->global_data = (UNICAST_ADDRESS <<
4959 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
4960 }
4961
4962 val = (u64) ring_map >> 32;
4963 txbd->next_bd.addr_hi = cpu_to_le32(val);
4964
4965 data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4966
4967 val = (u64) ring_map & 0xffffffff;
4968 txbd->next_bd.addr_lo = cpu_to_le32(val);
4969
4970 data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4971
4972 /* Other ramrod params */
4973 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4974 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4975
4976 /* reset xstorm per client statistics */
4977 if (cli < MAX_STAT_COUNTER_ID) {
4978 data->general.statistics_zero_flg = 1;
4979 data->general.statistics_en_flg = 1;
4980 data->general.statistics_counter_id = cli;
4981 }
4982
4983 cp->tx_cons_ptr =
4984 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
4985}
4986
4987static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4988 struct client_init_ramrod_data *data)
4989{
4990 struct cnic_local *cp = dev->cnic_priv;
4991 struct bnx2x *bp = netdev_priv(dev->netdev);
4992 struct cnic_uio_dev *udev = cp->udev;
4993 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4994 CNIC_PAGE_SIZE);
4995 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4996 (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
4997 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4998 int i;
4999 u32 cli = cp->ethdev->iscsi_l2_client_id;
5000 int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
5001 u32 val;
5002 dma_addr_t ring_map = udev->l2_ring_map;
5003
5004 /* General data */
5005 data->general.client_id = cli;
5006 data->general.activate_flg = 1;
5007 data->general.sp_client_id = cli;
5008 data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
5009 data->general.func_id = bp->pfid;
5010
5011 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
5012 dma_addr_t buf_map;
5013 int n = (i % cp->l2_rx_ring_size) + 1;
5014
5015 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
5016 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
5017 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
5018 }
5019
5020 val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
5021 rxbd->addr_hi = cpu_to_le32(val);
5022 data->rx.bd_page_base.hi = cpu_to_le32(val);
5023
5024 val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
5025 rxbd->addr_lo = cpu_to_le32(val);
5026 data->rx.bd_page_base.lo = cpu_to_le32(val);
5027
5028 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
5029 val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
5030 rxcqe->addr_hi = cpu_to_le32(val);
5031 data->rx.cqe_page_base.hi = cpu_to_le32(val);
5032
5033 val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
5034 rxcqe->addr_lo = cpu_to_le32(val);
5035 data->rx.cqe_page_base.lo = cpu_to_le32(val);
5036
5037 /* Other ramrod params */
5038 data->rx.client_qzone_id = cl_qzone_id;
5039 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
5040 data->rx.status_block_id = BNX2X_DEF_SB_ID;
5041
5042 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
5043
5044 data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
5045 data->rx.outer_vlan_removal_enable_flg = 1;
5046 data->rx.silent_vlan_removal_flg = 1;
5047 data->rx.silent_vlan_value = 0;
5048 data->rx.silent_vlan_mask = 0xffff;
5049
5050 cp->rx_cons_ptr =
5051 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
5052 cp->rx_cons = *cp->rx_cons_ptr;
5053}
5054
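/* Point kcq1 (the iSCSI event queue) and, on E2 and later chips, kcq2
 * (the FCoE event queue) at their producer/consumer locations in the
 * status block.
 */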
5055static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5056{
5057 struct cnic_local *cp = dev->cnic_priv;
5058 struct bnx2x *bp = netdev_priv(dev->netdev);
5059 u32 pfid = bp->pfid;
5060
5061 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
5062 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
5063 cp->kcq1.sw_prod_idx = 0;
5064
5065 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5066 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5067
5068 cp->kcq1.hw_prod_idx_ptr =
5069 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5070 cp->kcq1.status_idx_ptr =
5071 &sb->sb.running_index[SM_RX_ID];
5072 } else {
5073 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
5074
5075 cp->kcq1.hw_prod_idx_ptr =
5076 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5077 cp->kcq1.status_idx_ptr =
5078 &sb->sb.running_index[SM_RX_ID];
5079 }
5080
5081 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5082 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5083
5084 cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
5085 USTORM_FCOE_EQ_PROD_OFFSET(pfid);
5086 cp->kcq2.sw_prod_idx = 0;
5087 cp->kcq2.hw_prod_idx_ptr =
5088 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
5089 cp->kcq2.status_idx_ptr =
5090 &sb->sb.running_index[SM_RX_ID];
5091 }
5092}
5093
5094static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5095{
5096 struct cnic_local *cp = dev->cnic_priv;
5097 struct bnx2x *bp = netdev_priv(dev->netdev);
5098 struct cnic_eth_dev *ethdev = cp->ethdev;
5099 int ret;
5100 u32 pfid;
5101
5102 dev->stats_addr = ethdev->addr_drv_info_to_mcp;
5103 cp->func = bp->pf_num;
5104
5105 pfid = bp->pfid;
5106
5107 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
5108 cp->iscsi_start_cid, 0);
5109
5110 if (ret)
5111 return -ENOMEM;
5112
5113 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5114 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
5115 cp->fcoe_start_cid, 0);
5116
5117 if (ret)
5118 return -ENOMEM;
5119 }
5120
5121 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
5122
5123 cnic_init_bnx2x_kcq(dev);
5124
5125 /* Only 1 EQ */
5126 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
5127 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5128 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
5129 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5130 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
5131 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
5132 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5133 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
5134 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
5135 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5136 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
5137 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
5138 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5139 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
5140 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
5141 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5142 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
5143 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
5144 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
5145 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5146 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
5147 HC_INDEX_ISCSI_EQ_CONS);
5148
5149 CNIC_WR(dev, BAR_USTRORM_INTMEM +
5150 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
5151 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
5152 CNIC_WR(dev, BAR_USTRORM_INTMEM +
5153 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
5154 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
5155
5156 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5157 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
5158
5159 cnic_setup_bnx2x_context(dev);
5160
5161 ret = cnic_init_bnx2x_irq(dev);
5162 if (ret)
5163 return ret;
5164
5165 ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
5166 return 0;
5167}
5168
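/* Bring up the L2 rings for the UIO device.  The bnx2x path writes the
 * initial RX producers, builds the client-init ramrod data in the
 * shared l2_buf, submits CLIENT_SETUP and busy-waits (up to ~10 ms) for
 * its completion, then publishes the doorbell parameters for the
 * userspace consumer.
 */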
5169static void cnic_init_rings(struct cnic_dev *dev)
5170{
5171 struct cnic_local *cp = dev->cnic_priv;
5172 struct bnx2x *bp = netdev_priv(dev->netdev);
5173 struct cnic_uio_dev *udev = cp->udev;
5174
5175 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5176 return;
5177
5178 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5179 cnic_init_bnx2_tx_ring(dev);
5180 cnic_init_bnx2_rx_ring(dev);
5181 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5182 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5183 u32 cli = cp->ethdev->iscsi_l2_client_id;
5184 u32 cid = cp->ethdev->iscsi_l2_cid;
5185 u32 cl_qzone_id;
5186 struct client_init_ramrod_data *data;
5187 union l5cm_specific_data l5_data;
5188 struct ustorm_eth_rx_producers rx_prods = {0};
5189 u32 off, i, *cid_ptr;
5190
5191 rx_prods.bd_prod = 0;
5192 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5193 barrier();
5194
5195 cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
5196
5197 off = BAR_USTRORM_INTMEM +
5198 (BNX2X_CHIP_IS_E2_PLUS(bp) ?
5199 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
5200 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
5201
5202 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
5203 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
5204
5205 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5206
5207 data = udev->l2_buf;
5208 cid_ptr = udev->l2_buf + 12;
5209
5210 memset(data, 0, sizeof(*data));
5211
5212 cnic_init_bnx2x_tx_ring(dev, data);
5213 cnic_init_bnx2x_rx_ring(dev, data);
5214
5215 data->general.fp_hsi_ver = ETH_FP_HSI_VERSION;
5216
5217 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5218 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
5219
5220 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5221
5222 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5223 cid, ETH_CONNECTION_TYPE, &l5_data);
5224
5225 i = 0;
5226 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5227 ++i < 10)
5228 msleep(1);
5229
5230 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5231 netdev_err(dev->netdev,
5232 "iSCSI CLIENT_SETUP did not complete\n");
5233 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5234 cnic_ring_ctl(dev, cid, cli, 1);
5235 *cid_ptr = cid >> 4;
5236 *(cid_ptr + 1) = cid * bp->db_size;
5237 *(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
5238 }
5239}
5240
5241static void cnic_shutdown_rings(struct cnic_dev *dev)
5242{
5243 struct cnic_local *cp = dev->cnic_priv;
5244 struct cnic_uio_dev *udev = cp->udev;
5245 void *rx_ring;
5246
5247 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5248 return;
5249
5250 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5251 cnic_shutdown_bnx2_rx_ring(dev);
5252 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5253 u32 cli = cp->ethdev->iscsi_l2_client_id;
5254 u32 cid = cp->ethdev->iscsi_l2_cid;
5255 union l5cm_specific_data l5_data;
5256 int i;
5257
5258 cnic_ring_ctl(dev, cid, cli, 0);
5259
5260 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5261
5262 l5_data.phy_address.lo = cli;
5263 l5_data.phy_address.hi = 0;
5264 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5265 cid, ETH_CONNECTION_TYPE, &l5_data);
5266 i = 0;
5267 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5268 ++i < 10)
5269 msleep(1);
5270
5271 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5272 netdev_err(dev->netdev,
5273 "iSCSI CLIENT_HALT did not complete\n");
5274 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5275
5276 memset(&l5_data, 0, sizeof(l5_data));
5277 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5278 cid, NONE_CONNECTION_TYPE, &l5_data);
5279 msleep(10);
5280 }
5281 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5282 rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
5283 memset(rx_ring, 0, CNIC_PAGE_SIZE);
5284}
5285
5286static int cnic_register_netdev(struct cnic_dev *dev)
5287{
5288 struct cnic_local *cp = dev->cnic_priv;
5289 struct cnic_eth_dev *ethdev = cp->ethdev;
5290 int err;
5291
5292 if (!ethdev)
5293 return -ENODEV;
5294
5295 if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5296 return 0;
5297
5298 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5299 if (err)
5300 netdev_err(dev->netdev, "register_cnic failed\n");
5301
 5302	/* Read the iSCSI config again.  On some bnx2x devices, the iSCSI
 5303	 * config can change after the firmware is downloaded.
 5304	 */
5305 dev->max_iscsi_conn = ethdev->max_iscsi_conn;
5306 if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
5307 dev->max_iscsi_conn = 0;
5308
5309 return err;
5310}
5311
5312static void cnic_unregister_netdev(struct cnic_dev *dev)
5313{
5314 struct cnic_local *cp = dev->cnic_priv;
5315 struct cnic_eth_dev *ethdev = cp->ethdev;
5316
5317 if (!ethdev)
5318 return;
5319
5320 ethdev->drv_unregister_cnic(dev->netdev);
5321}
5322
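/* Common bring-up shared by bnx2 and bnx2x: borrow the status block
 * from the ethdev, allocate resources, start the chip-specific hardware
 * and the connection manager, then enable interrupts.  On failure the
 * device is torn back down and the PCI reference is dropped.
 */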
5323static int cnic_start_hw(struct cnic_dev *dev)
5324{
5325 struct cnic_local *cp = dev->cnic_priv;
5326 struct cnic_eth_dev *ethdev = cp->ethdev;
5327 int err;
5328
5329 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5330 return -EALREADY;
5331
5332 dev->regview = ethdev->io_base;
5333 pci_dev_get(dev->pcidev);
5334 cp->func = PCI_FUNC(dev->pcidev->devfn);
5335 cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5336 cp->status_blk_map = ethdev->irq_arr[0].status_blk_map;
5337 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5338
5339 err = cp->alloc_resc(dev);
5340 if (err) {
5341 netdev_err(dev->netdev, "allocate resource failure\n");
5342 goto err1;
5343 }
5344
5345 err = cp->start_hw(dev);
5346 if (err)
5347 goto err1;
5348
5349 err = cnic_cm_open(dev);
5350 if (err)
5351 goto err1;
5352
5353 set_bit(CNIC_F_CNIC_UP, &dev->flags);
5354
5355 cp->enable_int(dev);
5356
5357 return 0;
5358
5359err1:
5360 if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
5361 cp->stop_hw(dev);
5362 else
5363 cp->free_resc(dev);
5364 pci_dev_put(dev->pcidev);
5365 return err;
5366}
5367
5368static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5369{
5370 cnic_disable_bnx2_int_sync(dev);
5371
5372 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5373 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5374
5375 cnic_init_context(dev, KWQ_CID);
5376 cnic_init_context(dev, KCQ_CID);
5377
5378 cnic_setup_5709_context(dev, 0);
5379 cnic_free_irq(dev);
5380
5381 cnic_free_resc(dev);
5382}
5383
5384
5385static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5386{
5387 struct cnic_local *cp = dev->cnic_priv;
5388 struct bnx2x *bp = netdev_priv(dev->netdev);
5389 u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
5390 u32 sb_id = cp->status_blk_num;
5391 u32 idx_off, syn_off;
5392
5393 cnic_free_irq(dev);
5394
5395 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5396 idx_off = offsetof(struct hc_status_block_e2, index_values) +
5397 (hc_index * sizeof(u16));
5398
5399 syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
5400 } else {
5401 idx_off = offsetof(struct hc_status_block_e1x, index_values) +
5402 (hc_index * sizeof(u16));
5403
5404 syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
5405 }
5406 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
5407 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
5408 idx_off, 0);
5409
5410 *cp->kcq1.hw_prod_idx_ptr = 0;
5411 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5412 CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
5413 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5414 cnic_free_resc(dev);
5415}
5416
5417static void cnic_stop_hw(struct cnic_dev *dev)
5418{
5419 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5420 struct cnic_local *cp = dev->cnic_priv;
5421 int i = 0;
5422
5423 /* Need to wait for the ring shutdown event to complete
5424 * before clearing the CNIC_UP flag.
5425 */
5426 while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
5427 msleep(100);
5428 i++;
5429 }
5430 cnic_shutdown_rings(dev);
5431 cp->stop_cm(dev);
5432 cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
5433 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5434 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
5435 synchronize_rcu();
5436 cnic_cm_shutdown(dev);
5437 cp->stop_hw(dev);
5438 pci_dev_put(dev->pcidev);
5439 }
5440}
5441
5442static void cnic_free_dev(struct cnic_dev *dev)
5443{
5444 int i = 0;
5445
5446 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5447 msleep(100);
5448 i++;
5449 }
5450 if (atomic_read(&dev->ref_count) != 0)
5451 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5452
5453 netdev_info(dev->netdev, "Removed CNIC device\n");
5454 dev_put(dev->netdev);
5455 kfree(dev);
5456}
5457
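/* Fetch the FC NPIV table from the underlying bnx2x driver.  Only
 * meaningful while the device is up and only on E2 and later chips.
 */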
5458static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
5459 struct cnic_fc_npiv_tbl *npiv_tbl)
5460{
5461 struct cnic_local *cp = dev->cnic_priv;
5462 struct bnx2x *bp = netdev_priv(dev->netdev);
5463 int ret;
5464
5465 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
5466 return -EAGAIN; /* bnx2x is down */
5467
5468 if (!BNX2X_CHIP_IS_E2_PLUS(bp))
5469 return -EINVAL;
5470
5471 ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
5472 return ret;
5473}
5474
5475static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5476 struct pci_dev *pdev)
5477{
5478 struct cnic_dev *cdev;
5479 struct cnic_local *cp;
5480 int alloc_size;
5481
5482 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5483
5484 cdev = kzalloc(alloc_size, GFP_KERNEL);
5485 if (cdev == NULL)
5486 return NULL;
5487
5488 cdev->netdev = dev;
5489 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5490 cdev->register_device = cnic_register_device;
5491 cdev->unregister_device = cnic_unregister_device;
5492 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5493 cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
5494 atomic_set(&cdev->ref_count, 0);
5495
5496 cp = cdev->cnic_priv;
5497 cp->dev = cdev;
5498 cp->l2_single_buf_size = 0x400;
5499 cp->l2_rx_ring_size = 3;
5500
5501 spin_lock_init(&cp->cnic_ulp_lock);
5502
5503 netdev_info(dev, "Added CNIC device\n");
5504
5505 return cdev;
5506}
5507
5508static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5509{
5510 struct pci_dev *pdev;
5511 struct cnic_dev *cdev;
5512 struct cnic_local *cp;
5513 struct bnx2 *bp = netdev_priv(dev);
5514 struct cnic_eth_dev *ethdev = NULL;
5515
5516 if (bp->cnic_probe)
5517 ethdev = (bp->cnic_probe)(dev);
5518
5519 if (!ethdev)
5520 return NULL;
5521
5522 pdev = ethdev->pdev;
5523 if (!pdev)
5524 return NULL;
5525
5526 dev_hold(dev);
5527 pci_dev_get(pdev);
5528 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5529 pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5530 (pdev->revision < 0x10)) {
5531 pci_dev_put(pdev);
5532 goto cnic_err;
5533 }
5534 pci_dev_put(pdev);
5535
5536 cdev = cnic_alloc_dev(dev, pdev);
5537 if (cdev == NULL)
5538 goto cnic_err;
5539
5540 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5541 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5542
5543 cp = cdev->cnic_priv;
5544 cp->ethdev = ethdev;
5545 cdev->pcidev = pdev;
5546 cp->chip_id = ethdev->chip_id;
5547
5548 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5549
5550 cp->cnic_ops = &cnic_bnx2_ops;
5551 cp->start_hw = cnic_start_bnx2_hw;
5552 cp->stop_hw = cnic_stop_bnx2_hw;
5553 cp->setup_pgtbl = cnic_setup_page_tbl;
5554 cp->alloc_resc = cnic_alloc_bnx2_resc;
5555 cp->free_resc = cnic_free_resc;
5556 cp->start_cm = cnic_cm_init_bnx2_hw;
5557 cp->stop_cm = cnic_cm_stop_bnx2_hw;
5558 cp->enable_int = cnic_enable_bnx2_int;
5559 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5560 cp->close_conn = cnic_close_bnx2_conn;
5561 return cdev;
5562
5563cnic_err:
5564 dev_put(dev);
5565 return NULL;
5566}
5567
5568static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5569{
5570 struct pci_dev *pdev;
5571 struct cnic_dev *cdev;
5572 struct cnic_local *cp;
5573 struct bnx2x *bp = netdev_priv(dev);
5574 struct cnic_eth_dev *ethdev = NULL;
5575
5576 if (bp->cnic_probe)
5577 ethdev = bp->cnic_probe(dev);
5578
5579 if (!ethdev)
5580 return NULL;
5581
5582 pdev = ethdev->pdev;
5583 if (!pdev)
5584 return NULL;
5585
5586 dev_hold(dev);
5587 cdev = cnic_alloc_dev(dev, pdev);
5588 if (cdev == NULL) {
5589 dev_put(dev);
5590 return NULL;
5591 }
5592
5593 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5594 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5595
5596 cp = cdev->cnic_priv;
5597 cp->ethdev = ethdev;
5598 cdev->pcidev = pdev;
5599 cp->chip_id = ethdev->chip_id;
5600
5601 cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
5602
5603 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5604 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5605 if (CNIC_SUPPORTS_FCOE(bp)) {
5606 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5607 cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
5608 }
5609
5610 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
5611 cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
5612
5613 memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
5614
5615 cp->cnic_ops = &cnic_bnx2x_ops;
5616 cp->start_hw = cnic_start_bnx2x_hw;
5617 cp->stop_hw = cnic_stop_bnx2x_hw;
5618 cp->setup_pgtbl = cnic_setup_page_tbl_le;
5619 cp->alloc_resc = cnic_alloc_bnx2x_resc;
5620 cp->free_resc = cnic_free_resc;
5621 cp->start_cm = cnic_cm_init_bnx2x_hw;
5622 cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5623 cp->enable_int = cnic_enable_bnx2x_int;
5624 cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5625 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5626 cp->ack_int = cnic_ack_bnx2x_e2_msix;
5627 cp->arm_int = cnic_arm_bnx2x_e2_msix;
5628 } else {
5629 cp->ack_int = cnic_ack_bnx2x_msix;
5630 cp->arm_int = cnic_arm_bnx2x_msix;
5631 }
5632 cp->close_conn = cnic_close_bnx2x_conn;
5633 return cdev;
5634}
5635
5636static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5637{
5638 struct ethtool_drvinfo drvinfo;
5639 struct cnic_dev *cdev = NULL;
5640
5641 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5642 memset(&drvinfo, 0, sizeof(drvinfo));
5643 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5644
5645 if (!strcmp(drvinfo.driver, "bnx2"))
5646 cdev = init_bnx2_cnic(dev);
 4647		else if (!strcmp(drvinfo.driver, "bnx2x"))
5648 cdev = init_bnx2x_cnic(dev);
5649 if (cdev) {
5650 write_lock(&cnic_dev_lock);
5651 list_add(&cdev->list, &cnic_dev_list);
5652 write_unlock(&cnic_dev_lock);
5653 }
5654 }
5655 return cdev;
5656}
5657
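/* Fan a netdev event out to every registered ULP.  ULP_F_CALL_PENDING
 * brackets the callback, presumably so that a concurrent unregister can
 * wait for in-flight notifications to finish.
 */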
5658static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5659 u16 vlan_id)
5660{
5661 int if_type;
5662
5663 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5664 struct cnic_ulp_ops *ulp_ops;
5665 void *ctx;
5666
5667 mutex_lock(&cnic_lock);
5668 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
5669 lockdep_is_held(&cnic_lock));
5670 if (!ulp_ops || !ulp_ops->indicate_netevent) {
5671 mutex_unlock(&cnic_lock);
5672 continue;
5673 }
5674
5675 ctx = cp->ulp_handle[if_type];
5676
5677 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5678 mutex_unlock(&cnic_lock);
5679
5680 ulp_ops->indicate_netevent(ctx, event, vlan_id);
5681
5682 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5683 }
5684}
5685
5686/* netdev event handler */
5687static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5688 void *ptr)
5689{
5690 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
5691 struct cnic_dev *dev;
5692 int new_dev = 0;
5693
5694 dev = cnic_from_netdev(netdev);
5695
5696 if (!dev && event == NETDEV_REGISTER) {
5697 /* Check for the hot-plug device */
5698 dev = is_cnic_dev(netdev);
5699 if (dev) {
5700 new_dev = 1;
5701 cnic_hold(dev);
5702 }
5703 }
5704 if (dev) {
5705 struct cnic_local *cp = dev->cnic_priv;
5706
5707 if (new_dev)
5708 cnic_ulp_init(dev);
5709 else if (event == NETDEV_UNREGISTER)
5710 cnic_ulp_exit(dev);
5711
5712 if (event == NETDEV_UP) {
5713 if (cnic_register_netdev(dev) != 0) {
5714 cnic_put(dev);
5715 goto done;
5716 }
5717 if (!cnic_start_hw(dev))
5718 cnic_ulp_start(dev);
5719 }
5720
5721 cnic_rcv_netevent(cp, event, 0);
5722
5723 if (event == NETDEV_GOING_DOWN) {
5724 cnic_ulp_stop(dev);
5725 cnic_stop_hw(dev);
5726 cnic_unregister_netdev(dev);
5727 } else if (event == NETDEV_UNREGISTER) {
5728 write_lock(&cnic_dev_lock);
5729 list_del_init(&dev->list);
5730 write_unlock(&cnic_dev_lock);
5731
5732 cnic_put(dev);
5733 cnic_free_dev(dev);
5734 goto done;
5735 }
5736 cnic_put(dev);
5737 } else {
5738 struct net_device *realdev;
5739 u16 vid;
5740
5741 vid = cnic_get_vlan(netdev, &realdev);
5742 if (realdev) {
5743 dev = cnic_from_netdev(realdev);
5744 if (dev) {
5745 vid |= VLAN_CFI_MASK; /* make non-zero */
5746 cnic_rcv_netevent(dev->cnic_priv, event, vid);
5747 cnic_put(dev);
5748 }
5749 }
5750 }
5751done:
5752 return NOTIFY_DONE;
5753}
5754
5755static struct notifier_block cnic_netdev_notifier = {
5756 .notifier_call = cnic_netdev_event
5757};
5758
5759static void cnic_release(void)
5760{
5761 struct cnic_uio_dev *udev;
5762
5763 while (!list_empty(&cnic_udev_list)) {
5764 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5765 list);
5766 cnic_free_uio(udev);
5767 }
5768}
5769
5770static int __init cnic_init(void)
5771{
5772 int rc = 0;
5773
5774 pr_info("%s", version);
5775
5776 rc = register_netdevice_notifier(&cnic_netdev_notifier);
5777 if (rc) {
5778 cnic_release();
5779 return rc;
5780 }
5781
5782 cnic_wq = create_singlethread_workqueue("cnic_wq");
5783 if (!cnic_wq) {
5784 cnic_release();
5785 unregister_netdevice_notifier(&cnic_netdev_notifier);
5786 return -ENOMEM;
5787 }
5788
5789 return 0;
5790}
5791
5792static void __exit cnic_exit(void)
5793{
5794 unregister_netdevice_notifier(&cnic_netdev_notifier);
5795 cnic_release();
5796 destroy_workqueue(cnic_wq);
5797}
5798
5799module_init(cnic_init);
5800module_exit(cnic_exit);