/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"

MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");

#define QEDR_WQ_MULTIPLIER_DFT (3)

static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num,
                                   enum ib_event_type type)
{
        struct ib_event ibev;

        ibev.device = &dev->ibdev;
        ibev.element.port_num = port_num;
        ibev.event = type;

        ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
                                            u32 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
        struct qedr_dev *qedr = get_qedr_dev(ibdev);
        u32 fw_ver = (u32)qedr->attr.fw_ver;

        snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
                 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
                 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

static int qedr_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
                                    struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        err = qedr_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
                                    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
}

static int qedr_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
                                  struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        err = qedr_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->gid_tbl_len = 1;
        immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
        immutable->max_mad_size = 0;

        return 0;
}

/* QEDR sysfs interface */
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
                           char *buf)
{
        struct qedr_dev *dev =
                rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

        return sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
                             struct device_attribute *attr, char *buf)
{
        struct qedr_dev *dev =
                rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

        return sysfs_emit(buf, "FastLinQ QL%x %s\n", dev->pdev->device,
                          rdma_protocol_iwarp(&dev->ibdev, 1) ? "iWARP" :
                                                                "RoCE");
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *qedr_attributes[] = {
        &dev_attr_hw_rev.attr,
        &dev_attr_hca_type.attr,
        NULL
};

static const struct attribute_group qedr_attr_group = {
        .attrs = qedr_attributes,
};

static const struct ib_device_ops qedr_iw_dev_ops = {
        .get_port_immutable = qedr_iw_port_immutable,
        .iw_accept = qedr_iw_accept,
        .iw_add_ref = qedr_iw_qp_add_ref,
        .iw_connect = qedr_iw_connect,
        .iw_create_listen = qedr_iw_create_listen,
        .iw_destroy_listen = qedr_iw_destroy_listen,
        .iw_get_qp = qedr_iw_get_qp,
        .iw_reject = qedr_iw_reject,
        .iw_rem_ref = qedr_iw_qp_rem_ref,
        .query_gid = qedr_iw_query_gid,
};

static int qedr_iw_register_device(struct qedr_dev *dev)
{
        dev->ibdev.node_type = RDMA_NODE_RNIC;

        ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);

        memcpy(dev->ibdev.iw_ifname,
               dev->ndev->name, sizeof(dev->ibdev.iw_ifname));

        return 0;
}

static const struct ib_device_ops qedr_roce_dev_ops = {
        .alloc_xrcd = qedr_alloc_xrcd,
        .dealloc_xrcd = qedr_dealloc_xrcd,
        .get_port_immutable = qedr_roce_port_immutable,
        .query_pkey = qedr_query_pkey,
};

static void qedr_roce_register_device(struct qedr_dev *dev)
{
        dev->ibdev.node_type = RDMA_NODE_IB_CA;

        ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
}

static const struct ib_device_ops qedr_dev_ops = {
        .owner = THIS_MODULE,
        .driver_id = RDMA_DRIVER_QEDR,
        .uverbs_abi_ver = QEDR_ABI_VERSION,

        .alloc_mr = qedr_alloc_mr,
        .alloc_pd = qedr_alloc_pd,
        .alloc_ucontext = qedr_alloc_ucontext,
        .create_ah = qedr_create_ah,
        .create_cq = qedr_create_cq,
        .create_qp = qedr_create_qp,
        .create_srq = qedr_create_srq,
        .dealloc_pd = qedr_dealloc_pd,
        .dealloc_ucontext = qedr_dealloc_ucontext,
        .dereg_mr = qedr_dereg_mr,
        .destroy_ah = qedr_destroy_ah,
        .destroy_cq = qedr_destroy_cq,
        .destroy_qp = qedr_destroy_qp,
        .destroy_srq = qedr_destroy_srq,
        .device_group = &qedr_attr_group,
        .get_dev_fw_str = qedr_get_dev_fw_str,
        .get_dma_mr = qedr_get_dma_mr,
        .get_link_layer = qedr_link_layer,
        .map_mr_sg = qedr_map_mr_sg,
        .mmap = qedr_mmap,
        .mmap_free = qedr_mmap_free,
        .modify_qp = qedr_modify_qp,
        .modify_srq = qedr_modify_srq,
        .poll_cq = qedr_poll_cq,
        .post_recv = qedr_post_recv,
        .post_send = qedr_post_send,
        .post_srq_recv = qedr_post_srq_recv,
        .process_mad = qedr_process_mad,
        .query_device = qedr_query_device,
        .query_port = qedr_query_port,
        .query_qp = qedr_query_qp,
        .query_srq = qedr_query_srq,
        .reg_user_mr = qedr_reg_user_mr,
        .req_notify_cq = qedr_arm_cq,

        INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
        INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
        INIT_RDMA_OBJ_SIZE(ib_qp, qedr_qp, ibqp),
        INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};

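/* Common IB registration: pick the iWARP or RoCE personality first, then
 * install the shared verbs ops, bind the netdev and register as "qedr%d".
 */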
static int qedr_register_device(struct qedr_dev *dev)
{
        int rc;

        dev->ibdev.node_guid = dev->attr.node_guid;
        memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));

        if (IS_IWARP(dev)) {
                rc = qedr_iw_register_device(dev);
                if (rc)
                        return rc;
        } else {
                qedr_roce_register_device(dev);
        }

        dev->ibdev.phys_port_cnt = 1;
        dev->ibdev.num_comp_vectors = dev->num_cnq;
        dev->ibdev.dev.parent = &dev->pdev->dev;

        ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);

        rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
        if (rc)
                return rc;

        dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX);
        return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev);
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
                             struct qed_sb_info *sb_info, u16 sb_id)
{
        struct status_block *sb_virt;
        dma_addr_t sb_phys;
        int rc;

        sb_virt = dma_alloc_coherent(&dev->pdev->dev,
                                     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
        if (!sb_virt)
                return -ENOMEM;

        rc = dev->ops->common->sb_init(dev->cdev, sb_info,
                                       sb_virt, sb_phys, sb_id,
                                       QED_SB_TYPE_CNQ);
        if (rc) {
                pr_err("Status block initialization failed\n");
                dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
                                  sb_virt, sb_phys);
                return rc;
        }

        return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
                             struct qed_sb_info *sb_info, int sb_id)
{
        if (sb_info->sb_virt) {
                dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
                                             QED_SB_TYPE_CNQ);
                dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
                                  (void *)sb_info->sb_virt, sb_info->sb_phys);
        }
}

static void qedr_free_resources(struct qedr_dev *dev)
{
        int i;

        if (IS_IWARP(dev))
                destroy_workqueue(dev->iwarp_wq);

        for (i = 0; i < dev->num_cnq; i++) {
                qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
                dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
        }

        kfree(dev->cnq_array);
        kfree(dev->sb_array);
        kfree(dev->sgid_tbl);
}

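/* Allocate per-device SW state: the SGID table, one status block and one
 * CNQ PBL ring per completion-notification queue, and (for iWARP) the
 * connection-management workqueue.
 */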
static int qedr_alloc_resources(struct qedr_dev *dev)
{
        struct qed_chain_init_params params = {
                .mode = QED_CHAIN_MODE_PBL,
                .intended_use = QED_CHAIN_USE_TO_CONSUME,
                .cnt_type = QED_CHAIN_CNT_TYPE_U16,
                .elem_size = sizeof(struct regpair *),
        };
        struct qedr_cnq *cnq;
        __le16 *cons_pi;
        int i, rc;

        dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
                                GFP_KERNEL);
        if (!dev->sgid_tbl)
                return -ENOMEM;

        spin_lock_init(&dev->sgid_lock);
        xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);

        if (IS_IWARP(dev)) {
                xa_init(&dev->qps);
                dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
                if (!dev->iwarp_wq) {
                        rc = -ENOMEM;
                        goto err1;
                }
        }

        /* Allocate Status blocks for CNQ */
        dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
                                GFP_KERNEL);
        if (!dev->sb_array) {
                rc = -ENOMEM;
                goto err_destroy_wq;
        }

        dev->cnq_array = kcalloc(dev->num_cnq,
                                 sizeof(*dev->cnq_array), GFP_KERNEL);
        if (!dev->cnq_array) {
                rc = -ENOMEM;
                goto err2;
        }

        dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

        /* Allocate CNQ PBLs */
        params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
                                 QEDR_ROCE_MAX_CNQ_SIZE);

        for (i = 0; i < dev->num_cnq; i++) {
                cnq = &dev->cnq_array[i];

                rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
                                       dev->sb_start + i);
                if (rc)
                        goto err3;

                rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
                                                   &params);
                if (rc)
                        goto err4;

                cnq->dev = dev;
                cnq->sb = &dev->sb_array[i];
                cons_pi = dev->sb_array[i].sb_virt->pi_array;
                cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
                cnq->index = i;
                sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

                DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
                         i, qed_chain_get_cons_idx(&cnq->pbl));
        }

        return 0;
err4:
        qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
        for (--i; i >= 0; i--) {
                dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
                qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
        }
        kfree(dev->cnq_array);
err2:
        kfree(dev->sb_array);
err_destroy_wq:
        if (IS_IWARP(dev))
                destroy_workqueue(dev->iwarp_wq);
err1:
        kfree(dev->sgid_tbl);
        return rc;
}

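/* Request 64-bit PCIe AtomicOp completion routing to the root port so the
 * device can offer global atomics; fall back to IB_ATOMIC_NONE when the
 * fabric cannot route them.
 */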
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
        int rc = pci_enable_atomic_ops_to_root(pdev,
                                               PCI_EXP_DEVCAP2_ATOMIC_COMP64);

        if (rc) {
                dev->atomic_cap = IB_ATOMIC_NONE;
                DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
        } else {
                dev->atomic_cap = IB_ATOMIC_GLOB;
                DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
        }
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))

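/* CNQ interrupt handler: ack and disable the status block, then drain the
 * completion-notification ring. Each consumed element carries the 64-bit
 * handle of a CQ whose completion handler is invoked; the consumer index
 * is pushed back to the device before interrupts are re-enabled.
 */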
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
        u16 hw_comp_cons, sw_comp_cons;
        struct qedr_cnq *cnq = handle;
        struct regpair *cq_handle;
        struct qedr_cq *cq;

        qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

        qed_sb_update_sb_idx(cnq->sb);

        hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
        sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

        /* Align protocol-index and chain reads */
        rmb();

        while (sw_comp_cons != hw_comp_cons) {
                cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
                cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
                                                           cq_handle->lo);

                if (cq == NULL) {
                        DP_ERR(cnq->dev,
                               "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
                               cq_handle->hi, cq_handle->lo, sw_comp_cons,
                               hw_comp_cons);

                        break;
                }

                if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
                        DP_ERR(cnq->dev,
                               "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
                               cq_handle->hi, cq_handle->lo, cq);
                        break;
                }

                cq->arm_flags = 0;

                if (!cq->destroyed && cq->ibcq.comp_handler)
                        (*cq->ibcq.comp_handler)
                                (&cq->ibcq, cq->ibcq.cq_context);

                /* The CQ's CNQ notification counter is checked before
                 * destroying the CQ in a busy-wait loop that waits for all of
                 * the CQ's CNQ interrupts to be processed. It is increased
                 * here, only after the completion handler, to ensure that
                 * the handler is not running when the CQ is destroyed.
                 */
                cq->cnq_notif++;

                sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

                cnq->n_comp++;
        }

        qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
                                      sw_comp_cons);

        qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

        return IRQ_HANDLED;
}

static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
        u32 vector;
        u16 idx;
        int i;

        for (i = 0; i < dev->int_info.used_cnt; i++) {
                if (dev->int_info.msix_cnt) {
                        idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
                        vector = dev->int_info.msix[idx].vector;
                        free_irq(vector, &dev->cnq_array[i]);
                }
        }

        dev->int_info.used_cnt = 0;
}

static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
        int i, rc = 0;
        u16 idx;

        if (dev->num_cnq > dev->int_info.msix_cnt) {
                DP_ERR(dev,
                       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
                       dev->num_cnq, dev->int_info.msix_cnt);
                return -EINVAL;
        }

        for (i = 0; i < dev->num_cnq; i++) {
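                /* On dual-engine (CMT) devices the MSI-X vectors appear to
                 * be laid out per hwfn; pick the vector of the engine this
                 * RDMA instance is affined to (assumed layout, matching the
                 * index math in qedr_sync_free_irqs()).
                 */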
                idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
                rc = request_irq(dev->int_info.msix[idx].vector,
                                 qedr_irq_handler, 0, dev->cnq_array[i].name,
                                 &dev->cnq_array[i]);
                if (rc) {
                        DP_ERR(dev, "Request cnq %d irq failed\n", i);
                        qedr_sync_free_irqs(dev);
                } else {
                        DP_DEBUG(dev, QEDR_MSG_INIT,
                                 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
                                 dev->cnq_array[i].name, i,
                                 &dev->cnq_array[i]);
                        dev->int_info.used_cnt++;
                }
        }

        return rc;
}

static int qedr_setup_irqs(struct qedr_dev *dev)
{
        int rc;

        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

        /* Learn Interrupt configuration */
        rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
        if (rc < 0)
                return rc;

        rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
        if (rc) {
                DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
                return rc;
        }

        if (dev->int_info.msix_cnt) {
                DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
                         dev->int_info.msix_cnt);
                rc = qedr_req_msix_irqs(dev);
                if (rc)
                        return rc;
        }

        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

        return 0;
}

static int qedr_set_device_attr(struct qedr_dev *dev)
{
        struct qed_rdma_device *qed_attr;
        struct qedr_device_attr *attr;
        u32 page_size;

        /* Part 1 - query core capabilities */
        qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

        /* Part 2 - check capabilities */
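        /* ~caps + 1 (two's complement) isolates the lowest set bit of the
         * page-size capability mask, i.e. the smallest page size the device
         * supports; it must not exceed the kernel's PAGE_SIZE.
         */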
        page_size = ~qed_attr->page_size_caps + 1;
        if (page_size > PAGE_SIZE) {
                DP_ERR(dev,
                       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
                       PAGE_SIZE, page_size);
                return -ENODEV;
        }

        /* Part 3 - copy and update capabilities */
        attr = &dev->attr;
        attr->vendor_id = qed_attr->vendor_id;
        attr->vendor_part_id = qed_attr->vendor_part_id;
        attr->hw_ver = qed_attr->hw_ver;
        attr->fw_ver = qed_attr->fw_ver;
        attr->node_guid = qed_attr->node_guid;
        attr->sys_image_guid = qed_attr->sys_image_guid;
        attr->max_cnq = qed_attr->max_cnq;
        attr->max_sge = qed_attr->max_sge;
        attr->max_inline = qed_attr->max_inline;
        attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
        attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
        attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
        attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
        attr->max_dev_resp_rd_atomic_resc =
            qed_attr->max_dev_resp_rd_atomic_resc;
        attr->max_cq = qed_attr->max_cq;
        attr->max_qp = qed_attr->max_qp;
        attr->max_mr = qed_attr->max_mr;
        attr->max_mr_size = qed_attr->max_mr_size;
        attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
        attr->max_mw = qed_attr->max_mw;
        attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
        attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
        attr->max_pd = qed_attr->max_pd;
        attr->max_ah = qed_attr->max_ah;
        attr->max_pkey = qed_attr->max_pkey;
        attr->max_srq = qed_attr->max_srq;
        attr->max_srq_wr = qed_attr->max_srq_wr;
        attr->dev_caps = qed_attr->dev_caps;
        attr->page_size_caps = qed_attr->page_size_caps;
        attr->dev_ack_delay = qed_attr->dev_ack_delay;
        attr->reserved_lkey = qed_attr->reserved_lkey;
        attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
        attr->max_stats_queues = qed_attr->max_stats_queues;

        return 0;
}

static void qedr_unaffiliated_event(void *context, u8 event_code)
{
        pr_err("unaffiliated event not implemented yet\n");
}

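/* Demultiplex firmware "affiliated" async events: translate the protocol
 * event code into an IB event and deliver it to the CQ, QP or SRQ named
 * by the 64-bit handle (or SRQ id) the firmware echoes back.
 */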
static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED  0
#define EVENT_TYPE_CQ           1
#define EVENT_TYPE_QP           2
#define EVENT_TYPE_SRQ          3
        struct qedr_dev *dev = (struct qedr_dev *)context;
        struct regpair *async_handle = (struct regpair *)fw_handle;
        u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
        u8 event_type = EVENT_TYPE_NOT_DEFINED;
        struct ib_event event;
        struct ib_srq *ibsrq;
        struct qedr_srq *srq;
        unsigned long flags;
        struct ib_cq *ibcq;
        struct ib_qp *ibqp;
        struct qedr_cq *cq;
        struct qedr_qp *qp;
        u16 srq_id;

        if (IS_ROCE(dev)) {
                switch (e_code) {
                case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
                        event.event = IB_EVENT_CQ_ERR;
                        event_type = EVENT_TYPE_CQ;
                        break;
                case ROCE_ASYNC_EVENT_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        event_type = EVENT_TYPE_QP;
                        break;
                case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
                        event.event = IB_EVENT_QP_FATAL;
                        event_type = EVENT_TYPE_QP;
                        break;
                case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        event_type = EVENT_TYPE_QP;
                        break;
                case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        event_type = EVENT_TYPE_QP;
                        break;
                case ROCE_ASYNC_EVENT_SRQ_LIMIT:
                        event.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        event_type = EVENT_TYPE_SRQ;
                        break;
                case ROCE_ASYNC_EVENT_SRQ_EMPTY:
                        event.event = IB_EVENT_SRQ_ERR;
                        event_type = EVENT_TYPE_SRQ;
                        break;
                case ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        event_type = EVENT_TYPE_QP;
                        break;
                case ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        event_type = EVENT_TYPE_QP;
                        break;
                case ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR:
                        event.event = IB_EVENT_CQ_ERR;
                        event_type = EVENT_TYPE_CQ;
                        break;
                default:
                        DP_ERR(dev, "unsupported event %d on handle=%llx\n",
                               e_code, roce_handle64);
                }
        } else {
                switch (e_code) {
                case QED_IWARP_EVENT_SRQ_LIMIT:
                        event.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        event_type = EVENT_TYPE_SRQ;
                        break;
                case QED_IWARP_EVENT_SRQ_EMPTY:
                        event.event = IB_EVENT_SRQ_ERR;
                        event_type = EVENT_TYPE_SRQ;
                        break;
                default:
                        DP_ERR(dev, "unsupported event %d on handle=%llx\n",
                               e_code, roce_handle64);
                }
        }
        switch (event_type) {
        case EVENT_TYPE_CQ:
                cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
                if (cq) {
                        ibcq = &cq->ibcq;
                        if (ibcq->event_handler) {
                                event.device = ibcq->device;
                                event.element.cq = ibcq;
                                ibcq->event_handler(&event, ibcq->cq_context);
                        }
                } else {
                        WARN(1,
                             "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
                             roce_handle64);
                }
                DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
                break;
        case EVENT_TYPE_QP:
                qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
                if (qp) {
                        ibqp = &qp->ibqp;
                        if (ibqp->event_handler) {
                                event.device = ibqp->device;
                                event.element.qp = ibqp;
                                ibqp->event_handler(&event, ibqp->qp_context);
                        }
                } else {
                        WARN(1,
                             "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
                             roce_handle64);
                }
                DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
                break;
        case EVENT_TYPE_SRQ:
                srq_id = (u16)roce_handle64;
                xa_lock_irqsave(&dev->srqs, flags);
                srq = xa_load(&dev->srqs, srq_id);
                if (srq) {
                        ibsrq = &srq->ibsrq;
                        if (ibsrq->event_handler) {
                                event.device = ibsrq->device;
                                event.element.srq = ibsrq;
                                ibsrq->event_handler(&event,
                                                     ibsrq->srq_context);
                        }
                } else {
                        DP_NOTICE(dev,
                                  "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
                                  roce_handle64);
                }
                xa_unlock_irqrestore(&dev->srqs, flags);
                DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
                break;
        default:
                break;
        }
}

static int qedr_init_hw(struct qedr_dev *dev)
{
        struct qed_rdma_add_user_out_params out_params;
        struct qed_rdma_start_in_params *in_params;
        struct qed_rdma_cnq_params *cur_pbl;
        struct qed_rdma_events events;
        dma_addr_t p_phys_table;
        u32 page_cnt;
        int rc = 0;
        int i;

        in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
        if (!in_params) {
                rc = -ENOMEM;
                goto out;
        }

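        /* Publish each CNQ ring to the qed core as a PBL: its page count
         * plus the DMA address of the PBL table itself.
         */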
        in_params->desired_cnq = dev->num_cnq;
        for (i = 0; i < dev->num_cnq; i++) {
                cur_pbl = &in_params->cnq_pbl_list[i];

                page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
                cur_pbl->num_pbl_pages = page_cnt;

                p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
                cur_pbl->pbl_ptr = (u64)p_phys_table;
        }

        events.affiliated_event = qedr_affiliated_event;
        events.unaffiliated_event = qedr_unaffiliated_event;
        events.context = dev;

        in_params->events = &events;
        in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
        in_params->max_mtu = dev->ndev->mtu;
        dev->iwarp_max_mtu = dev->ndev->mtu;
        ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

        rc = dev->ops->rdma_init(dev->cdev, in_params);
        if (rc)
                goto out;

        rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
        if (rc)
                goto out;

        dev->db_addr = out_params.dpi_addr;
        dev->db_phys_addr = out_params.dpi_phys_addr;
        dev->db_size = out_params.dpi_size;
        dev->dpi = out_params.dpi;

        rc = qedr_set_device_attr(dev);
out:
        kfree(in_params);
        if (rc)
                DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

        return rc;
}

static void qedr_stop_hw(struct qedr_dev *dev)
{
        dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
        dev->ops->rdma_stop(dev->rdma_ctx);
}

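/* Probe path, invoked by qede for an RDMA-capable port: allocate SW
 * resources, start the RDMA engine, request CNQ interrupts, then register
 * with the IB core. The error labels below unwind in reverse order.
 */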
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
                                 struct net_device *ndev)
{
        struct qed_dev_rdma_info dev_info;
        struct qedr_dev *dev;
        int rc = 0;

        dev = ib_alloc_device(qedr_dev, ibdev);
        if (!dev) {
                pr_err("Unable to allocate ib device\n");
                return NULL;
        }

        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

        dev->pdev = pdev;
        dev->ndev = ndev;
        dev->cdev = cdev;

        qed_ops = qed_get_rdma_ops();
        if (!qed_ops) {
                DP_ERR(dev, "Failed to get qed roce operations\n");
                goto init_err;
        }

        dev->ops = qed_ops;
        rc = qed_ops->fill_dev_info(cdev, &dev_info);
        if (rc)
                goto init_err;

        dev->user_dpm_enabled = dev_info.user_dpm_enabled;
        dev->rdma_type = dev_info.rdma_type;
        dev->num_hwfns = dev_info.common.num_hwfns;

        if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
                rc = dev->ops->iwarp_set_engine_affin(cdev, false);
                if (rc) {
                        DP_ERR(dev, "iWARP is disabled over a 100g device. Enabling it may impact L2 performance. To enable it run devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");
                        goto init_err;
                }
        }
        dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);

        dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

        dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
        if (!dev->num_cnq) {
                DP_ERR(dev, "Failed. At least one CNQ is required.\n");
                rc = -ENOMEM;
                goto init_err;
        }

        dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

        qedr_pci_set_atomic(dev, pdev);

        rc = qedr_alloc_resources(dev);
        if (rc)
                goto init_err;

        rc = qedr_init_hw(dev);
        if (rc)
                goto alloc_err;

        rc = qedr_setup_irqs(dev);
        if (rc)
                goto irq_err;

        rc = qedr_register_device(dev);
        if (rc) {
                DP_ERR(dev, "Unable to register device\n");
                goto reg_err;
        }

        if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
                qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
        return dev;

reg_err:
        qedr_sync_free_irqs(dev);
irq_err:
        qedr_stop_hw(dev);
alloc_err:
        qedr_free_resources(dev);
init_err:
        ib_dealloc_device(&dev->ibdev);
        DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);

        return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
        /* First unregister with stack to stop all the active traffic
         * of the registered clients.
         */
        ib_unregister_device(&dev->ibdev);

        qedr_stop_hw(dev);
        qedr_sync_free_irqs(dev);
        qedr_free_resources(dev);

        if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
                dev->ops->iwarp_set_engine_affin(dev->cdev, true);

        ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
        if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
                qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
        qedr_close(dev);
        qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
        if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
                qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}

static void qedr_mac_address_change(struct qedr_dev *dev)
{
        union ib_gid *sgid = &dev->sgid_tbl[0];
        u8 guid[8], mac_addr[6];
        int rc;

        /* Update SGID */
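        /* Build a modified EUI-64 interface ID from the MAC: flip the
         * universal/local bit and splice 0xff, 0xfe into the middle.
         */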
        ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
        guid[0] = mac_addr[0] ^ 2;
        guid[1] = mac_addr[1];
        guid[2] = mac_addr[2];
        guid[3] = 0xff;
        guid[4] = 0xfe;
        guid[5] = mac_addr[3];
        guid[6] = mac_addr[4];
        guid[7] = mac_addr[5];
        sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        memcpy(&sgid->raw[8], guid, sizeof(guid));

        /* Update LL2 */
        rc = dev->ops->ll2_set_mac_filter(dev->cdev,
                                          dev->gsi_ll2_mac_address,
                                          dev->ndev->dev_addr);

        ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

        qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

        if (rc)
                DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all the NIC-specific
 * initialization is done before the RoCE driver notifies the stack.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
        switch (event) {
        case QEDE_UP:
                qedr_open(dev);
                break;
        case QEDE_DOWN:
                qedr_close(dev);
                break;
        case QEDE_CLOSE:
                qedr_shutdown(dev);
                break;
        case QEDE_CHANGE_ADDR:
                qedr_mac_address_change(dev);
                break;
        case QEDE_CHANGE_MTU:
                if (rdma_protocol_iwarp(&dev->ibdev, 1))
                        if (dev->ndev->mtu != dev->iwarp_max_mtu)
                                DP_NOTICE(dev,
                                          "MTU was changed from %d to %d. This will not take effect for iWARP until qedr is reloaded\n",
                                          dev->iwarp_max_mtu, dev->ndev->mtu);
                break;
        default:
                pr_err("Event not supported\n");
        }
}

static struct qedr_driver qedr_drv = {
        .name = "qedr_driver",
        .add = qedr_add,
        .remove = qedr_remove,
        .notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
        return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
        qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);