// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "efa.h"

#define PCI_DEV_ID_EFA0_VF 0xefa0
#define PCI_DEV_ID_EFA1_VF 0xefa1
#define PCI_DEV_ID_EFA2_VF 0xefa2
#define PCI_DEV_ID_EFA3_VF 0xefa3

static const struct pci_device_id efa_pci_tbl[] = {
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA0_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA1_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA2_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA3_VF) },
	{ }
};

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_DEVICE_TABLE(pci, efa_pci_tbl);

#define EFA_REG_BAR 0
#define EFA_MEM_BAR 2
#define EFA_BASE_BAR_MASK (BIT(EFA_REG_BAR) | BIT(EFA_MEM_BAR))

#define EFA_AENQ_ENABLED_GROUPS \
	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))

extern const struct uapi_definition efa_uapi_defs[];

/* This handler will be called for unknown event groups or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
				       struct efa_admin_aenq_entry *aenq_e)
{
	struct efa_dev *dev = (struct efa_dev *)data;

	ibdev_err(&dev->ibdev,
		  "Unknown event was received or event with unimplemented handler\n");
}

static void efa_keep_alive(void *data, struct efa_admin_aenq_entry *aenq_e)
{
	struct efa_dev *dev = (struct efa_dev *)data;

	atomic64_inc(&dev->stats.keep_alive_rcvd);
}

static struct efa_aenq_handlers aenq_handlers = {
	.handlers = {
		[EFA_ADMIN_KEEP_ALIVE] = efa_keep_alive,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

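/* Release the requested PCI memory regions selected by bars_mask. */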
static void efa_release_bars(struct efa_dev *dev, int bars_mask)
{
	struct pci_dev *pdev = dev->pdev;
	int release_bars;

	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & bars_mask;
	pci_release_selected_regions(pdev, release_bars);
}

static void efa_process_comp_eqe(struct efa_dev *dev, struct efa_admin_eqe *eqe)
{
	u16 cqn = eqe->u.comp_event.cqn;
	struct efa_cq *cq;

	/* Safe to load as we're in irq and removal calls synchronize_irq() */
	cq = xa_load(&dev->cqs_xa, cqn);
	if (unlikely(!cq)) {
		ibdev_err_ratelimited(&dev->ibdev,
				      "Completion event on non-existent CQ[%u]",
				      cqn);
		return;
	}

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

static void efa_process_eqe(struct efa_com_eq *eeq, struct efa_admin_eqe *eqe)
{
	struct efa_dev *dev = container_of(eeq->edev, struct efa_dev, edev);

	if (likely(EFA_GET(&eqe->common, EFA_ADMIN_EQE_EVENT_TYPE) ==
		   EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION))
		efa_process_comp_eqe(dev, eqe);
	else
		ibdev_err_ratelimited(&dev->ibdev,
				      "Unknown event type received %lu",
				      EFA_GET(&eqe->common,
					      EFA_ADMIN_EQE_EVENT_TYPE));
}

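/* MSI-X interrupt handler for a completion event queue vector. */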
static irqreturn_t efa_intr_msix_comp(int irq, void *data)
{
	struct efa_eq *eq = data;
	struct efa_com_dev *edev = eq->eeq.edev;

	efa_com_eq_comp_intr_handler(edev, &eq->eeq);

	return IRQ_HANDLED;
}

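/*
 * MSI-X interrupt handler for the management vector: admin queue
 * completions and asynchronous event notifications (AENQ).
 */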
static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
{
	struct efa_dev *dev = data;

	efa_com_admin_q_comp_intr_handler(&dev->edev);
	efa_com_aenq_intr_handler(&dev->edev, data);

	return IRQ_HANDLED;
}

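/* Request a previously set up IRQ and apply its CPU affinity hint. */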
static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq)
{
	int err;

	err = request_irq(irq->irqn, irq->handler, 0, irq->name, irq->data);
	if (err) {
		dev_err(&dev->pdev->dev, "Failed to request irq %s (%d)\n",
			irq->name, err);
		return err;
	}

	irq_set_affinity_hint(irq->irqn, &irq->affinity_hint_mask);

	return 0;
}

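/*
 * Fill in the IRQ descriptor of a completion event queue and hint its
 * affinity to the CPU matching the vector index.
 */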
static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq,
			       int vector)
{
	u32 cpu;

	cpu = vector - EFA_COMP_EQS_VEC_BASE;
	snprintf(eq->irq.name, EFA_IRQNAME_SIZE, "efa-comp%d@pci:%s", cpu,
		 pci_name(dev->pdev));
	eq->irq.handler = efa_intr_msix_comp;
	eq->irq.data = eq;
	eq->irq.vector = vector;
	eq->irq.irqn = pci_irq_vector(dev->pdev, vector);
	cpumask_set_cpu(cpu, &eq->irq.affinity_hint_mask);
}

static void efa_free_irq(struct efa_dev *dev, struct efa_irq *irq)
{
	irq_set_affinity_hint(irq->irqn, NULL);
	free_irq(irq->irqn, irq->data);
}

static void efa_setup_mgmnt_irq(struct efa_dev *dev)
{
	u32 cpu;

	snprintf(dev->admin_irq.name, EFA_IRQNAME_SIZE,
		 "efa-mgmnt@pci:%s", pci_name(dev->pdev));
	dev->admin_irq.handler = efa_intr_msix_mgmnt;
	dev->admin_irq.data = dev;
	dev->admin_irq.vector = dev->admin_msix_vector_idx;
	dev->admin_irq.irqn = pci_irq_vector(dev->pdev,
					     dev->admin_msix_vector_idx);
	cpu = cpumask_first(cpu_online_mask);
	cpumask_set_cpu(cpu,
			&dev->admin_irq.affinity_hint_mask);
	dev_info(&dev->pdev->dev, "Setup irq:%d name:%s\n",
		 dev->admin_irq.irqn,
		 dev->admin_irq.name);
}

static int efa_set_mgmnt_irq(struct efa_dev *dev)
{
	efa_setup_mgmnt_irq(dev);

	return efa_request_irq(dev, &dev->admin_irq);
}

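/*
 * Request the doorbell BAR unless it is already covered by
 * EFA_BASE_BAR_MASK, and record its bus address and length.
 */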
static int efa_request_doorbell_bar(struct efa_dev *dev)
{
	u8 db_bar_idx = dev->dev_attr.db_bar;
	struct pci_dev *pdev = dev->pdev;
	int pci_mem_bars;
	int db_bar;
	int err;

	db_bar = BIT(db_bar_idx);
	if (!(db_bar & EFA_BASE_BAR_MASK)) {
		pci_mem_bars = pci_select_bars(pdev, IORESOURCE_MEM);
		if (db_bar & ~pci_mem_bars) {
			dev_err(&pdev->dev,
				"Doorbells BAR unavailable. Requested %#x, available %#x\n",
				db_bar, pci_mem_bars);
			return -ENODEV;
		}

		err = pci_request_selected_regions(pdev, db_bar, DRV_MODULE_NAME);
		if (err) {
			dev_err(&pdev->dev,
				"pci_request_selected_regions for bar %d failed %d\n",
				db_bar_idx, err);
			return err;
		}
	}

	dev->db_bar_addr = pci_resource_start(dev->pdev, db_bar_idx);
	dev->db_bar_len = pci_resource_len(dev->pdev, db_bar_idx);

	return 0;
}

static void efa_release_doorbell_bar(struct efa_dev *dev)
{
	if (!(BIT(dev->dev_attr.db_bar) & EFA_BASE_BAR_MASK))
		efa_release_bars(dev, BIT(dev->dev_attr.db_bar));
}

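/*
 * Apply device-provided hints to the MMIO read timeout and the admin
 * queue polling interval and completion timeout.
 */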
static void efa_update_hw_hints(struct efa_dev *dev,
				struct efa_com_get_hw_hints_result *hw_hints)
{
	struct efa_com_dev *edev = &dev->edev;

	if (hw_hints->mmio_read_timeout)
		edev->mmio_read.mmio_read_timeout =
			hw_hints->mmio_read_timeout * 1000;

	if (hw_hints->poll_interval)
		edev->aq.poll_interval = hw_hints->poll_interval;

	if (hw_hints->admin_completion_timeout)
		edev->aq.completion_timeout =
			hw_hints->admin_completion_timeout;
}

static void efa_stats_init(struct efa_dev *dev)
{
	atomic64_t *s = (atomic64_t *)&dev->stats;
	int i;

	for (i = 0; i < sizeof(dev->stats) / sizeof(*s); i++, s++)
		atomic64_set(s, 0);
}

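/*
 * Report host, kernel and driver information to the device. This is
 * best effort; errors are ignored.
 */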
static void efa_set_host_info(struct efa_dev *dev)
{
	struct efa_admin_set_feature_resp resp = {};
	struct efa_admin_set_feature_cmd cmd = {};
	struct efa_admin_host_info *hinf;
	u32 bufsz = sizeof(*hinf);
	dma_addr_t hinf_dma;

	if (!efa_com_check_supported_feature_id(&dev->edev,
						EFA_ADMIN_HOST_INFO))
		return;

	/* Failures in setting host info shall not disturb probe */
	hinf = dma_alloc_coherent(&dev->pdev->dev, bufsz, &hinf_dma,
				  GFP_KERNEL);
	if (!hinf)
		return;

	strscpy(hinf->os_dist_str, utsname()->release,
		sizeof(hinf->os_dist_str));
	hinf->os_type = EFA_ADMIN_OS_LINUX;
	strscpy(hinf->kernel_ver_str, utsname()->version,
		sizeof(hinf->kernel_ver_str));
	hinf->kernel_ver = LINUX_VERSION_CODE;
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE, 0);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, dev->pdev->bus->number);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE,
		PCI_SLOT(dev->pdev->devfn));
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION,
		PCI_FUNC(dev->pdev->devfn));
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR,
		EFA_COMMON_SPEC_VERSION_MAJOR);
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR,
		EFA_COMMON_SPEC_VERSION_MINOR);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_GDR, 0);

	efa_com_set_feature_ex(&dev->edev, &resp, &cmd, EFA_ADMIN_HOST_INFO,
			       hinf_dma, bufsz);

	dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
}

static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq)
{
	efa_com_eq_destroy(&dev->edev, &eq->eeq);
	efa_free_irq(dev, &eq->irq);
}

static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u8 msix_vec)
{
	int err;

	efa_setup_comp_irq(dev, eq, msix_vec);
	err = efa_request_irq(dev, &eq->irq);
	if (err)
		return err;

	err = efa_com_eq_init(&dev->edev, &eq->eeq, efa_process_eqe,
			      dev->dev_attr.max_eq_depth, msix_vec);
	if (err)
		goto err_free_comp_irq;

	return 0;

err_free_comp_irq:
	efa_free_irq(dev, &eq->irq);
	return err;
}

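/*
 * Create one completion event queue per available completion MSI-X
 * vector, capped by the device's max_eq limit.
 */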
static int efa_create_eqs(struct efa_dev *dev)
{
	unsigned int neqs = dev->dev_attr.max_eq;
	int err;
	int i;

	neqs = min_t(unsigned int, neqs,
		     dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);

	dev->neqs = neqs;
	dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
	if (!dev->eqs)
		return -ENOMEM;

	for (i = 0; i < neqs; i++) {
		err = efa_create_eq(dev, &dev->eqs[i],
				    i + EFA_COMP_EQS_VEC_BASE);
		if (err)
			goto err_destroy_eqs;
	}

	return 0;

err_destroy_eqs:
	for (i--; i >= 0; i--)
		efa_destroy_eq(dev, &dev->eqs[i]);
	kfree(dev->eqs);

	return err;
}

static void efa_destroy_eqs(struct efa_dev *dev)
{
	int i;

	for (i = 0; i < dev->neqs; i++)
		efa_destroy_eq(dev, &dev->eqs[i]);

	kfree(dev->eqs);
}

static const struct ib_device_ops efa_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_EFA,
	.uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,

	.alloc_hw_port_stats = efa_alloc_hw_port_stats,
	.alloc_hw_device_stats = efa_alloc_hw_device_stats,
	.alloc_pd = efa_alloc_pd,
	.alloc_ucontext = efa_alloc_ucontext,
	.create_cq = efa_create_cq,
	.create_qp = efa_create_qp,
	.create_user_ah = efa_create_ah,
	.dealloc_pd = efa_dealloc_pd,
	.dealloc_ucontext = efa_dealloc_ucontext,
	.dereg_mr = efa_dereg_mr,
	.destroy_ah = efa_destroy_ah,
	.destroy_cq = efa_destroy_cq,
	.destroy_qp = efa_destroy_qp,
	.get_hw_stats = efa_get_hw_stats,
	.get_link_layer = efa_port_link_layer,
	.get_port_immutable = efa_get_port_immutable,
	.mmap = efa_mmap,
	.mmap_free = efa_mmap_free,
	.modify_qp = efa_modify_qp,
	.query_device = efa_query_device,
	.query_gid = efa_query_gid,
	.query_pkey = efa_query_pkey,
	.query_port = efa_query_port,
	.query_qp = efa_query_qp,
	.reg_user_mr = efa_reg_mr,
	.reg_user_mr_dmabuf = efa_reg_user_mr_dmabuf,

	INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, efa_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
};

static int efa_ib_device_add(struct efa_dev *dev)
{
	struct efa_com_get_hw_hints_result hw_hints;
	struct pci_dev *pdev = dev->pdev;
	int err;

	efa_stats_init(dev);

	err = efa_com_get_device_attr(&dev->edev, &dev->dev_attr);
	if (err)
		return err;

	dev_dbg(&dev->pdev->dev, "Doorbells bar (%d)\n", dev->dev_attr.db_bar);
	err = efa_request_doorbell_bar(dev);
	if (err)
		return err;

	err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
	if (err)
		goto err_release_doorbell_bar;

	efa_update_hw_hints(dev, &hw_hints);

	/* Try to enable all the available aenq groups */
	err = efa_com_set_aenq_config(&dev->edev, EFA_AENQ_ENABLED_GROUPS);
	if (err)
		goto err_release_doorbell_bar;

	err = efa_create_eqs(dev);
	if (err)
		goto err_release_doorbell_bar;

	efa_set_host_info(dev);

	dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
	dev->ibdev.node_guid = dev->dev_attr.guid;
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->neqs ?: 1;
	dev->ibdev.dev.parent = &pdev->dev;

	ib_set_device_ops(&dev->ibdev, &efa_dev_ops);

	dev->ibdev.driver_def = efa_uapi_defs;

	err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
	if (err)
		goto err_destroy_eqs;

	ibdev_info(&dev->ibdev, "IB device registered\n");

	return 0;

err_destroy_eqs:
	efa_destroy_eqs(dev);
err_release_doorbell_bar:
	efa_release_doorbell_bar(dev);
	return err;
}

static void efa_ib_device_remove(struct efa_dev *dev)
{
	ibdev_info(&dev->ibdev, "Unregister ib device\n");
	ib_unregister_device(&dev->ibdev);
	efa_destroy_eqs(dev);
	efa_release_doorbell_bar(dev);
}

static void efa_disable_msix(struct efa_dev *dev)
{
	pci_free_irq_vectors(dev->pdev);
}

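/*
 * Allocate MSI-X vectors: the management vector plus up to one
 * completion vector per online CPU, accepting fewer if the device or
 * platform provides less.
 */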
static int efa_enable_msix(struct efa_dev *dev)
{
	int max_vecs, num_vecs;

	/*
	 * Reserve the max MSI-X vectors we might need; one vector is
	 * reserved for admin.
	 */
	max_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
			 num_online_cpus() + 1);
	dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
		max_vecs);

	dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
	num_vecs = pci_alloc_irq_vectors(dev->pdev, 1,
					 max_vecs, PCI_IRQ_MSIX);

	if (num_vecs < 0) {
		dev_err(&dev->pdev->dev, "Failed to enable MSI-X. error %d\n",
			num_vecs);
		return -ENOSPC;
	}

	dev_dbg(&dev->pdev->dev, "Allocated %d MSI-X vectors\n", num_vecs);

	dev->num_irq_vectors = num_vecs;

	return 0;
}

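/* Reset the device, validate its versions and configure the DMA mask. */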
static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev)
{
	int dma_width;
	int err;

	err = efa_com_dev_reset(edev, EFA_REGS_RESET_NORMAL);
	if (err)
		return err;

	err = efa_com_validate_version(edev);
	if (err)
		return err;

	dma_width = efa_com_get_dma_width(edev);
	if (dma_width < 0) {
		err = dma_width;
		return err;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_width));
	if (err) {
		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", err);
		return err;
	}

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	return 0;
}

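/*
 * Enable the PCI function, map the register BAR, initialize the device
 * and set up MSI-X, the management IRQ and the admin queue.
 */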
static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
{
	struct efa_com_dev *edev;
	struct efa_dev *dev;
	int pci_mem_bars;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return ERR_PTR(err);
	}

	pci_set_master(pdev);

	dev = ib_alloc_device(efa_dev, ibdev);
	if (!dev) {
		dev_err(&pdev->dev, "Device alloc failed\n");
		err = -ENOMEM;
		goto err_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	edev = &dev->edev;
	edev->efa_dev = dev;
	edev->dmadev = &pdev->dev;
	dev->pdev = pdev;
	xa_init(&dev->cqs_xa);

	pci_mem_bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (EFA_BASE_BAR_MASK & ~pci_mem_bars) {
		dev_err(&pdev->dev, "BARs unavailable. Requested %#x, available %#x\n",
			(int)EFA_BASE_BAR_MASK, pci_mem_bars);
		err = -ENODEV;
		goto err_ibdev_destroy;
	}
	err = pci_request_selected_regions(pdev, EFA_BASE_BAR_MASK, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			err);
		goto err_ibdev_destroy;
	}

	dev->reg_bar_addr = pci_resource_start(pdev, EFA_REG_BAR);
	dev->reg_bar_len = pci_resource_len(pdev, EFA_REG_BAR);
	dev->mem_bar_addr = pci_resource_start(pdev, EFA_MEM_BAR);
	dev->mem_bar_len = pci_resource_len(pdev, EFA_MEM_BAR);

	edev->reg_bar = devm_ioremap(&pdev->dev,
				     dev->reg_bar_addr,
				     dev->reg_bar_len);
	if (!edev->reg_bar) {
		dev_err(&pdev->dev, "Failed to remap register bar\n");
		err = -EFAULT;
		goto err_release_bars;
	}

	err = efa_com_mmio_reg_read_init(edev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init readless MMIO\n");
		goto err_iounmap;
	}

	err = efa_device_init(edev, pdev);
	if (err) {
		dev_err(&pdev->dev, "EFA device init failed\n");
		if (err == -ETIME)
			err = -EPROBE_DEFER;
		goto err_reg_read_destroy;
	}

	err = efa_enable_msix(dev);
	if (err)
		goto err_reg_read_destroy;

	edev->aq.msix_vector_idx = dev->admin_msix_vector_idx;
	edev->aenq.msix_vector_idx = dev->admin_msix_vector_idx;

	err = efa_set_mgmnt_irq(dev);
	if (err)
		goto err_disable_msix;

	err = efa_com_admin_init(edev, &aenq_handlers);
	if (err)
		goto err_free_mgmnt_irq;

	return dev;

err_free_mgmnt_irq:
	efa_free_irq(dev, &dev->admin_irq);
err_disable_msix:
	efa_disable_msix(dev);
err_reg_read_destroy:
	efa_com_mmio_reg_read_destroy(edev);
err_iounmap:
	devm_iounmap(&pdev->dev, edev->reg_bar);
err_release_bars:
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
err_ibdev_destroy:
	ib_dealloc_device(&dev->ibdev);
err_disable_device:
	pci_disable_device(pdev);
	return ERR_PTR(err);
}

static void efa_remove_device(struct pci_dev *pdev,
			      enum efa_regs_reset_reason_types reset_reason)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);
	struct efa_com_dev *edev;

	edev = &dev->edev;
	efa_com_dev_reset(edev, reset_reason);
	efa_com_admin_destroy(edev);
	efa_free_irq(dev, &dev->admin_irq);
	efa_disable_msix(dev);
	efa_com_mmio_reg_read_destroy(edev);
	devm_iounmap(&pdev->dev, edev->reg_bar);
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
	xa_destroy(&dev->cqs_xa);
	ib_dealloc_device(&dev->ibdev);
	pci_disable_device(pdev);
}

static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct efa_dev *dev;
	int err;

	dev = efa_probe_device(pdev);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	err = efa_ib_device_add(dev);
	if (err)
		goto err_remove_device;

	return 0;

err_remove_device:
	efa_remove_device(pdev, EFA_REGS_RESET_INIT_ERR);
	return err;
}

static void efa_remove(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);

	efa_ib_device_remove(dev);
	efa_remove_device(pdev, EFA_REGS_RESET_NORMAL);
}

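/*
 * Quiesce the device on shutdown: destroy the event queues, reset the
 * device and release the management IRQ and MSI-X vectors.
 */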
static void efa_shutdown(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);

	efa_destroy_eqs(dev);
	efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_SHUTDOWN);
	efa_free_irq(dev, &dev->admin_irq);
	efa_disable_msix(dev);
}

static struct pci_driver efa_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = efa_pci_tbl,
	.probe = efa_probe,
	.remove = efa_remove,
	.shutdown = efa_shutdown,
};

module_pci_driver(efa_pci_driver);