// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "efa.h"

#define PCI_DEV_ID_EFA0_VF 0xefa0
#define PCI_DEV_ID_EFA1_VF 0xefa1
#define PCI_DEV_ID_EFA2_VF 0xefa2

static const struct pci_device_id efa_pci_tbl[] = {
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA0_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA1_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA2_VF) },
	{ }
};

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_DEVICE_TABLE(pci, efa_pci_tbl);

#define EFA_REG_BAR 0
#define EFA_MEM_BAR 2
#define EFA_BASE_BAR_MASK (BIT(EFA_REG_BAR) | BIT(EFA_MEM_BAR))

#define EFA_AENQ_ENABLED_GROUPS \
	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))

extern const struct uapi_definition efa_uapi_defs[];

/* This handler will be called for unknown event groups or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
				       struct efa_admin_aenq_entry *aenq_e)
{
	struct efa_dev *dev = (struct efa_dev *)data;

	ibdev_err(&dev->ibdev,
		  "Unknown event was received or event with unimplemented handler\n");
}

static void efa_keep_alive(void *data, struct efa_admin_aenq_entry *aenq_e)
{
	struct efa_dev *dev = (struct efa_dev *)data;

	atomic64_inc(&dev->stats.keep_alive_rcvd);
}

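/*
 * AENQ dispatch table: keep-alive events update a driver statistic; any
 * other event group falls through to the catch-all handler above.
 */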
static struct efa_aenq_handlers aenq_handlers = {
	.handlers = {
		[EFA_ADMIN_KEEP_ALIVE] = efa_keep_alive,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

static void efa_release_bars(struct efa_dev *dev, int bars_mask)
{
	struct pci_dev *pdev = dev->pdev;
	int release_bars;

	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & bars_mask;
	pci_release_selected_regions(pdev, release_bars);
}

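/*
 * Completion event: look the CQ up by number in the device xarray and invoke
 * its completion handler.
 */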
static void efa_process_comp_eqe(struct efa_dev *dev, struct efa_admin_eqe *eqe)
{
	u16 cqn = eqe->u.comp_event.cqn;
	struct efa_cq *cq;

	/* Safe to load as we're in irq and removal calls synchronize_irq() */
	cq = xa_load(&dev->cqs_xa, cqn);
	if (unlikely(!cq)) {
		ibdev_err_ratelimited(&dev->ibdev,
				      "Completion event on non-existent CQ[%u]",
				      cqn);
		return;
	}

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

static void efa_process_eqe(struct efa_com_eq *eeq, struct efa_admin_eqe *eqe)
{
	struct efa_dev *dev = container_of(eeq->edev, struct efa_dev, edev);

	if (likely(EFA_GET(&eqe->common, EFA_ADMIN_EQE_EVENT_TYPE) ==
		   EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION))
		efa_process_comp_eqe(dev, eqe);
	else
		ibdev_err_ratelimited(&dev->ibdev,
				      "Unknown event type received %lu",
				      EFA_GET(&eqe->common,
					      EFA_ADMIN_EQE_EVENT_TYPE));
}

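/* MSI-X handler for a completion vector: forward the interrupt to its EQ */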
static irqreturn_t efa_intr_msix_comp(int irq, void *data)
{
	struct efa_eq *eq = data;
	struct efa_com_dev *edev = eq->eeq.edev;

	efa_com_eq_comp_intr_handler(edev, &eq->eeq);

	return IRQ_HANDLED;
}

static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
{
	struct efa_dev *dev = data;

	efa_com_admin_q_comp_intr_handler(&dev->edev);
	efa_com_aenq_intr_handler(&dev->edev, data);

	return IRQ_HANDLED;
}

static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq)
{
	int err;

	err = request_irq(irq->irqn, irq->handler, 0, irq->name, irq->data);
	if (err) {
		dev_err(&dev->pdev->dev, "Failed to request irq %s (%d)\n",
			irq->name, err);
		return err;
	}

	irq_set_affinity_hint(irq->irqn, &irq->affinity_hint_mask);

	return 0;
}

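/*
 * Completion vectors start at EFA_COMP_EQS_VEC_BASE; the offset within that
 * range is also used as the CPU for the IRQ affinity hint.
 */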
static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq,
			       int vector)
{
	u32 cpu;

	cpu = vector - EFA_COMP_EQS_VEC_BASE;
	snprintf(eq->irq.name, EFA_IRQNAME_SIZE, "efa-comp%d@pci:%s", cpu,
		 pci_name(dev->pdev));
	eq->irq.handler = efa_intr_msix_comp;
	eq->irq.data = eq;
	eq->irq.vector = vector;
	eq->irq.irqn = pci_irq_vector(dev->pdev, vector);
	cpumask_set_cpu(cpu, &eq->irq.affinity_hint_mask);
}

static void efa_free_irq(struct efa_dev *dev, struct efa_irq *irq)
{
	irq_set_affinity_hint(irq->irqn, NULL);
	free_irq(irq->irqn, irq->data);
}

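/*
 * The management vector serves both admin queue completions and asynchronous
 * event notifications; its affinity hint points at the first online CPU.
 */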
static void efa_setup_mgmnt_irq(struct efa_dev *dev)
{
	u32 cpu;

	snprintf(dev->admin_irq.name, EFA_IRQNAME_SIZE,
		 "efa-mgmnt@pci:%s", pci_name(dev->pdev));
	dev->admin_irq.handler = efa_intr_msix_mgmnt;
	dev->admin_irq.data = dev;
	dev->admin_irq.vector = dev->admin_msix_vector_idx;
	dev->admin_irq.irqn = pci_irq_vector(dev->pdev,
					     dev->admin_msix_vector_idx);
	cpu = cpumask_first(cpu_online_mask);
	cpumask_set_cpu(cpu,
			&dev->admin_irq.affinity_hint_mask);
	dev_info(&dev->pdev->dev, "Setup irq:%d name:%s\n",
		 dev->admin_irq.irqn,
		 dev->admin_irq.name);
}

static int efa_set_mgmnt_irq(struct efa_dev *dev)
{
	efa_setup_mgmnt_irq(dev);

	return efa_request_irq(dev, &dev->admin_irq);
}

static int efa_request_doorbell_bar(struct efa_dev *dev)
{
	u8 db_bar_idx = dev->dev_attr.db_bar;
	struct pci_dev *pdev = dev->pdev;
	int bars;
	int err;

	if (!(BIT(db_bar_idx) & EFA_BASE_BAR_MASK)) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(db_bar_idx);

		err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
		if (err) {
			dev_err(&dev->pdev->dev,
				"pci_request_selected_regions for bar %d failed %d\n",
				db_bar_idx, err);
			return err;
		}
	}

	dev->db_bar_addr = pci_resource_start(dev->pdev, db_bar_idx);
	dev->db_bar_len = pci_resource_len(dev->pdev, db_bar_idx);

	return 0;
}

static void efa_release_doorbell_bar(struct efa_dev *dev)
{
	if (!(BIT(dev->dev_attr.db_bar) & EFA_BASE_BAR_MASK))
		efa_release_bars(dev, BIT(dev->dev_attr.db_bar));
}

static void efa_update_hw_hints(struct efa_dev *dev,
				struct efa_com_get_hw_hints_result *hw_hints)
{
	struct efa_com_dev *edev = &dev->edev;

	if (hw_hints->mmio_read_timeout)
		edev->mmio_read.mmio_read_timeout =
			hw_hints->mmio_read_timeout * 1000;

	if (hw_hints->poll_interval)
		edev->aq.poll_interval = hw_hints->poll_interval;

	if (hw_hints->admin_completion_timeout)
		edev->aq.completion_timeout =
			hw_hints->admin_completion_timeout;
}

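/*
 * struct efa_stats consists solely of atomic64_t counters, so it can be
 * walked as an array and zeroed member by member.
 */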
static void efa_stats_init(struct efa_dev *dev)
{
	atomic64_t *s = (atomic64_t *)&dev->stats;
	int i;

	for (i = 0; i < sizeof(dev->stats) / sizeof(*s); i++, s++)
		atomic64_set(s, 0);
}

static void efa_set_host_info(struct efa_dev *dev)
{
	struct efa_admin_set_feature_resp resp = {};
	struct efa_admin_set_feature_cmd cmd = {};
	struct efa_admin_host_info *hinf;
	u32 bufsz = sizeof(*hinf);
	dma_addr_t hinf_dma;

	if (!efa_com_check_supported_feature_id(&dev->edev,
						EFA_ADMIN_HOST_INFO))
		return;

	/* Failures in host info set shall not disturb probe */
	hinf = dma_alloc_coherent(&dev->pdev->dev, bufsz, &hinf_dma,
				  GFP_KERNEL);
	if (!hinf)
		return;

	strscpy(hinf->os_dist_str, utsname()->release,
		sizeof(hinf->os_dist_str));
	hinf->os_type = EFA_ADMIN_OS_LINUX;
	strscpy(hinf->kernel_ver_str, utsname()->version,
		sizeof(hinf->kernel_ver_str));
	hinf->kernel_ver = LINUX_VERSION_CODE;
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE, 0);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, dev->pdev->bus->number);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE,
		PCI_SLOT(dev->pdev->devfn));
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION,
		PCI_FUNC(dev->pdev->devfn));
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR,
		EFA_COMMON_SPEC_VERSION_MAJOR);
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR,
		EFA_COMMON_SPEC_VERSION_MINOR);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_GDR, 0);

	efa_com_set_feature_ex(&dev->edev, &resp, &cmd, EFA_ADMIN_HOST_INFO,
			       hinf_dma, bufsz);

	dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
}

static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq)
{
	efa_com_eq_destroy(&dev->edev, &eq->eeq);
	efa_free_irq(dev, &eq->irq);
}

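/* Set up and request the EQ's interrupt before creating the EQ itself */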
static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u8 msix_vec)
{
	int err;

	efa_setup_comp_irq(dev, eq, msix_vec);
	err = efa_request_irq(dev, &eq->irq);
	if (err)
		return err;

	err = efa_com_eq_init(&dev->edev, &eq->eeq, efa_process_eqe,
			      dev->dev_attr.max_eq_depth, msix_vec);
	if (err)
		goto err_free_comp_irq;

	return 0;

err_free_comp_irq:
	efa_free_irq(dev, &eq->irq);
	return err;
}

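/* Create one completion EQ per online CPU, capped by the device's max_eq */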
static int efa_create_eqs(struct efa_dev *dev)
{
	unsigned int neqs = dev->dev_attr.max_eq;
	int err;
	int i;

	neqs = min_t(unsigned int, neqs, num_online_cpus());
	dev->neqs = neqs;
	dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
	if (!dev->eqs)
		return -ENOMEM;

	for (i = 0; i < neqs; i++) {
		err = efa_create_eq(dev, &dev->eqs[i],
				    i + EFA_COMP_EQS_VEC_BASE);
		if (err)
			goto err_destroy_eqs;
	}

	return 0;

err_destroy_eqs:
	for (i--; i >= 0; i--)
		efa_destroy_eq(dev, &dev->eqs[i]);
	kfree(dev->eqs);

	return err;
}

static void efa_destroy_eqs(struct efa_dev *dev)
{
	int i;

	for (i = 0; i < dev->neqs; i++)
		efa_destroy_eq(dev, &dev->eqs[i]);

	kfree(dev->eqs);
}

static const struct ib_device_ops efa_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_EFA,
	.uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,

	.alloc_hw_port_stats = efa_alloc_hw_port_stats,
	.alloc_hw_device_stats = efa_alloc_hw_device_stats,
	.alloc_pd = efa_alloc_pd,
	.alloc_ucontext = efa_alloc_ucontext,
	.create_cq = efa_create_cq,
	.create_qp = efa_create_qp,
	.create_user_ah = efa_create_ah,
	.dealloc_pd = efa_dealloc_pd,
	.dealloc_ucontext = efa_dealloc_ucontext,
	.dereg_mr = efa_dereg_mr,
	.destroy_ah = efa_destroy_ah,
	.destroy_cq = efa_destroy_cq,
	.destroy_qp = efa_destroy_qp,
	.get_hw_stats = efa_get_hw_stats,
	.get_link_layer = efa_port_link_layer,
	.get_port_immutable = efa_get_port_immutable,
	.mmap = efa_mmap,
	.mmap_free = efa_mmap_free,
	.modify_qp = efa_modify_qp,
	.query_device = efa_query_device,
	.query_gid = efa_query_gid,
	.query_pkey = efa_query_pkey,
	.query_port = efa_query_port,
	.query_qp = efa_query_qp,
	.reg_user_mr = efa_reg_mr,
	.reg_user_mr_dmabuf = efa_reg_user_mr_dmabuf,

	INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, efa_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
};

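/*
 * Query device attributes, apply HW hints, create event queues and register
 * the device with the RDMA core.
 */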
static int efa_ib_device_add(struct efa_dev *dev)
{
	struct efa_com_get_hw_hints_result hw_hints;
	struct pci_dev *pdev = dev->pdev;
	int err;

	efa_stats_init(dev);

	err = efa_com_get_device_attr(&dev->edev, &dev->dev_attr);
	if (err)
		return err;

	dev_dbg(&dev->pdev->dev, "Doorbells bar (%d)\n", dev->dev_attr.db_bar);
	err = efa_request_doorbell_bar(dev);
	if (err)
		return err;

	err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
	if (err)
		goto err_release_doorbell_bar;

	efa_update_hw_hints(dev, &hw_hints);

	/* Try to enable all the available aenq groups */
	err = efa_com_set_aenq_config(&dev->edev, EFA_AENQ_ENABLED_GROUPS);
	if (err)
		goto err_release_doorbell_bar;

	err = efa_create_eqs(dev);
	if (err)
		goto err_release_doorbell_bar;

	efa_set_host_info(dev);

	dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->neqs ?: 1;
	dev->ibdev.dev.parent = &pdev->dev;

	ib_set_device_ops(&dev->ibdev, &efa_dev_ops);

	dev->ibdev.driver_def = efa_uapi_defs;

	err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
	if (err)
		goto err_destroy_eqs;

	ibdev_info(&dev->ibdev, "IB device registered\n");

	return 0;

err_destroy_eqs:
	efa_destroy_eqs(dev);
err_release_doorbell_bar:
	efa_release_doorbell_bar(dev);
	return err;
}

static void efa_ib_device_remove(struct efa_dev *dev)
{
	ibdev_info(&dev->ibdev, "Unregister ib device\n");
	ib_unregister_device(&dev->ibdev);
	efa_destroy_eqs(dev);
	efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
	efa_release_doorbell_bar(dev);
}

static void efa_disable_msix(struct efa_dev *dev)
{
	pci_free_irq_vectors(dev->pdev);
}

static int efa_enable_msix(struct efa_dev *dev)
{
	int msix_vecs, irq_num;

	/*
	 * Reserve the max msix vectors we might need, one vector is reserved
	 * for admin.
	 */
	msix_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
			  num_online_cpus() + 1);
	dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
		msix_vecs);

	dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
	irq_num = pci_alloc_irq_vectors(dev->pdev, msix_vecs,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_num < 0) {
		dev_err(&dev->pdev->dev, "Failed to enable MSI-X. irq_num %d\n",
			irq_num);
		return -ENOSPC;
	}

	if (irq_num != msix_vecs) {
		efa_disable_msix(dev);
		dev_err(&dev->pdev->dev,
			"Allocated %d MSI-X (out of %d requested)\n",
			irq_num, msix_vecs);
		return -ENOSPC;
	}

	return 0;
}

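/* Reset the device, validate interface versions and set the DMA mask */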
static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev)
{
	int dma_width;
	int err;

	err = efa_com_dev_reset(edev, EFA_REGS_RESET_NORMAL);
	if (err)
		return err;

	err = efa_com_validate_version(edev);
	if (err)
		return err;

	dma_width = efa_com_get_dma_width(edev);
	if (dma_width < 0) {
		err = dma_width;
		return err;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_width));
	if (err) {
		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", err);
		return err;
	}

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	return 0;
}

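/*
 * Bring the PCI function up far enough to talk to the device: map the
 * register BAR, initialize readless MMIO, enable MSI-X and start the admin
 * queue. IB registration happens later in efa_ib_device_add().
 */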
static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
{
	struct efa_com_dev *edev;
	struct efa_dev *dev;
	int bars;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return ERR_PTR(err);
	}

	pci_set_master(pdev);

	dev = ib_alloc_device(efa_dev, ibdev);
	if (!dev) {
		dev_err(&pdev->dev, "Device alloc failed\n");
		err = -ENOMEM;
		goto err_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	edev = &dev->edev;
	edev->efa_dev = dev;
	edev->dmadev = &pdev->dev;
	dev->pdev = pdev;
	xa_init(&dev->cqs_xa);

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK;
	err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			err);
		goto err_ibdev_destroy;
	}

	dev->reg_bar_addr = pci_resource_start(pdev, EFA_REG_BAR);
	dev->reg_bar_len = pci_resource_len(pdev, EFA_REG_BAR);
	dev->mem_bar_addr = pci_resource_start(pdev, EFA_MEM_BAR);
	dev->mem_bar_len = pci_resource_len(pdev, EFA_MEM_BAR);

	edev->reg_bar = devm_ioremap(&pdev->dev,
				     dev->reg_bar_addr,
				     dev->reg_bar_len);
	if (!edev->reg_bar) {
		dev_err(&pdev->dev, "Failed to remap register bar\n");
		err = -EFAULT;
		goto err_release_bars;
	}

	err = efa_com_mmio_reg_read_init(edev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init readless MMIO\n");
		goto err_iounmap;
	}

	err = efa_device_init(edev, pdev);
	if (err) {
		dev_err(&pdev->dev, "EFA device init failed\n");
		if (err == -ETIME)
			err = -EPROBE_DEFER;
		goto err_reg_read_destroy;
	}

	err = efa_enable_msix(dev);
	if (err)
		goto err_reg_read_destroy;

	edev->aq.msix_vector_idx = dev->admin_msix_vector_idx;
	edev->aenq.msix_vector_idx = dev->admin_msix_vector_idx;

	err = efa_set_mgmnt_irq(dev);
	if (err)
		goto err_disable_msix;

	err = efa_com_admin_init(edev, &aenq_handlers);
	if (err)
		goto err_free_mgmnt_irq;

	return dev;

err_free_mgmnt_irq:
	efa_free_irq(dev, &dev->admin_irq);
err_disable_msix:
	efa_disable_msix(dev);
err_reg_read_destroy:
	efa_com_mmio_reg_read_destroy(edev);
err_iounmap:
	devm_iounmap(&pdev->dev, edev->reg_bar);
err_release_bars:
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
err_ibdev_destroy:
	ib_dealloc_device(&dev->ibdev);
err_disable_device:
	pci_disable_device(pdev);
	return ERR_PTR(err);
}

static void efa_remove_device(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);
	struct efa_com_dev *edev;

	edev = &dev->edev;
	efa_com_admin_destroy(edev);
	efa_free_irq(dev, &dev->admin_irq);
	efa_disable_msix(dev);
	efa_com_mmio_reg_read_destroy(edev);
	devm_iounmap(&pdev->dev, edev->reg_bar);
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
	xa_destroy(&dev->cqs_xa);
	ib_dealloc_device(&dev->ibdev);
	pci_disable_device(pdev);
}

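/* Probe is split in two: device bring-up, then IB device registration */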
static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct efa_dev *dev;
	int err;

	dev = efa_probe_device(pdev);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	err = efa_ib_device_add(dev);
	if (err)
		goto err_remove_device;

	return 0;

err_remove_device:
	efa_remove_device(pdev);
	return err;
}

static void efa_remove(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);

	efa_ib_device_remove(dev);
	efa_remove_device(pdev);
}

static struct pci_driver efa_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = efa_pci_tbl,
	.probe = efa_probe,
	.remove = efa_remove,
};

module_pci_driver(efa_pci_driver);