// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR "Intel Corporation"
#define IFCVF_DRIVER_NAME "ifcvf"

static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;

	if (vf->config_cb.callback)
		return vf->config_cb.callback(vf->config_cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
{
	struct vring_info *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	struct vring_info *vring;
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		vring = &vf->vring[i];
		if (vring->cb.callback)
			vring->cb.callback(vring->cb.private);
	}

	return IRQ_HANDLED;
}

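/*
 * Device-level interrupt handler for the single shared vector case:
 * check the ISR status byte for a config change, then service all
 * virtqueue callbacks.
 */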
static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	u8 isr;

	isr = vp_ioread8(vf->isr);
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		ifcvf_config_changed(irq, arg);

	return ifcvf_vqs_reused_intr_handler(irq, arg);
}

static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		if (vf->vring[i].irq != -EINVAL) {
			devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
			vf->vring[i].irq = -EINVAL;
		}
	}
}

static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;

	if (vf->vqs_reused_irq != -EINVAL) {
		devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
		vf->vqs_reused_irq = -EINVAL;
	}
}

static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = &adapter->vf;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		ifcvf_free_per_vq_irq(adapter);
	else
		ifcvf_free_vqs_reused_irq(adapter);
}

static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;

	if (vf->config_irq == -EINVAL)
		return;

	/* If the irq is shared by all vqs and the config interrupt
	 * (MSIX_VECTOR_DEV_SHARED), it has already been freed in
	 * ifcvf_free_vq_irq(), so only free the config irq for the
	 * other vector layouts.
	 */
	if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
		devm_free_irq(&pdev->dev, vf->config_irq, vf);
		vf->config_irq = -EINVAL;
	}
}

static void ifcvf_free_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ifcvf_free_vq_irq(adapter);
	ifcvf_free_config_irq(adapter);
	ifcvf_free_irq_vectors(pdev);
}

/* ifcvf MSIX vector allocator: this helper tries to allocate
 * vectors for all virtqueues and the config interrupt.
 * It returns the number of allocated vectors, or a negative
 * value on failure.
 */
static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int max_intr, ret;

	/* all queues and config interrupt */
	max_intr = vf->nr_vring + 1;
	ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
		return ret;
	}

	if (ret < max_intr)
		IFCVF_INFO(pdev,
			   "Requested %u vectors, however only %u allocated, lower performance\n",
			   max_intr, ret);

	return ret;
}

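/* One MSIX vector (and one irq) per virtqueue: vector i serves vq i. */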
static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i, vector, ret, irq;

	vf->vqs_reused_irq = -EINVAL;
	for (i = 0; i < vf->nr_vring; i++) {
		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
		vector = i;
		irq = pci_irq_vector(pdev, vector);
		ret = devm_request_irq(&pdev->dev, irq,
				       ifcvf_vq_intr_handler, 0,
				       vf->vring[i].msix_name,
				       &vf->vring[i]);
		if (ret) {
			IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
			goto err;
		}

		vf->vring[i].irq = irq;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	return 0;
err:
	ifcvf_free_irq(adapter);

	return -EFAULT;
}

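/*
 * All virtqueues share MSIX vector 0 and a single irq; the per-vq
 * irq fields stay -EINVAL so callers know there is no dedicated irq.
 */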
static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i, vector, ret, irq;

	vector = 0;
	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq\n", pci_name(pdev));
	irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, irq,
			       ifcvf_vqs_reused_intr_handler, 0,
			       vf->vring[0].msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
		goto err;
	}

	vf->vqs_reused_irq = irq;
	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].irq = -EINVAL;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	return 0;
err:
	ifcvf_free_irq(adapter);

	return -EFAULT;
}

static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i, vector, ret, irq;

	vector = 0;
	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq\n", pci_name(pdev));
	irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, irq,
			       ifcvf_dev_intr_handler, 0,
			       vf->vring[0].msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request irq for the device\n");
		goto err;
	}

	vf->vqs_reused_irq = irq;
	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].irq = -EINVAL;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	vf->config_irq = irq;
	ret = ifcvf_set_config_vector(vf, vector);
	if (ret == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(pdev, "No msix vector for device config\n");
		goto err;
	}

	return 0;
err:
	ifcvf_free_irq(adapter);

	return -EFAULT;
}

static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = &adapter->vf;
	int ret;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		ret = ifcvf_request_per_vq_irq(adapter);
	else
		ret = ifcvf_request_vqs_reused_irq(adapter);

	return ret;
}

static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int config_vector, ret;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		config_vector = vf->nr_vring;
	else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
		/* vector 0 for vqs and 1 for config interrupt */
		config_vector = 1;
	else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
		/* re-use the vqs vector */
		return 0;
	else
		return -EINVAL;

	snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
		 pci_name(pdev));
	vf->config_irq = pci_irq_vector(pdev, config_vector);
	ret = devm_request_irq(&pdev->dev, vf->config_irq,
			       ifcvf_config_changed, 0,
			       vf->config_msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request config irq\n");
		goto err;
	}

	ret = ifcvf_set_config_vector(vf, config_vector);
	if (ret == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(pdev, "No msix vector for device config\n");
		goto err;
	}

	return 0;
err:
	ifcvf_free_irq(adapter);

	return -EFAULT;
}

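/*
 * Request interrupts with the best vector layout the device gives us:
 * a vector per vq plus one for config when enough vectors are
 * allocated, a shared vq vector plus a separate config vector when
 * fewer are available, or one vector shared by everything as the
 * last resort.
 */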
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = &adapter->vf;
	int nvectors, ret, max_intr;

	nvectors = ifcvf_alloc_vectors(adapter);
	if (nvectors <= 0)
		return -EFAULT;

	vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
	max_intr = vf->nr_vring + 1;
	if (nvectors < max_intr)
		vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;

	if (nvectors == 1) {
		vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
		ret = ifcvf_request_dev_irq(adapter);

		return ret;
	}

	ret = ifcvf_request_vq_irq(adapter);
	if (ret)
		return ret;

	ret = ifcvf_request_config_irq(adapter);
	if (ret)
		return ret;

	return 0;
}

static int ifcvf_start_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	u8 status;
	int ret;

	ret = ifcvf_start_hw(vf);
	if (ret < 0) {
		status = ifcvf_get_status(vf);
		status |= VIRTIO_CONFIG_S_FAILED;
		ifcvf_set_status(vf, status);
	}

	return ret;
}

static int ifcvf_stop_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	int i;

	for (i = 0; i < vf->nr_vring; i++)
		vf->vring[i].cb.callback = NULL;

	ifcvf_stop_hw(vf);

	return 0;
}

static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].last_avail_idx = 0;
		vf->vring[i].desc = 0;
		vf->vring[i].avail = 0;
		vf->vring[i].used = 0;
		vf->vring[i].ready = 0;
		vf->vring[i].cb.callback = NULL;
		vf->vring[i].cb.private = NULL;
	}

	ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

	return &adapter->vf;
}

static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;
	u32 type = vf->dev_type;
	u64 features;

	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK) {
		features = ifcvf_get_features(vf);
	} else {
		features = 0;
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
	}

	return features;
}

static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	int ret;

	ret = ifcvf_verify_min_features(vf, features);
	if (ret)
		return ret;

	vf->req_features = features;

	return 0;
}

static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->req_features;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_status(vf);
}

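/*
 * The first transition to DRIVER_OK requests the interrupts and
 * starts the datapath; on failure the FAILED status bit is set.
 */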
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;
	int ret;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = vdpa_to_adapter(vdpa_dev);
	status_old = ifcvf_get_status(vf);

	if (status_old == status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ret = ifcvf_request_irq(adapter);
		if (ret) {
			status = ifcvf_get_status(vf);
			status |= VIRTIO_CONFIG_S_FAILED;
			ifcvf_set_status(vf, status);
			return;
		}

		if (ifcvf_start_datapath(adapter) < 0)
			IFCVF_ERR(adapter->pdev,
				  "Failed to set ifcvf vdpa status %u\n",
				  status);
	}

	ifcvf_set_status(vf, status);
}

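/*
 * Reset tears down the datapath and frees the interrupts if the
 * device was running, then clears all vring state.
 */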
static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = vdpa_to_adapter(vdpa_dev);
	status_old = ifcvf_get_status(vf);

	if (status_old == 0)
		return 0;

	if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
		ifcvf_stop_datapath(adapter);
		ifcvf_free_irq(adapter);
	}

	ifcvf_reset_vring(adapter);

	return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	state->split.avail_index = ifcvf_get_vq_state(vf, qid);
	return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
				    u16 qid, bool ready)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
				  u32 num)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].desc = desc_area;
	vf->vring[qid].avail = driver_area;
	vf->vring[qid].used = device_area;

	return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vp_ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;

	return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->config_size;
}

static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	return 0;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_read_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_write_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->config_cb.callback = cb->callback;
	vf->config_cb.private = cb->private;
}

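/*
 * A per-vq irq is only reported when the vqs do not share one irq;
 * otherwise -EINVAL tells the caller there is no irq to pass through.
 */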
static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
				 u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	if (vf->vqs_reused_irq < 0)
		return vf->vring[qid].irq;
	else
		return -EINVAL;
}

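/*
 * Report the vq notify (doorbell) area for mapping; fall back to
 * PAGE_SIZE when the device reports no notify offset multiplier.
 */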
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
							       u16 idx)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = vf->vring[idx].notify_pa;
	if (!vf->notify_off_multiplier)
		area.size = PAGE_SIZE;
	else
		area.size = vf->notify_off_multiplier;

	return area;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
	.get_device_features = ifcvf_vdpa_get_device_features,
	.set_driver_features = ifcvf_vdpa_set_driver_features,
	.get_driver_features = ifcvf_vdpa_get_driver_features,
	.get_status = ifcvf_vdpa_get_status,
	.set_status = ifcvf_vdpa_set_status,
	.reset = ifcvf_vdpa_reset,
	.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
	.get_vq_state = ifcvf_vdpa_get_vq_state,
	.set_vq_state = ifcvf_vdpa_set_vq_state,
	.set_vq_cb = ifcvf_vdpa_set_vq_cb,
	.set_vq_ready = ifcvf_vdpa_set_vq_ready,
	.get_vq_ready = ifcvf_vdpa_get_vq_ready,
	.set_vq_num = ifcvf_vdpa_set_vq_num,
	.set_vq_address = ifcvf_vdpa_set_vq_address,
	.get_vq_irq = ifcvf_vdpa_get_vq_irq,
	.kick_vq = ifcvf_vdpa_kick_vq,
	.get_generation = ifcvf_vdpa_get_generation,
	.get_device_id = ifcvf_vdpa_get_device_id,
	.get_vendor_id = ifcvf_vdpa_get_vendor_id,
	.get_vq_align = ifcvf_vdpa_get_vq_align,
	.get_vq_group = ifcvf_vdpa_get_vq_group,
	.get_config_size = ifcvf_vdpa_get_config_size,
	.get_config = ifcvf_vdpa_get_config,
	.set_config = ifcvf_vdpa_set_config,
	.set_config_cb = ifcvf_vdpa_set_config_cb,
	.get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};

static struct virtio_device_id id_table_blk[] = {
	{VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
	{0},
};

static u32 get_dev_type(struct pci_dev *pdev)
{
	u32 dev_type;

	/* This driver drives both modern virtio devices and transitional
	 * devices in modern mode.
	 * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM,
	 * so legacy devices and transitional devices in legacy
	 * mode will not work for vDPA; this driver does not
	 * drive devices with a legacy interface.
	 */

	if (pdev->device < 0x1040)
		dev_type = pdev->subsystem_device;
	else
		dev_type = pdev->device - 0x1040;

	return dev_type;
}

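/*
 * Each VF backs exactly one vDPA device: dev_add registers the
 * adapter allocated at probe time, dev_del unregisters it and drops
 * the management device's reference to the adapter.
 */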
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			      const struct vdpa_dev_set_config *config)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct ifcvf_adapter *adapter;
	struct vdpa_device *vdpa_dev;
	struct pci_dev *pdev;
	struct ifcvf_hw *vf;
	int ret;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	if (!ifcvf_mgmt_dev->adapter)
		return -EOPNOTSUPP;

	adapter = ifcvf_mgmt_dev->adapter;
	vf = &adapter->vf;
	pdev = adapter->pdev;
	vdpa_dev = &adapter->vdpa;

	if (name)
		ret = dev_set_name(&vdpa_dev->dev, "%s", name);
	else
		ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);

	ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
	if (ret) {
		put_device(&adapter->vdpa.dev);
		IFCVF_ERR(pdev, "Failed to register to vDPA bus");
		return ret;
	}

	return 0;
}

static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	_vdpa_unregister_device(dev);
	ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
	.dev_add = ifcvf_vdpa_dev_add,
	.dev_del = ifcvf_vdpa_dev_del
};

static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev = NULL;
	struct device *dev = &pdev->dev;
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u32 dev_type;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to enable device\n");
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
				 IFCVF_DRIVER_NAME);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request MMIO region\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		IFCVF_ERR(pdev, "No usable DMA configuration\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed for adding devres for freeing irq vectors\n");
		return ret;
	}

	pci_set_master(pdev);

	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
				    dev, &ifc_vdpa_ops, 1, 1, NULL, false);
	if (IS_ERR(adapter)) {
		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
		return PTR_ERR(adapter);
	}

	vf = &adapter->vf;
	vf->dev_type = get_dev_type(pdev);
	vf->base = pcim_iomap_table(pdev);

	adapter->pdev = pdev;
	adapter->vdpa.dma_dev = &pdev->dev;

	ret = ifcvf_init_hw(vf, pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
		goto err;
	}

	for (i = 0; i < vf->nr_vring; i++)
		vf->vring[i].irq = -EINVAL;

	vf->hw_features = ifcvf_get_hw_features(vf);
	vf->config_size = ifcvf_get_config_size(vf);

	ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
	if (!ifcvf_mgmt_dev) {
		IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
		return -ENOMEM;
	}

	ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
	ifcvf_mgmt_dev->mdev.device = dev;
	ifcvf_mgmt_dev->adapter = adapter;

	dev_type = get_dev_type(pdev);
	switch (dev_type) {
	case VIRTIO_ID_NET:
		ifcvf_mgmt_dev->mdev.id_table = id_table_net;
		break;
	case VIRTIO_ID_BLOCK:
		ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
		break;
	default:
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
		ret = -EOPNOTSUPP;
		goto err;
	}

	ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
	ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;

	adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;

	ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed to initialize the management interfaces\n");
		goto err;
	}

	pci_set_drvdata(pdev, ifcvf_mgmt_dev);

	return 0;

err:
	kfree(ifcvf_mgmt_dev);
	return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = pci_get_drvdata(pdev);
	vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
	kfree(ifcvf_mgmt_dev);
}

static struct pci_device_id ifcvf_pci_ids[] = {
	/* N3000 network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 N3000_DEVICE_ID,
			 PCI_VENDOR_ID_INTEL,
			 N3000_SUBSYS_DEVICE_ID) },
	/* C5000X-PL network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_NET) },
	/* C5000X-PL block device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_BLOCK,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_BLOCK) },

	{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
	.name = IFCVF_DRIVER_NAME,
	.id_table = ifcvf_pci_ids,
	.probe = ifcvf_probe,
	.remove = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");