/*
 * PCI Backend - Provides restricted access to the real PCI bus topology
 * to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "pciback.h"

struct passthrough_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list;
	spinlock_t lock;
};

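/*
 * Look up the pci_dev exported through this pdev that matches
 * domain:bus:devfn, or NULL if it is not on the passthrough list.
 * The list is walked under the spinlock with local interrupts disabled.
 */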
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	struct pci_dev *dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_data->lock, flags);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
		    && bus == (unsigned int)dev_entry->dev->bus->number
		    && devfn == dev_entry->dev->devfn) {
			dev = dev_entry->dev;
			break;
		}
	}

	spin_unlock_irqrestore(&dev_data->lock, flags);

	return dev;
}

static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev,
				   int devid, publish_pci_dev_cb publish_cb)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	unsigned long flags;
	unsigned int domain, bus, devfn;
	int err;

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry)
		return -ENOMEM;
	dev_entry->dev = dev;

	spin_lock_irqsave(&dev_data->lock, flags);
	list_add_tail(&dev_entry->list, &dev_data->dev_list);
	spin_unlock_irqrestore(&dev_data->lock, flags);

	/* Publish this device. */
	domain = (unsigned int)pci_domain_nr(dev->bus);
	bus = (unsigned int)dev->bus->number;
	devfn = dev->devfn;
	err = publish_cb(pdev, domain, bus, devfn, devid);

	return err;
}

static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_data->lock, flags);

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		if (dev_entry->dev == dev) {
			list_del(&dev_entry->list);
			found_dev = dev_entry->dev;
			kfree(dev_entry);
		}
	}

	spin_unlock_irqrestore(&dev_data->lock, flags);

	if (found_dev)
		pcistub_put_pci_dev(found_dev);
}

static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data;

	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	spin_lock_init(&dev_data->lock);

	INIT_LIST_HEAD(&dev_data->dev_list);

	pdev->pci_dev_data = dev_data;

	return 0;
}

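/*
 * Publish each exported device's bus as a root unless one of its parent
 * bridges is itself exported. The spinlock is dropped around
 * publish_root_cb() and is only re-taken when the callback succeeds.
 */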
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_root_cb)
{
	int err = 0;
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *e, *tmp;
	struct pci_dev *dev;
	int found;
	unsigned int domain, bus;

	spin_lock(&dev_data->lock);

	list_for_each_entry_safe(dev_entry, tmp, &dev_data->dev_list, list) {
		/* Only publish this device as a root if none of its
		 * parent bridges are exported
		 */
		found = 0;
		dev = dev_entry->dev->bus->self;
		for (; !found && dev != NULL; dev = dev->bus->self) {
			list_for_each_entry(e, &dev_data->dev_list, list) {
				if (dev == e->dev) {
					found = 1;
					break;
				}
			}
		}

		domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
		bus = (unsigned int)dev_entry->dev->bus->number;

		if (!found) {
			spin_unlock(&dev_data->lock);
			err = publish_root_cb(pdev, domain, bus);
			if (err)
				break;
			spin_lock(&dev_data->lock);
		}
	}

	if (!err)
		spin_unlock(&dev_data->lock);

	return err;
}

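/*
 * Backend teardown (the .free hook): hand every device on the passthrough
 * list back to pcistub and free the per-pdev state. No locking is taken
 * here.
 */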
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		list_del(&dev_entry->list);
		pcistub_put_pci_dev(dev_entry->dev);
		kfree(dev_entry);
	}

	kfree(dev_data);
	pdev->pci_dev_data = NULL;
}

static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	*domain = pci_domain_nr(pcidev->bus);
	*bus = pcidev->bus->number;
	*devfn = pcidev->devfn;
	return 1;
}

struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
	.name = "passthrough",
	.init = __xen_pcibk_init_devices,
	.free = __xen_pcibk_release_devices,
	.find = __xen_pcibk_get_pcifront_dev,
	.publish = __xen_pcibk_publish_pci_roots,
	.release = __xen_pcibk_release_pci_dev,
	.add = __xen_pcibk_add_pci_dev,
	.get = __xen_pcibk_get_pci_dev,
};

// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Backend - Provides restricted access to the real PCI bus topology
 * to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"

struct passthrough_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list;
	struct mutex lock;
};

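/*
 * Look up the pci_dev exported through this pdev that matches
 * domain:bus:devfn; returns NULL if the device is not on the passthrough
 * list.
 */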
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	struct pci_dev *dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
		    && bus == (unsigned int)dev_entry->dev->bus->number
		    && devfn == dev_entry->dev->devfn) {
			dev = dev_entry->dev;
			break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return dev;
}

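/*
 * Add a device to the passthrough list and report it to the frontend via
 * publish_cb, using the device's real domain, bus and devfn.
 */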
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev,
				   int devid, publish_pci_dev_cb publish_cb)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	unsigned int domain, bus, devfn;
	int err;

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry)
		return -ENOMEM;
	dev_entry->dev = dev;

	mutex_lock(&dev_data->lock);
	list_add_tail(&dev_entry->list, &dev_data->dev_list);
	mutex_unlock(&dev_data->lock);

	/* Publish this device. */
	domain = (unsigned int)pci_domain_nr(dev->bus);
	bus = (unsigned int)dev->bus->number;
	devfn = dev->devfn;
	err = publish_cb(pdev, domain, bus, devfn, devid);

	return err;
}

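/*
 * Remove a device from the passthrough list and hand it back to pcistub.
 * @lock indicates whether the device lock must be taken around
 * pcistub_put_pci_dev().
 */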
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev, bool lock)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		if (dev_entry->dev == dev) {
			list_del(&dev_entry->list);
			found_dev = dev_entry->dev;
			kfree(dev_entry);
		}
	}

	mutex_unlock(&dev_data->lock);

	if (found_dev) {
		if (lock)
			device_lock(&found_dev->dev);
		pcistub_put_pci_dev(found_dev);
		if (lock)
			device_unlock(&found_dev->dev);
	}
}

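/* Allocate and initialise the per-pdev passthrough state. */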
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data;

	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	mutex_init(&dev_data->lock);

	INIT_LIST_HEAD(&dev_data->dev_list);

	pdev->pci_dev_data = dev_data;

	return 0;
}

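/*
 * Publish each exported device's bus as a root. A bus is only published if
 * none of the device's parent bridges are themselves exported.
 */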
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_root_cb)
{
	int err = 0;
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *e;
	struct pci_dev *dev;
	int found;
	unsigned int domain, bus;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		/* Only publish this device as a root if none of its
		 * parent bridges are exported
		 */
		found = 0;
		dev = dev_entry->dev->bus->self;
		for (; !found && dev != NULL; dev = dev->bus->self) {
			list_for_each_entry(e, &dev_data->dev_list, list) {
				if (dev == e->dev) {
					found = 1;
					break;
				}
			}
		}

		domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
		bus = (unsigned int)dev_entry->dev->bus->number;

		if (!found) {
			err = publish_root_cb(pdev, domain, bus);
			if (err)
				break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return err;
}

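/*
 * Backend teardown: return every exported device to pcistub, taking the
 * device lock around each put, then free the passthrough state.
 */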
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		struct pci_dev *dev = dev_entry->dev;

		list_del(&dev_entry->list);
		device_lock(&dev->dev);
		pcistub_put_pci_dev(dev);
		device_unlock(&dev->dev);
		kfree(dev_entry);
	}

	kfree(dev_data);
	pdev->pci_dev_data = NULL;
}

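/*
 * Passthrough mode exposes the real topology, so report the device to the
 * frontend at its real domain:bus:devfn.
 */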
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	*domain = pci_domain_nr(pcidev->bus);
	*bus = pcidev->bus->number;
	*devfn = pcidev->devfn;
	return 1;
}

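/* Callbacks implementing the "passthrough" flavour of the PCI backend. */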
const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
	.name = "passthrough",
	.init = __xen_pcibk_init_devices,
	.free = __xen_pcibk_release_devices,
	.find = __xen_pcibk_get_pcifront_dev,
	.publish = __xen_pcibk_publish_pci_roots,
	.release = __xen_pcibk_release_pci_dev,
	.add = __xen_pcibk_add_pci_dev,
	.get = __xen_pcibk_get_pci_dev,
};