/*
 * PCI Backend - Provides restricted access to the real PCI bus topology
 * to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "pciback.h"

struct passthrough_dev_data {
        /* Access to dev_list must be protected by lock */
        struct list_head dev_list;
        spinlock_t lock;
};

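/*
 * Look up a device previously exported to this frontend.  The passthrough
 * backend does not renumber devices, so the (domain, bus, devfn) triple is
 * matched directly against the real topology.  Returns NULL if the device
 * is not on this pdev's list.
 */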
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
                                               unsigned int domain,
                                               unsigned int bus,
                                               unsigned int devfn)
{
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry;
        struct pci_dev *dev = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev_data->lock, flags);

        list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
                if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
                    && bus == (unsigned int)dev_entry->dev->bus->number
                    && devfn == dev_entry->dev->devfn) {
                        dev = dev_entry->dev;
                        break;
                }
        }

        spin_unlock_irqrestore(&dev_data->lock, flags);

        return dev;
}

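/*
 * Export a device to the frontend: queue it on the per-pdev list, then
 * publish its real domain/bus/devfn through publish_cb so the frontend can
 * discover it.
 */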
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
                                   struct pci_dev *dev,
                                   int devid, publish_pci_dev_cb publish_cb)
{
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry;
        unsigned long flags;
        unsigned int domain, bus, devfn;
        int err;

        dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
        if (!dev_entry)
                return -ENOMEM;
        dev_entry->dev = dev;

        spin_lock_irqsave(&dev_data->lock, flags);
        list_add_tail(&dev_entry->list, &dev_data->dev_list);
        spin_unlock_irqrestore(&dev_data->lock, flags);

        /* Publish this device. */
        domain = (unsigned int)pci_domain_nr(dev->bus);
        bus = (unsigned int)dev->bus->number;
        devfn = dev->devfn;
        err = publish_cb(pdev, domain, bus, devfn, devid);

        return err;
}

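/*
 * Withdraw a single device from the frontend: unlink it from the per-pdev
 * list and hand it back to pci-stub.  pcistub_put_pci_dev() is called only
 * after the spinlock has been released.
 */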
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
                                        struct pci_dev *dev)
{
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry, *t;
        struct pci_dev *found_dev = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev_data->lock, flags);

        list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
                if (dev_entry->dev == dev) {
                        list_del(&dev_entry->list);
                        found_dev = dev_entry->dev;
                        kfree(dev_entry);
                }
        }

        spin_unlock_irqrestore(&dev_data->lock, flags);

        if (found_dev)
                pcistub_put_pci_dev(found_dev);
}

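/* Allocate the per-pdev bookkeeping: an empty device list and its lock. */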
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
        struct passthrough_dev_data *dev_data;

        dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
        if (!dev_data)
                return -ENOMEM;

        spin_lock_init(&dev_data->lock);

        INIT_LIST_HEAD(&dev_data->dev_list);

        pdev->pci_dev_data = dev_data;

        return 0;
}

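/*
 * Tell the frontend which PCI buses to scan as roots.  A device is only
 * published as a root if none of its parent bridges are exported as well.
 * The spinlock is dropped around publish_root_cb(), which may sleep (it
 * typically ends up writing to xenstore); on the error path the loop is
 * left with the lock already released, hence the conditional unlock below.
 */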
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
                                         publish_pci_root_cb publish_root_cb)
{
        int err = 0;
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry, *e, *tmp;
        struct pci_dev *dev;
        int found;
        unsigned int domain, bus;

        spin_lock(&dev_data->lock);

        list_for_each_entry_safe(dev_entry, tmp, &dev_data->dev_list, list) {
                /* Only publish this device as a root if none of its
                 * parent bridges are exported
                 */
                found = 0;
                dev = dev_entry->dev->bus->self;
                for (; !found && dev != NULL; dev = dev->bus->self) {
                        list_for_each_entry(e, &dev_data->dev_list, list) {
                                if (dev == e->dev) {
                                        found = 1;
                                        break;
                                }
                        }
                }

                domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
                bus = (unsigned int)dev_entry->dev->bus->number;

                if (!found) {
                        spin_unlock(&dev_data->lock);
                        err = publish_root_cb(pdev, domain, bus);
                        if (err)
                                break;
                        spin_lock(&dev_data->lock);
                }
        }

        if (!err)
                spin_unlock(&dev_data->lock);

        return err;
}

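/*
 * Tear down the whole backend instance: return every exported device to
 * pci-stub and free the list.  By this point no other users of the list
 * remain, so it is walked without taking the lock.
 */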
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry, *t;

        list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
                list_del(&dev_entry->list);
                pcistub_put_pci_dev(dev_entry->dev);
                kfree(dev_entry);
        }

        kfree(dev_data);
        pdev->pci_dev_data = NULL;
}

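/*
 * Report the coordinates under which the frontend sees a device.  For the
 * passthrough backend this is the identity mapping, so it always succeeds.
 */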
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
                                        struct xen_pcibk_device *pdev,
                                        unsigned int *domain, unsigned int *bus,
                                        unsigned int *devfn)
{
        *domain = pci_domain_nr(pcidev->bus);
        *bus = pcidev->bus->number;
        *devfn = pcidev->devfn;
        return 1;
}

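/*
 * Operations used by the core xen-pciback code when the passthrough
 * backend is selected instead of the default virtual PCI bus backend.
 */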
struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
        .name           = "passthrough",
        .init           = __xen_pcibk_init_devices,
        .free           = __xen_pcibk_release_devices,
        .find           = __xen_pcibk_get_pcifront_dev,
        .publish        = __xen_pcibk_publish_pci_roots,
        .release        = __xen_pcibk_release_pci_dev,
        .add            = __xen_pcibk_add_pci_dev,
        .get            = __xen_pcibk_get_pci_dev,
};
/*
 * PCI Backend - Provides restricted access to the real PCI bus topology
 * to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"

struct passthrough_dev_data {
        /* Access to dev_list must be protected by lock */
        struct list_head dev_list;
        struct mutex lock;
};

static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
                                               unsigned int domain,
                                               unsigned int bus,
                                               unsigned int devfn)
{
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry;
        struct pci_dev *dev = NULL;

        mutex_lock(&dev_data->lock);

        list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
                if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
                    && bus == (unsigned int)dev_entry->dev->bus->number
                    && devfn == dev_entry->dev->devfn) {
                        dev = dev_entry->dev;
                        break;
                }
        }

        mutex_unlock(&dev_data->lock);

        return dev;
}

static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
                                   struct pci_dev *dev,
                                   int devid, publish_pci_dev_cb publish_cb)
{
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry;
        unsigned int domain, bus, devfn;
        int err;

        dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
        if (!dev_entry)
                return -ENOMEM;
        dev_entry->dev = dev;

        mutex_lock(&dev_data->lock);
        list_add_tail(&dev_entry->list, &dev_data->dev_list);
        mutex_unlock(&dev_data->lock);

        /* Publish this device. */
        domain = (unsigned int)pci_domain_nr(dev->bus);
        bus = (unsigned int)dev->bus->number;
        devfn = dev->devfn;
        err = publish_cb(pdev, domain, bus, devfn, devid);

        return err;
}

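/*
 * Withdraw a single device from the frontend and hand it back to pci-stub.
 * The 'lock' argument says whether the struct device lock must be taken
 * around pcistub_put_pci_dev(); callers that already hold it pass false so
 * they do not deadlock against themselves.
 */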
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
                                        struct pci_dev *dev, bool lock)
{
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry, *t;
        struct pci_dev *found_dev = NULL;

        mutex_lock(&dev_data->lock);

        list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
                if (dev_entry->dev == dev) {
                        list_del(&dev_entry->list);
                        found_dev = dev_entry->dev;
                        kfree(dev_entry);
                }
        }

        mutex_unlock(&dev_data->lock);

        if (found_dev) {
                if (lock)
                        device_lock(&found_dev->dev);
                pcistub_put_pci_dev(found_dev);
                if (lock)
                        device_unlock(&found_dev->dev);
        }
}

static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
        struct passthrough_dev_data *dev_data;

        dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
        if (!dev_data)
                return -ENOMEM;

        mutex_init(&dev_data->lock);

        INIT_LIST_HEAD(&dev_data->dev_list);

        pdev->pci_dev_data = dev_data;

        return 0;
}

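/*
 * Tell the frontend which PCI buses to scan as roots.  dev_data->lock is a
 * mutex in this variant, so it can simply stay held across
 * publish_root_cb() even though the callback may sleep.
 */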
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
                                         publish_pci_root_cb publish_root_cb)
{
        int err = 0;
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry, *e;
        struct pci_dev *dev;
        int found;
        unsigned int domain, bus;

        mutex_lock(&dev_data->lock);

        list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
                /* Only publish this device as a root if none of its
                 * parent bridges are exported
                 */
                found = 0;
                dev = dev_entry->dev->bus->self;
                for (; !found && dev != NULL; dev = dev->bus->self) {
                        list_for_each_entry(e, &dev_data->dev_list, list) {
                                if (dev == e->dev) {
                                        found = 1;
                                        break;
                                }
                        }
                }

                domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
                bus = (unsigned int)dev_entry->dev->bus->number;

                if (!found) {
                        err = publish_root_cb(pdev, domain, bus);
                        if (err)
                                break;
                }
        }

        mutex_unlock(&dev_data->lock);

        return err;
}

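/*
 * Tear down the whole backend instance.  Each device is handed back to
 * pci-stub with its struct device lock held, then the list entry is freed.
 */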
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry, *t;

        list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
                struct pci_dev *dev = dev_entry->dev;

                list_del(&dev_entry->list);
                device_lock(&dev->dev);
                pcistub_put_pci_dev(dev);
                device_unlock(&dev->dev);
                kfree(dev_entry);
        }

        kfree(dev_data);
        pdev->pci_dev_data = NULL;
}

static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
                                        struct xen_pcibk_device *pdev,
                                        unsigned int *domain, unsigned int *bus,
                                        unsigned int *devfn)
{
        *domain = pci_domain_nr(pcidev->bus);
        *bus = pcidev->bus->number;
        *devfn = pcidev->devfn;
        return 1;
}

const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
        .name           = "passthrough",
        .init           = __xen_pcibk_init_devices,
        .free           = __xen_pcibk_release_devices,
        .find           = __xen_pcibk_get_pcifront_dev,
        .publish        = __xen_pcibk_publish_pci_roots,
        .release        = __xen_pcibk_release_pci_dev,
        .add            = __xen_pcibk_add_pci_dev,
        .get            = __xen_pcibk_get_pci_dev,
};