1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2009, Intel Corporation.
4 *
5 * Author: Weidong Han <weidong.han@intel.com>
6 */
7
8#include <linux/pci.h>
9#include <linux/acpi.h>
10#include <linux/pci-acpi.h>
11#include <xen/pci.h>
12#include <xen/xen.h>
13#include <xen/interface/physdev.h>
14#include <xen/interface/xen.h>
15
16#include <asm/xen/hypervisor.h>
17#include <asm/xen/hypercall.h>
18#include "../pci/pci.h"
19#ifdef CONFIG_PCI_MMCONFIG
20#include <asm/pci_x86.h>
21
22static int xen_mcfg_late(void);
23#endif
24
25static bool __read_mostly pci_seg_supported = true;
26
/*
 * Tell the hypervisor about a newly discovered PCI device so it can set up
 * IOMMU mappings and interrupt routing for passthrough.
 *
 * The preferred interface is PHYSDEVOP_pci_device_add, which is PCI-segment
 * aware and can carry SR-IOV physfn, ARI and NUMA proximity information.
 * If the hypervisor returns -ENOSYS for it, fall back (permanently, via
 * pci_seg_supported) to the legacy PHYSDEVOP_manage_pci_* ops, which can
 * only describe devices on segment 0.
 *
 * Returns 0 on success or a negative errno from the hypercall.
 */
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
	/*
	 * Reserve MCFG areas in Xen on first invocation due to this being
	 * potentially called from inside of acpi_init immediately after
	 * MCFG table has been finally parsed.
	 */
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif
	if (pci_seg_supported) {
		/* One optarr slot, used for the optional _PXM value below. */
		DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);

		add->seg = pci_domain_nr(pci_dev->bus);
		add->bus = pci_dev->bus->number;
		add->devfn = pci_dev->devfn;

#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			/* SR-IOV VF: report the parent PF alongside it. */
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		/* VFs usually have no ACPI node; borrow the PF's bridge. */
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
			/*
			 * This device was not listed in the ACPI name space at
			 * all. Try to get acpi handle of parent pci bus.
			 */
			struct pci_bus *pbus;
			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

			/*
			 * Walk up the ACPI namespace until an ancestor
			 * supplies a _PXM (proximity) value, and forward
			 * it to the hypervisor if one is found.
			 */
			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		/* Hypervisor lacks the segment-aware op; use legacy ones. */
		pci_seg_supported = false;
	}

	/* Legacy ops cannot express a non-zero PCI segment. */
	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_virtfn = 1,
			.physfn.bus = physfn->bus->number,
			.physfn.devfn = physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_extfn = 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
					  &manage_pci);
	}

	return r;
}
146
147static int xen_remove_device(struct device *dev)
148{
149 int r;
150 struct pci_dev *pci_dev = to_pci_dev(dev);
151
152 if (pci_seg_supported) {
153 struct physdev_pci_device device = {
154 .seg = pci_domain_nr(pci_dev->bus),
155 .bus = pci_dev->bus->number,
156 .devfn = pci_dev->devfn
157 };
158
159 r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
160 &device);
161 } else if (pci_domain_nr(pci_dev->bus))
162 r = -ENOSYS;
163 else {
164 struct physdev_manage_pci manage_pci = {
165 .bus = pci_dev->bus->number,
166 .devfn = pci_dev->devfn
167 };
168
169 r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
170 &manage_pci);
171 }
172
173 return r;
174}
175
176int xen_reset_device(const struct pci_dev *dev)
177{
178 struct pci_device_reset device = {
179 .dev.seg = pci_domain_nr(dev->bus),
180 .dev.bus = dev->bus->number,
181 .dev.devfn = dev->devfn,
182 .flags = PCI_DEVICE_RESET_FLR,
183 };
184
185 return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
186}
187EXPORT_SYMBOL_GPL(xen_reset_device);
188
189static int xen_pci_notifier(struct notifier_block *nb,
190 unsigned long action, void *data)
191{
192 struct device *dev = data;
193 int r = 0;
194
195 switch (action) {
196 case BUS_NOTIFY_ADD_DEVICE:
197 r = xen_add_device(dev);
198 break;
199 case BUS_NOTIFY_DEL_DEVICE:
200 r = xen_remove_device(dev);
201 break;
202 default:
203 return NOTIFY_DONE;
204 }
205 if (r)
206 dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
207 action == BUS_NOTIFY_ADD_DEVICE ? "add" :
208 (action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
209 return NOTIFY_OK;
210}
211
/* Hooks the PCI bus so every device add/remove is relayed to Xen. */
static struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};
215
216static int __init register_xen_pci_notifier(void)
217{
218 if (!xen_initial_domain())
219 return 0;
220
221 return bus_register_notifier(&pci_bus_type, &device_nb);
222}
223
224arch_initcall(register_xen_pci_notifier);
225
#ifdef CONFIG_PCI_MMCONFIG
/*
 * Report every parsed MCFG (PCI ECAM) region to the hypervisor so that
 * Xen knows the areas are reserved and may use them for config accesses.
 *
 * A per-region failure is only logged; -ENOSYS (hypervisor without the
 * op) is silently tolerated.  Always returns 0.
 *
 * Fixes vs. original: the user-visible warning string was split across
 * three literals (kernel coding style forbids breaking quoted strings --
 * it defeats grepping); the hypercall argument is now zero-initialized
 * via a designated initializer instead of field-by-field assignment of
 * an uninitialized stack struct.
 */
static int xen_mcfg_late(void)
{
	struct pci_mmcfg_region *cfg;
	int rc;

	if (!xen_initial_domain())
		return 0;

	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 0;

	if (list_empty(&pci_mmcfg_list))
		return 0;

	/* Check whether they are in the right area. */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		struct physdev_pci_mmcfg_reserved r = {
			.address = cfg->address,
			.segment = cfg->segment,
			.start_bus = cfg->start_bus,
			.end_bus = cfg->end_bus,
			.flags = XEN_PCI_MMCFG_RESERVED,
		};

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
		switch (rc) {
		case 0:
		case -ENOSYS:
			continue;

		default:
			pr_warn("Failed to report MMCONFIG reservation state for %s to hypervisor (%d)\n",
				cfg->name, rc);
		}
	}
	return 0;
}
#endif
267
#ifdef CONFIG_XEN_DOM0
/*
 * Records which domain a passed-through PCI device is assigned to.
 * Entries live on dev_domain_list and are protected by
 * dev_domain_list_spinlock.
 */
struct xen_device_domain_owner {
	domid_t domain;		/* id of the owning domain */
	struct pci_dev *dev;	/* device assigned to that domain */
	struct list_head list;	/* link on dev_domain_list */
};

static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static LIST_HEAD(dev_domain_list);
277
278static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
279{
280 struct xen_device_domain_owner *owner;
281
282 list_for_each_entry(owner, &dev_domain_list, list) {
283 if (owner->dev == dev)
284 return owner;
285 }
286 return NULL;
287}
288
289int xen_find_device_domain_owner(struct pci_dev *dev)
290{
291 struct xen_device_domain_owner *owner;
292 int domain = -ENODEV;
293
294 spin_lock(&dev_domain_list_spinlock);
295 owner = find_device(dev);
296 if (owner)
297 domain = owner->domain;
298 spin_unlock(&dev_domain_list_spinlock);
299 return domain;
300}
301EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
302
303int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
304{
305 struct xen_device_domain_owner *owner;
306
307 owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
308 if (!owner)
309 return -ENODEV;
310
311 spin_lock(&dev_domain_list_spinlock);
312 if (find_device(dev)) {
313 spin_unlock(&dev_domain_list_spinlock);
314 kfree(owner);
315 return -EEXIST;
316 }
317 owner->domain = domain;
318 owner->dev = dev;
319 list_add_tail(&owner->list, &dev_domain_list);
320 spin_unlock(&dev_domain_list_spinlock);
321 return 0;
322}
323EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
324
325int xen_unregister_device_domain_owner(struct pci_dev *dev)
326{
327 struct xen_device_domain_owner *owner;
328
329 spin_lock(&dev_domain_list_spinlock);
330 owner = find_device(dev);
331 if (!owner) {
332 spin_unlock(&dev_domain_list_spinlock);
333 return -ENODEV;
334 }
335 list_del(&owner->list);
336 spin_unlock(&dev_domain_list_spinlock);
337 kfree(owner);
338 return 0;
339}
340EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
341#endif
1/*
2 * Copyright (c) 2009, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Weidong Han <weidong.han@intel.com>
18 */
19
20#include <linux/pci.h>
21#include <xen/xen.h>
22#include <xen/interface/physdev.h>
23#include <xen/interface/xen.h>
24
25#include <asm/xen/hypervisor.h>
26#include <asm/xen/hypercall.h>
27#include "../pci/pci.h"
28
/*
 * Report a newly added PCI device to the hypervisor through the legacy
 * PHYSDEVOP_manage_pci_* interface so it can be set up for passthrough.
 * Returns the hypercall result (0 on success, negative errno otherwise).
 */
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);

#ifdef CONFIG_PCI_IOV
	if (pci_dev->is_virtfn) {
		/* SR-IOV VF: report the parent PF's bus/devfn as well. */
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_virtfn = 1,
			.physfn.bus = pci_dev->physfn->bus->number,
			.physfn.devfn = pci_dev->physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	} else
#endif
	if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		/* ARI device: devfn encodes an extended function number. */
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_extfn = 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	} else {
		/* Plain device: the basic add op suffices. */
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
					  &manage_pci);
	}

	return r;
}
69
70static int xen_remove_device(struct device *dev)
71{
72 int r;
73 struct pci_dev *pci_dev = to_pci_dev(dev);
74 struct physdev_manage_pci manage_pci;
75
76 manage_pci.bus = pci_dev->bus->number;
77 manage_pci.devfn = pci_dev->devfn;
78
79 r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
80 &manage_pci);
81
82 return r;
83}
84
85static int xen_pci_notifier(struct notifier_block *nb,
86 unsigned long action, void *data)
87{
88 struct device *dev = data;
89 int r = 0;
90
91 switch (action) {
92 case BUS_NOTIFY_ADD_DEVICE:
93 r = xen_add_device(dev);
94 break;
95 case BUS_NOTIFY_DEL_DEVICE:
96 r = xen_remove_device(dev);
97 break;
98 default:
99 break;
100 }
101
102 return r;
103}
104
105struct notifier_block device_nb = {
106 .notifier_call = xen_pci_notifier,
107};
108
109static int __init register_xen_pci_notifier(void)
110{
111 if (!xen_initial_domain())
112 return 0;
113
114 return bus_register_notifier(&pci_bus_type, &device_nb);
115}
116
117arch_initcall(register_xen_pci_notifier);