// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Intel Corporation.
 *
 * Author: Weidong Han <weidong.han@intel.com>
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>

static int xen_mcfg_late(void);
#endif

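/*
 * Whether the hypervisor understands the segment-aware
 * PHYSDEVOP_pci_device_add/remove interface.  Cleared the first time the
 * hypercall returns -ENOSYS, after which the legacy PHYSDEVOP_manage_pci_*
 * operations (segment 0 only) are used instead.
 */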
static bool __read_mostly pci_seg_supported = true;

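/*
 * Notify the hypervisor about a newly added PCI device.  The segment-aware
 * PHYSDEVOP_pci_device_add is tried first; if the hypervisor does not
 * implement it (-ENOSYS), the legacy PHYSDEVOP_manage_pci_* interface,
 * which only covers segment 0, is used instead.  Without this, device
 * passthrough and MSI/MSI-X setup may fail (see xen_pci_notifier()).
 */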
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
	/*
	 * Reserve MCFG areas in Xen on first invocation, since this may be
	 * called from inside acpi_init() immediately after the MCFG table
	 * has been parsed.
	 */
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif
	if (pci_seg_supported) {
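		/*
		 * The interface's optarr[] is a variable-length optional
		 * parameter array; the trailing uint32_t reserves space for
		 * the PXM value written to add->optarr[0] below.
		 */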
		struct {
			struct physdev_pci_device_add add;
			uint32_t pxm;
		} add_ext = {
			.add.seg = pci_domain_nr(pci_dev->bus),
			.add.bus = pci_dev->bus->number,
			.add.devfn = pci_dev->devfn
		};
		struct physdev_pci_device_add *add = &add_ext.add;

#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
			/*
			 * This device was not listed in the ACPI name space
			 * at all.  Try to get the ACPI handle of a parent
			 * PCI bus instead.
			 */
			struct pci_bus *pbus;

			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		pci_seg_supported = false;
	}

	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_virtfn = 1,
			.physfn.bus = physfn->bus->number,
			.physfn.devfn = physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_extfn = 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
					  &manage_pci);
	}

	return r;
}

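/*
 * Mirror of xen_add_device(): tell the hypervisor that a PCI device is
 * going away, using the segment-aware hypercall when available and the
 * legacy segment-0-only interface otherwise.
 */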
static int xen_remove_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (pci_seg_supported) {
		struct physdev_pci_device device = {
			.seg = pci_domain_nr(pci_dev->bus),
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
					  &device);
	} else if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
	else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
					  &manage_pci);
	}

	return r;
}

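/*
 * PCI bus notifier: forward device add/remove events from the driver core
 * to the hypervisor.  Failures are only logged; the notifier chain is not
 * aborted.
 */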
static int xen_pci_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct device *dev = data;
	int r = 0;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		r = xen_add_device(dev);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		r = xen_remove_device(dev);
		break;
	default:
		return NOTIFY_DONE;
	}
	if (r)
		dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
			action == BUS_NOTIFY_ADD_DEVICE ? "add" :
			(action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
	return NOTIFY_OK;
}

static struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};

static int __init register_xen_pci_notifier(void)
{
	if (!xen_initial_domain())
		return 0;

	return bus_register_notifier(&pci_bus_type, &device_nb);
}

arch_initcall(register_xen_pci_notifier);

#ifdef CONFIG_PCI_MMCONFIG
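/*
 * Report each MMCONFIG (ECAM) region parsed from the MCFG table to the
 * hypervisor, marked as reserved.  -ENOSYS from older hypervisors that
 * lack PHYSDEVOP_pci_mmcfg_reserved is silently tolerated.
 */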
static int xen_mcfg_late(void)
{
	struct pci_mmcfg_region *cfg;
	int rc;

	if (!xen_initial_domain())
		return 0;

	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 0;

	if (list_empty(&pci_mmcfg_list))
		return 0;

	/* Check whether they are in the right area. */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		struct physdev_pci_mmcfg_reserved r;

		r.address = cfg->address;
		r.segment = cfg->segment;
		r.start_bus = cfg->start_bus;
		r.end_bus = cfg->end_bus;
		r.flags = XEN_PCI_MMCFG_RESERVED;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
		switch (rc) {
		case 0:
		case -ENOSYS:
			continue;

		default:
			pr_warn("Failed to report MMCONFIG reservation state for %s to hypervisor (%d)\n",
				cfg->name, rc);
		}
	}
	return 0;
}
#endif