/* Linux kernel source: drivers/xen/pci.c — version v6.13.7 */
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2009, Intel Corporation.
  4 *
  5 * Author: Weidong Han <weidong.han@intel.com>
  6 */
  7
  8#include <linux/pci.h>
  9#include <linux/acpi.h>
 10#include <linux/pci-acpi.h>
 11#include <xen/pci.h>
 12#include <xen/xen.h>
 13#include <xen/interface/physdev.h>
 14#include <xen/interface/xen.h>
 15
 16#include <asm/xen/hypervisor.h>
 17#include <asm/xen/hypercall.h>
 18#include "../pci/pci.h"
 19#ifdef CONFIG_PCI_MMCONFIG
 20#include <asm/pci_x86.h>
 21
 22static int xen_mcfg_late(void);
 23#endif
 24
 25static bool __read_mostly pci_seg_supported = true;
 26
/*
 * Notify the Xen hypervisor that a new PCI device has appeared so that
 * dom0 passthrough and MSI/MSI-X setup can work for it.
 *
 * Tries the segment-aware PHYSDEVOP_pci_device_add hypercall first; if
 * the hypervisor does not implement it (-ENOSYS), falls back to the
 * legacy PHYSDEVOP_manage_pci_* interface, which can only describe
 * devices on PCI segment (domain) 0.
 *
 * Returns 0 on success or a negative error code from the hypercall.
 */
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
	/*
	 * Reserve MCFG areas in Xen on first invocation due to this being
	 * potentially called from inside of acpi_init immediately after
	 * MCFG table has been finally parsed.
	 */
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif
	if (pci_seg_supported) {
		/* One optarr slot is reserved for the optional _PXM value. */
		DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);

		add->seg = pci_domain_nr(pci_dev->bus);
		add->bus = pci_dev->bus->number;
		add->devfn = pci_dev->devfn;

#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			/* SR-IOV virtual function: also report its physfn. */
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
		/* ARI in use: devfn encodes an extended function number. */
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		/* VFs often have no ACPI node; borrow the PF's bridge. */
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
			/*
			 * This device was not listed in the ACPI name space at
			 * all. Try to get acpi handle of parent pci bus.
			 */
			struct pci_bus *pbus;
			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

			/* Walk up the ACPI tree looking for a _PXM node. */
			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		/* Hypercall not implemented: use the legacy interface below. */
		pci_seg_supported = false;
	}

	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;	/* legacy interface cannot express segment != 0 */
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus		= pci_dev->bus->number,
			.devfn		= pci_dev->devfn,
			.is_virtfn 	= 1,
			.physfn.bus	= physfn->bus->number,
			.physfn.devfn	= physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
			&manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus		= pci_dev->bus->number,
			.devfn		= pci_dev->devfn,
			.is_extfn	= 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
			&manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus	= pci_dev->bus->number,
			.devfn	= pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
			&manage_pci);
	}

	return r;
}
146
147static int xen_remove_device(struct device *dev)
148{
149	int r;
150	struct pci_dev *pci_dev = to_pci_dev(dev);
151
152	if (pci_seg_supported) {
153		struct physdev_pci_device device = {
154			.seg = pci_domain_nr(pci_dev->bus),
155			.bus = pci_dev->bus->number,
156			.devfn = pci_dev->devfn
157		};
158
159		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
160					  &device);
161	} else if (pci_domain_nr(pci_dev->bus))
162		r = -ENOSYS;
163	else {
164		struct physdev_manage_pci manage_pci = {
165			.bus = pci_dev->bus->number,
166			.devfn = pci_dev->devfn
167		};
168
169		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
170					  &manage_pci);
171	}
172
173	return r;
174}
175
176int xen_reset_device(const struct pci_dev *dev)
177{
178	struct pci_device_reset device = {
179		.dev.seg = pci_domain_nr(dev->bus),
180		.dev.bus = dev->bus->number,
181		.dev.devfn = dev->devfn,
182		.flags = PCI_DEVICE_RESET_FLR,
183	};
184
185	return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
186}
187EXPORT_SYMBOL_GPL(xen_reset_device);
188
189static int xen_pci_notifier(struct notifier_block *nb,
190			    unsigned long action, void *data)
191{
192	struct device *dev = data;
193	int r = 0;
194
195	switch (action) {
196	case BUS_NOTIFY_ADD_DEVICE:
197		r = xen_add_device(dev);
198		break;
199	case BUS_NOTIFY_DEL_DEVICE:
200		r = xen_remove_device(dev);
201		break;
202	default:
203		return NOTIFY_DONE;
204	}
205	if (r)
206		dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
207			action == BUS_NOTIFY_ADD_DEVICE ? "add" :
208			(action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
209	return NOTIFY_OK;
210}
211
/* Notifier that forwards PCI bus add/remove events to the hypervisor. */
static struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};
215
216static int __init register_xen_pci_notifier(void)
217{
218	if (!xen_initial_domain())
219		return 0;
220
221	return bus_register_notifier(&pci_bus_type, &device_nb);
222}
223
224arch_initcall(register_xen_pci_notifier);
225
#ifdef CONFIG_PCI_MMCONFIG
/*
 * Report every parsed MCFG (PCIe MMCONFIG/ECAM) region to the Xen
 * hypervisor so it can check and reserve them.
 *
 * Quietly returns 0 when not running as dom0, when MMCONFIG probing is
 * disabled, or when no regions were parsed.  A hypervisor that lacks
 * PHYSDEVOP_pci_mmcfg_reserved (-ENOSYS) is tolerated; any other
 * per-region failure is logged and the walk continues.
 */
static int xen_mcfg_late(void)
{
	struct pci_mmcfg_region *cfg;
	int rc;

	if (!xen_initial_domain())
		return 0;

	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 0;

	if (list_empty(&pci_mmcfg_list))
		return 0;

	/* Check whether they are in the right area. */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		struct physdev_pci_mmcfg_reserved r;

		r.address = cfg->address;
		r.segment = cfg->segment;
		r.start_bus = cfg->start_bus;
		r.end_bus = cfg->end_bus;
		r.flags = XEN_PCI_MMCFG_RESERVED;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
		switch (rc) {
		case 0:
		case -ENOSYS:
			continue;

		default:
			/* Kernel style: keep user-visible strings unsplit. */
			pr_warn("Failed to report MMCONFIG reservation state for %s to hypervisor (%d)\n",
				cfg->name, rc);
		}
	}
	return 0;
}
#endif
267
268#ifdef CONFIG_XEN_DOM0
/* Tracks which Xen domain currently owns a given passed-through PCI device. */
struct xen_device_domain_owner {
	domid_t domain;		/* id of the owning domain */
	struct pci_dev *dev;	/* device this entry refers to */
	struct list_head list;	/* link in dev_domain_list */
};

/* Protects dev_domain_list against concurrent lookup/insert/remove. */
static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static LIST_HEAD(dev_domain_list);
277
278static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
279{
280	struct xen_device_domain_owner *owner;
281
282	list_for_each_entry(owner, &dev_domain_list, list) {
283		if (owner->dev == dev)
284			return owner;
285	}
286	return NULL;
287}
288
289int xen_find_device_domain_owner(struct pci_dev *dev)
290{
291	struct xen_device_domain_owner *owner;
292	int domain = -ENODEV;
293
294	spin_lock(&dev_domain_list_spinlock);
295	owner = find_device(dev);
296	if (owner)
297		domain = owner->domain;
298	spin_unlock(&dev_domain_list_spinlock);
299	return domain;
300}
301EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
302
303int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
304{
305	struct xen_device_domain_owner *owner;
306
307	owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
308	if (!owner)
309		return -ENODEV;
310
311	spin_lock(&dev_domain_list_spinlock);
312	if (find_device(dev)) {
313		spin_unlock(&dev_domain_list_spinlock);
314		kfree(owner);
315		return -EEXIST;
316	}
317	owner->domain = domain;
318	owner->dev = dev;
319	list_add_tail(&owner->list, &dev_domain_list);
320	spin_unlock(&dev_domain_list_spinlock);
321	return 0;
322}
323EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
324
325int xen_unregister_device_domain_owner(struct pci_dev *dev)
326{
327	struct xen_device_domain_owner *owner;
328
329	spin_lock(&dev_domain_list_spinlock);
330	owner = find_device(dev);
331	if (!owner) {
332		spin_unlock(&dev_domain_list_spinlock);
333		return -ENODEV;
334	}
335	list_del(&owner->list);
336	spin_unlock(&dev_domain_list_spinlock);
337	kfree(owner);
338	return 0;
339}
340EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
341#endif
/* Linux kernel source: drivers/xen/pci.c — version v5.4 (older copy of the same file follows) */
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2009, Intel Corporation.
  4 *
  5 * Author: Weidong Han <weidong.han@intel.com>
  6 */
  7
  8#include <linux/pci.h>
  9#include <linux/acpi.h>
 10#include <linux/pci-acpi.h>
 
 11#include <xen/xen.h>
 12#include <xen/interface/physdev.h>
 13#include <xen/interface/xen.h>
 14
 15#include <asm/xen/hypervisor.h>
 16#include <asm/xen/hypercall.h>
 17#include "../pci/pci.h"
 18#ifdef CONFIG_PCI_MMCONFIG
 19#include <asm/pci_x86.h>
 20
 21static int xen_mcfg_late(void);
 22#endif
 23
 24static bool __read_mostly pci_seg_supported = true;
 25
/*
 * Notify the Xen hypervisor that a new PCI device has appeared so that
 * dom0 passthrough and MSI/MSI-X setup can work for it.
 *
 * Tries the segment-aware PHYSDEVOP_pci_device_add hypercall first; if
 * the hypervisor does not implement it (-ENOSYS), falls back to the
 * legacy PHYSDEVOP_manage_pci_* interface, which can only describe
 * devices on PCI segment (domain) 0.
 *
 * Returns 0 on success or a negative error code from the hypercall.
 */
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
	/*
	 * Reserve MCFG areas in Xen on first invocation due to this being
	 * potentially called from inside of acpi_init immediately after
	 * MCFG table has been finally parsed.
	 */
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif
	if (pci_seg_supported) {
		/* Trailing pxm member backs add->optarr[0] for XEN_PCI_DEV_PXM. */
		struct {
			struct physdev_pci_device_add add;
			uint32_t pxm;
		} add_ext = {
			.add.seg = pci_domain_nr(pci_dev->bus),
			.add.bus = pci_dev->bus->number,
			.add.devfn = pci_dev->devfn
		};
		struct physdev_pci_device_add *add = &add_ext.add;

#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			/* SR-IOV virtual function: also report its physfn. */
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
		/* ARI in use: devfn encodes an extended function number. */
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		/* VFs often have no ACPI node; borrow the PF's bridge. */
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
			/*
			 * This device was not listed in the ACPI name space at
			 * all. Try to get acpi handle of parent pci bus.
			 */
			struct pci_bus *pbus;
			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

			/* Walk up the ACPI tree looking for a _PXM node. */
			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		/* Hypercall not implemented: use the legacy interface below. */
		pci_seg_supported = false;
	}

	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;	/* legacy interface cannot express segment != 0 */
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus		= pci_dev->bus->number,
			.devfn		= pci_dev->devfn,
			.is_virtfn 	= 1,
			.physfn.bus	= physfn->bus->number,
			.physfn.devfn	= physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
			&manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus		= pci_dev->bus->number,
			.devfn		= pci_dev->devfn,
			.is_extfn	= 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
			&manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus	= pci_dev->bus->number,
			.devfn	= pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
			&manage_pci);
	}

	return r;
}
149
/*
 * Notify the Xen hypervisor that a PCI device is being removed.
 *
 * Prefers the segment-aware PHYSDEVOP_pci_device_remove hypercall and
 * falls back to the legacy PHYSDEVOP_manage_pci_remove interface, which
 * can only describe devices on PCI segment (domain) 0.
 *
 * Returns 0 on success or a negative error code from the hypercall.
 */
static int xen_remove_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (pci_seg_supported) {
		struct physdev_pci_device device = {
			.seg = pci_domain_nr(pci_dev->bus),
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
					  &device);
	} else if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;	/* legacy interface cannot express segment != 0 */
	else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
					  &manage_pci);
	}

	return r;
}
178
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * PCI bus notifier: forward device add/remove events to the hypervisor.
 * Unhandled events return NOTIFY_DONE.  A failing hypercall is logged
 * but does not veto the device, so NOTIFY_OK is still returned.
 */
static int xen_pci_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct device *dev = data;
	int r = 0;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		r = xen_add_device(dev);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		r = xen_remove_device(dev);
		break;
	default:
		return NOTIFY_DONE;
	}
	if (r)
		dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
			action == BUS_NOTIFY_ADD_DEVICE ? "add" :
			(action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
	return NOTIFY_OK;
}
201
/* Notifier that forwards PCI bus add/remove events to the hypervisor. */
static struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};
205
/*
 * Hook the notifier onto the PCI bus, but only when running as the Xen
 * initial domain (dom0); other domains do not report physical devices.
 */
static int __init register_xen_pci_notifier(void)
{
	if (!xen_initial_domain())
		return 0;

	return bus_register_notifier(&pci_bus_type, &device_nb);
}

arch_initcall(register_xen_pci_notifier);
215
#ifdef CONFIG_PCI_MMCONFIG
/*
 * Report every parsed MCFG (PCIe MMCONFIG/ECAM) region to the Xen
 * hypervisor so it can check and reserve them.
 *
 * Quietly returns 0 when not running as dom0, when MMCONFIG probing is
 * disabled, or when no regions were parsed.  A hypervisor that lacks
 * PHYSDEVOP_pci_mmcfg_reserved (-ENOSYS) is tolerated; any other
 * per-region failure is logged and the walk continues.
 */
static int xen_mcfg_late(void)
{
	struct pci_mmcfg_region *cfg;
	int rc;

	if (!xen_initial_domain())
		return 0;

	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 0;

	if (list_empty(&pci_mmcfg_list))
		return 0;

	/* Check whether they are in the right area. */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		struct physdev_pci_mmcfg_reserved r;

		r.address = cfg->address;
		r.segment = cfg->segment;
		r.start_bus = cfg->start_bus;
		r.end_bus = cfg->end_bus;
		r.flags = XEN_PCI_MMCFG_RESERVED;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
		switch (rc) {
		case 0:
		case -ENOSYS:
			continue;

		default:
			/* Kernel style: keep user-visible strings unsplit. */
			pr_warn("Failed to report MMCONFIG reservation state for %s to hypervisor (%d)\n",
				cfg->name, rc);
		}
	}
	return 0;
}
#endif