drivers/xen/pci.c (v3.1)
 
/*
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Weidong Han <weidong.han@intel.com>
 */

#include <linux/pci.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"

static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);

#ifdef CONFIG_PCI_IOV
	if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus		= pci_dev->bus->number,
			.devfn		= pci_dev->devfn,
			.is_virtfn	= 1,
			.physfn.bus	= pci_dev->physfn->bus->number,
			.physfn.devfn	= pci_dev->physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
			&manage_pci_ext);
	} else
#endif
	if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus		= pci_dev->bus->number,
			.devfn		= pci_dev->devfn,
			.is_extfn	= 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
			&manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus	= pci_dev->bus->number,
			.devfn	= pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
			&manage_pci);
	}

	return r;
}

static int xen_remove_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct physdev_manage_pci manage_pci;

	manage_pci.bus = pci_dev->bus->number;
	manage_pci.devfn = pci_dev->devfn;

	r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
		&manage_pci);

	return r;
}

static int xen_pci_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct device *dev = data;
	int r = 0;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		r = xen_add_device(dev);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		r = xen_remove_device(dev);
		break;
	default:
		break;
	}

	return r;
}

struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};

static int __init register_xen_pci_notifier(void)
{
	if (!xen_initial_domain())
		return 0;

	return bus_register_notifier(&pci_bus_type, &device_nb);
}

arch_initcall(register_xen_pci_notifier);
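
The v3.1 version above hooks into PCI device discovery purely through a bus notifier that is registered at arch_initcall time and never unregistered. For illustration only, a minimal out-of-tree module following the same pattern might look like the sketch below; the module, function, and message names are invented for this example and are not part of the file above.

#include <linux/module.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pci.h>

/* Log add/remove events on pci_bus_type, mirroring the hook used above. */
static int demo_pci_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		dev_info(dev, "PCI device added\n");
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		dev_info(dev, "PCI device removed\n");
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_pci_notifier,
};

static int __init demo_init(void)
{
	return bus_register_notifier(&pci_bus_type, &demo_nb);
}

static void __exit demo_exit(void)
{
	/* Unlike the built-in file above, a module must unregister on unload. */
	bus_unregister_notifier(&pci_bus_type, &demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");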

drivers/xen/pci.c (v6.8)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Intel Corporation.
 *
 * Author: Weidong Han <weidong.han@intel.com>
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <xen/pci.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>

static int xen_mcfg_late(void);
#endif

static bool __read_mostly pci_seg_supported = true;

static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
	/*
	 * Reserve MCFG areas in Xen on first invocation due to this being
	 * potentially called from inside of acpi_init immediately after
	 * MCFG table has been finally parsed.
	 */
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif
	if (pci_seg_supported) {
		struct {
			struct physdev_pci_device_add add;
			uint32_t pxm;
		} add_ext = {
			.add.seg = pci_domain_nr(pci_dev->bus),
			.add.bus = pci_dev->bus->number,
			.add.devfn = pci_dev->devfn
		};
		struct physdev_pci_device_add *add = &add_ext.add;

#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
			/*
			 * This device was not listed in the ACPI name space at
			 * all. Try to get acpi handle of parent pci bus.
			 */
			struct pci_bus *pbus;
			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		pci_seg_supported = false;
	}

	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus		= pci_dev->bus->number,
			.devfn		= pci_dev->devfn,
			.is_virtfn	= 1,
			.physfn.bus	= physfn->bus->number,
			.physfn.devfn	= physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
			&manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus		= pci_dev->bus->number,
			.devfn		= pci_dev->devfn,
			.is_extfn	= 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
			&manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus	= pci_dev->bus->number,
			.devfn	= pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
			&manage_pci);
	}

	return r;
}

static int xen_remove_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (pci_seg_supported) {
		struct physdev_pci_device device = {
			.seg = pci_domain_nr(pci_dev->bus),
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
					  &device);
	} else if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
	else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
					  &manage_pci);
	}

	return r;
}

static int xen_pci_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct device *dev = data;
	int r = 0;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		r = xen_add_device(dev);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		r = xen_remove_device(dev);
		break;
	default:
		return NOTIFY_DONE;
	}
	if (r)
		dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
			action == BUS_NOTIFY_ADD_DEVICE ? "add" :
			(action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
	return NOTIFY_OK;
}

static struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};

static int __init register_xen_pci_notifier(void)
{
	if (!xen_initial_domain())
		return 0;

	return bus_register_notifier(&pci_bus_type, &device_nb);
}

arch_initcall(register_xen_pci_notifier);

#ifdef CONFIG_PCI_MMCONFIG
static int xen_mcfg_late(void)
{
	struct pci_mmcfg_region *cfg;
	int rc;

	if (!xen_initial_domain())
		return 0;

	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 0;

	if (list_empty(&pci_mmcfg_list))
		return 0;

	/* Check whether they are in the right area. */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		struct physdev_pci_mmcfg_reserved r;

		r.address = cfg->address;
		r.segment = cfg->segment;
		r.start_bus = cfg->start_bus;
		r.end_bus = cfg->end_bus;
		r.flags = XEN_PCI_MMCFG_RESERVED;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
		switch (rc) {
		case 0:
		case -ENOSYS:
			continue;

		default:
			pr_warn("Failed to report MMCONFIG reservation state for %s to hypervisor (%d)\n",
				cfg->name, rc);
		}
	}
	return 0;
}
#endif

#ifdef CONFIG_XEN_DOM0
struct xen_device_domain_owner {
	domid_t domain;
	struct pci_dev *dev;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static LIST_HEAD(dev_domain_list);

static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	list_for_each_entry(owner, &dev_domain_list, list) {
		if (owner->dev == dev)
			return owner;
	}
	return NULL;
}

int xen_find_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;
	int domain = -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (owner)
		domain = owner->domain;
	spin_unlock(&dev_domain_list_spinlock);
	return domain;
}
EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);

int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
{
	struct xen_device_domain_owner *owner;

	owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
	if (!owner)
		return -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	if (find_device(dev)) {
		spin_unlock(&dev_domain_list_spinlock);
		kfree(owner);
		return -EEXIST;
	}
	owner->domain = domain;
	owner->dev = dev;
	list_add_tail(&owner->list, &dev_domain_list);
	spin_unlock(&dev_domain_list_spinlock);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);

int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (!owner) {
		spin_unlock(&dev_domain_list_spinlock);
		return -ENODEV;
	}
	list_del(&owner->list);
	spin_unlock(&dev_domain_list_spinlock);
	kfree(owner);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
#endif
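
The CONFIG_XEN_DOM0 block above exports a small device-to-domain ownership API (xen_register_device_domain_owner, xen_find_device_domain_owner, xen_unregister_device_domain_owner). A minimal sketch of how a caller such as a passthrough backend might use it follows; it assumes the prototypes are visible through the <xen/pci.h> header this file already includes, and the helper name and calling context are invented for illustration.

#include <linux/pci.h>
#include <xen/pci.h>	/* assumed to declare the *_device_domain_owner helpers */

/* Hypothetical helper: record that @domid owns @pdev, report the recorded
 * owner, then drop the record again. Error handling kept minimal. */
static int demo_track_owner(struct pci_dev *pdev, uint16_t domid)
{
	int ret;

	ret = xen_register_device_domain_owner(pdev, domid);
	if (ret)	/* -EEXIST if @pdev is already tracked */
		return ret;

	/* Returns the owning domid, or -ENODEV if no record exists. */
	dev_info(&pdev->dev, "owned by domain %d\n",
		 xen_find_device_domain_owner(pdev));

	return xen_unregister_device_domain_owner(pdev);
}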