drivers/vfio/vfio.h (v6.8)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
#ifndef __VFIO_VFIO_H__
#define __VFIO_VFIO_H__

#include <linux/file.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/vfio.h>

struct iommufd_ctx;
struct iommu_group;
struct vfio_container;

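/*
 * Note (descriptive addition, not in the upstream header): vfio_device_file
 * holds the per-open state of a VFIO device file descriptor, whether the fd
 * was obtained through the legacy group interface or the device cdev.
 */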
struct vfio_device_file {
	struct vfio_device *device;
	struct vfio_group *group;

	u8 access_granted;
	u32 devid; /* only valid when iommufd is valid */
	spinlock_t kvm_ref_lock; /* protect kvm field */
	struct kvm *kvm;
	struct iommufd_ctx *iommufd; /* protected by struct vfio_device_set::lock */
};

void vfio_device_put_registration(struct vfio_device *device);
bool vfio_device_try_get_registration(struct vfio_device *device);
int vfio_df_open(struct vfio_device_file *df);
void vfio_df_close(struct vfio_device_file *df);
struct vfio_device_file *
vfio_allocate_device_file(struct vfio_device *device);
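/*
 * Illustrative call order, inferred from the declarations above rather than
 * taken from this header:
 *
 *	df = vfio_allocate_device_file(device);
 *	...
 *	ret = vfio_df_open(df);
 *	...
 *	vfio_df_close(df);
 */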

extern const struct file_operations vfio_device_fops;

#ifdef CONFIG_VFIO_NOIOMMU
extern bool vfio_noiommu __read_mostly;
#else
enum { vfio_noiommu = false };
#endif
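/*
 * Note: when CONFIG_VFIO_NOIOMMU is disabled, the anonymous enum gives
 * vfio_noiommu a constant value of false without allocating storage, so
 * callers can test it in plain C and the compiler drops the dead branches.
 */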

enum vfio_group_type {
	/*
	 * Physical device with IOMMU backing.
	 */
	VFIO_IOMMU,

	/*
	 * Virtual device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI.  The user of these devices must not be able to
	 * directly trigger unmediated DMA.
	 */
	VFIO_EMULATED_IOMMU,

	/*
	 * Physical device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI.  Users can trigger unmediated DMA by the device;
	 * usage is highly dangerous, requires an explicit opt-in and will
	 * taint the kernel.
	 */
	VFIO_NO_IOMMU,
};
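/*
 * Note (assumption based on the exported registration helpers, not stated
 * in this header): vfio_register_group_dev() registers a device as
 * VFIO_IOMMU, vfio_register_emulated_iommu_dev() registers it as
 * VFIO_EMULATED_IOMMU, and VFIO_NO_IOMMU is used only when the vfio_noiommu
 * module parameter is enabled.
 */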

#if IS_ENABLED(CONFIG_VFIO_GROUP)
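/*
 * Note (descriptive addition): one vfio_group is expected per iommu_group;
 * it is exposed to userspace as a character device under /dev/vfio/.
 */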
struct vfio_group {
	struct device			dev;
	struct cdev			cdev;
	/*
	 * When drivers is non-zero a driver is attached to the struct device
	 * that provided the iommu_group and thus the iommu_group is a valid
	 * pointer. Once drivers reaches 0 the driver is being detached and
	 * the iommu_group becomes invalid.
	 */
	refcount_t			drivers;
	unsigned int			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct list_head		vfio_next;
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
	struct list_head		container_next;
#endif
	enum vfio_group_type		type;
	struct mutex			group_lock;
	struct kvm			*kvm;
	struct file			*opened_file;
	struct blocking_notifier_head	notifier;
	struct iommufd_ctx		*iommufd;
	spinlock_t			kvm_ref_lock;
	unsigned int			cdev_device_open_cnt;
};

int vfio_device_block_group(struct vfio_device *device);
void vfio_device_unblock_group(struct vfio_device *device);
int vfio_device_set_group(struct vfio_device *device,
			  enum vfio_group_type type);
void vfio_device_remove_group(struct vfio_device *device);
void vfio_device_group_register(struct vfio_device *device);
void vfio_device_group_unregister(struct vfio_device *device);
int vfio_device_group_use_iommu(struct vfio_device *device);
void vfio_device_group_unuse_iommu(struct vfio_device *device);
void vfio_df_group_close(struct vfio_device_file *df);
struct vfio_group *vfio_group_from_file(struct file *file);
bool vfio_group_enforced_coherent(struct vfio_group *group);
void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
bool vfio_device_has_container(struct vfio_device *device);
int __init vfio_group_init(void);
void vfio_group_cleanup(void);

static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
{
	return IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
	       vdev->group->type == VFIO_NO_IOMMU;
}
#else
struct vfio_group;

static inline int vfio_device_block_group(struct vfio_device *device)
{
	return 0;
}

static inline void vfio_device_unblock_group(struct vfio_device *device)
{
}

static inline int vfio_device_set_group(struct vfio_device *device,
					enum vfio_group_type type)
{
	return 0;
}

static inline void vfio_device_remove_group(struct vfio_device *device)
{
}

static inline void vfio_device_group_register(struct vfio_device *device)
{
}

static inline void vfio_device_group_unregister(struct vfio_device *device)
{
}

static inline int vfio_device_group_use_iommu(struct vfio_device *device)
{
	return -EOPNOTSUPP;
}

static inline void vfio_device_group_unuse_iommu(struct vfio_device *device)
{
}

static inline void vfio_df_group_close(struct vfio_device_file *df)
{
}

static inline struct vfio_group *vfio_group_from_file(struct file *file)
{
	return NULL;
}

static inline bool vfio_group_enforced_coherent(struct vfio_group *group)
{
	return true;
}

static inline void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
}

static inline bool vfio_device_has_container(struct vfio_device *device)
{
	return false;
}

static inline int __init vfio_group_init(void)
{
	return 0;
}

static inline void vfio_group_cleanup(void)
{
}

static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
{
	return false;
}
#endif /* CONFIG_VFIO_GROUP */

#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
/**
 * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
 */
struct vfio_iommu_driver_ops {
	char		*name;
	struct module	*owner;
	void		*(*open)(unsigned long arg);
	void		(*release)(void *iommu_data);
	long		(*ioctl)(void *iommu_data, unsigned int cmd,
				 unsigned long arg);
	int		(*attach_group)(void *iommu_data,
					struct iommu_group *group,
					enum vfio_group_type);
	void		(*detach_group)(void *iommu_data,
					struct iommu_group *group);
	int		(*pin_pages)(void *iommu_data,
				     struct iommu_group *group,
				     dma_addr_t user_iova,
				     int npage, int prot,
				     struct page **pages);
	void		(*unpin_pages)(void *iommu_data,
				       dma_addr_t user_iova, int npage);
	void		(*register_device)(void *iommu_data,
					   struct vfio_device *vdev);
	void		(*unregister_device)(void *iommu_data,
					     struct vfio_device *vdev);
	int		(*dma_rw)(void *iommu_data, dma_addr_t user_iova,
				  void *data, size_t count, bool write);
	struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
						   struct iommu_group *group);
};

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);
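/*
 * Illustrative registration of an IOMMU backend; a sketch only, and the
 * foo_* names are hypothetical:
 *
 *	static const struct vfio_iommu_driver_ops foo_ops = {
 *		.name		= "foo",
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.attach_group	= foo_attach_group,
 *		.detach_group	= foo_detach_group,
 *	};
 *
 *	ret = vfio_register_iommu_driver(&foo_ops);	(module init)
 *	...
 *	vfio_unregister_iommu_driver(&foo_ops);		(module exit)
 */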

struct vfio_container *vfio_container_from_file(struct file *filep);
int vfio_group_use_container(struct vfio_group *group);
void vfio_group_unuse_container(struct vfio_group *group);
int vfio_container_attach_group(struct vfio_container *container,
				struct vfio_group *group);
void vfio_group_detach_container(struct vfio_group *group);
void vfio_device_container_register(struct vfio_device *device);
void vfio_device_container_unregister(struct vfio_device *device);
int vfio_device_container_pin_pages(struct vfio_device *device,
				    dma_addr_t iova, int npage,
				    int prot, struct page **pages);
void vfio_device_container_unpin_pages(struct vfio_device *device,
				       dma_addr_t iova, int npage);
int vfio_device_container_dma_rw(struct vfio_device *device,
				 dma_addr_t iova, void *data,
				 size_t len, bool write);
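/*
 * Note (assumption about the callers, not stated here): these
 * vfio_device_container_* helpers act as the legacy-container backends for
 * the exported vfio_pin_pages()/vfio_unpin_pages()/vfio_dma_rw() API when a
 * device is opened through a container rather than through iommufd.
 */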

int __init vfio_container_init(void);
void vfio_container_cleanup(void);
#else
static inline struct vfio_container *
vfio_container_from_file(struct file *filep)
{
	return NULL;
}

static inline int vfio_group_use_container(struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_unuse_container(struct vfio_group *group)
{
}

static inline int vfio_container_attach_group(struct vfio_container *container,
					      struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_detach_container(struct vfio_group *group)
{
}

static inline void vfio_device_container_register(struct vfio_device *device)
{
}

static inline void vfio_device_container_unregister(struct vfio_device *device)
{
}

static inline int vfio_device_container_pin_pages(struct vfio_device *device,
						  dma_addr_t iova, int npage,
						  int prot, struct page **pages)
{
	return -EOPNOTSUPP;
}

static inline void vfio_device_container_unpin_pages(struct vfio_device *device,
						     dma_addr_t iova, int npage)
{
}

static inline int vfio_device_container_dma_rw(struct vfio_device *device,
					       dma_addr_t iova, void *data,
					       size_t len, bool write)
{
	return -EOPNOTSUPP;
}

static inline int vfio_container_init(void)
{
	return 0;
}
static inline void vfio_container_cleanup(void)
{
}
#endif

#if IS_ENABLED(CONFIG_IOMMUFD)
bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
					 struct iommufd_ctx *ictx);
int vfio_df_iommufd_bind(struct vfio_device_file *df);
void vfio_df_iommufd_unbind(struct vfio_device_file *df);
int vfio_iommufd_compat_attach_ioas(struct vfio_device *device,
				    struct iommufd_ctx *ictx);
#else
static inline bool
vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
				    struct iommufd_ctx *ictx)
{
	return false;
}

static inline int vfio_df_iommufd_bind(struct vfio_device_file *df)
{
	return -EOPNOTSUPP;
}

static inline void vfio_df_iommufd_unbind(struct vfio_device_file *df)
{
}

static inline int
vfio_iommufd_compat_attach_ioas(struct vfio_device *device,
				struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}
#endif

int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
			    struct vfio_device_attach_iommufd_pt __user *arg);
int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
			    struct vfio_device_detach_iommufd_pt __user *arg);

#if IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV)
void vfio_init_device_cdev(struct vfio_device *device);

static inline int vfio_device_add(struct vfio_device *device)
{
	/* the cdev path does not support noiommu devices */
	if (vfio_device_is_noiommu(device))
		return device_add(&device->device);
	vfio_init_device_cdev(device);
	return cdev_device_add(&device->cdev, &device->device);
}
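/*
 * Note: cdev_device_add() registers the char device and the struct device
 * together, so the cdev only becomes visible to userspace when both
 * succeed; noiommu devices skip the cdev and use a plain device_add().
 */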

static inline void vfio_device_del(struct vfio_device *device)
{
	if (vfio_device_is_noiommu(device))
		device_del(&device->device);
	else
		cdev_device_del(&device->cdev, &device->device);
}

int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep);
long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
				struct vfio_device_bind_iommufd __user *arg);
void vfio_df_unbind_iommufd(struct vfio_device_file *df);
int vfio_cdev_init(struct class *device_class);
void vfio_cdev_cleanup(void);
#else
static inline void vfio_init_device_cdev(struct vfio_device *device)
{
}

static inline int vfio_device_add(struct vfio_device *device)
{
	return device_add(&device->device);
}

static inline void vfio_device_del(struct vfio_device *device)
{
	device_del(&device->device);
}

static inline int vfio_device_fops_cdev_open(struct inode *inode,
					     struct file *filep)
{
	return 0;
}

static inline long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
					      struct vfio_device_bind_iommufd __user *arg)
{
	return -ENOTTY;
}

static inline void vfio_df_unbind_iommufd(struct vfio_device_file *df)
{
}

static inline int vfio_cdev_init(struct class *device_class)
{
	return 0;
}

static inline void vfio_cdev_cleanup(void)
{
}
#endif /* CONFIG_VFIO_DEVICE_CDEV */

#if IS_ENABLED(CONFIG_VFIO_VIRQFD)
int __init vfio_virqfd_init(void);
void vfio_virqfd_exit(void);
#else
static inline int __init vfio_virqfd_init(void)
{
	return 0;
}
static inline void vfio_virqfd_exit(void)
{
}
#endif

#ifdef CONFIG_HAVE_KVM
void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
void vfio_device_put_kvm(struct vfio_device *device);
#else
static inline void vfio_device_get_kvm_safe(struct vfio_device *device,
					    struct kvm *kvm)
{
}

static inline void vfio_device_put_kvm(struct vfio_device *device)
{
}
#endif
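/*
 * Note (assumption based on the naming and on kvm_ref_lock above):
 * vfio_device_get_kvm_safe() is expected to take a reference on the KVM
 * instance associated with an open device, and vfio_device_put_kvm() to
 * drop it again.
 */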

#ifdef CONFIG_VFIO_DEBUGFS
void vfio_debugfs_create_root(void);
void vfio_debugfs_remove_root(void);

void vfio_device_debugfs_init(struct vfio_device *vdev);
void vfio_device_debugfs_exit(struct vfio_device *vdev);
#else
static inline void vfio_debugfs_create_root(void) { }
static inline void vfio_debugfs_remove_root(void) { }

static inline void vfio_device_debugfs_init(struct vfio_device *vdev) { }
static inline void vfio_device_debugfs_exit(struct vfio_device *vdev) { }
#endif /* CONFIG_VFIO_DEBUGFS */

#endif

drivers/vfio/vfio.h (v6.2)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
#ifndef __VFIO_VFIO_H__
#define __VFIO_VFIO_H__

#include <linux/file.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/module.h>

struct iommufd_ctx;
struct iommu_group;
struct vfio_device;
struct vfio_container;

void vfio_device_put_registration(struct vfio_device *device);
bool vfio_device_try_get_registration(struct vfio_device *device);
int vfio_device_open(struct vfio_device *device,
		     struct iommufd_ctx *iommufd, struct kvm *kvm);
void vfio_device_close(struct vfio_device *device,
		       struct iommufd_ctx *iommufd);

extern const struct file_operations vfio_device_fops;

enum vfio_group_type {
	/*
	 * Physical device with IOMMU backing.
	 */
	VFIO_IOMMU,

	/*
	 * Virtual device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI.  The user of these devices must not be able to
	 * directly trigger unmediated DMA.
	 */
	VFIO_EMULATED_IOMMU,

	/*
	 * Physical device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI.  Users can trigger unmediated DMA by the device;
	 * usage is highly dangerous, requires an explicit opt-in and will
	 * taint the kernel.
	 */
	VFIO_NO_IOMMU,
};

struct vfio_group {
	struct device			dev;
	struct cdev			cdev;
	/*
	 * When drivers is non-zero a driver is attached to the struct device
	 * that provided the iommu_group and thus the iommu_group is a valid
	 * pointer. Once drivers reaches 0 the driver is being detached and
	 * the iommu_group becomes invalid.
	 */
	refcount_t			drivers;
	unsigned int			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct list_head		vfio_next;
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
	struct list_head		container_next;
#endif
	enum vfio_group_type		type;
	struct mutex			group_lock;
	struct kvm			*kvm;
	struct file			*opened_file;
	struct blocking_notifier_head	notifier;
	struct iommufd_ctx		*iommufd;
};

int vfio_device_set_group(struct vfio_device *device,
			  enum vfio_group_type type);
void vfio_device_remove_group(struct vfio_device *device);
void vfio_device_group_register(struct vfio_device *device);
void vfio_device_group_unregister(struct vfio_device *device);
int vfio_device_group_use_iommu(struct vfio_device *device);
void vfio_device_group_unuse_iommu(struct vfio_device *device);
void vfio_device_group_close(struct vfio_device *device);
bool vfio_device_has_container(struct vfio_device *device);
int __init vfio_group_init(void);
void vfio_group_cleanup(void);

#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
/* events for the backend driver notify callback */
enum vfio_iommu_notify_type {
	VFIO_IOMMU_CONTAINER_CLOSE = 0,
};
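/*
 * Note (assumption, not stated here): VFIO_IOMMU_CONTAINER_CLOSE is
 * delivered through the ->notify() callback below when the container file
 * is being released, letting the backend quiesce any outstanding work.
 */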

/**
 * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
 */
struct vfio_iommu_driver_ops {
	char		*name;
	struct module	*owner;
	void		*(*open)(unsigned long arg);
	void		(*release)(void *iommu_data);
	long		(*ioctl)(void *iommu_data, unsigned int cmd,
				 unsigned long arg);
	int		(*attach_group)(void *iommu_data,
					struct iommu_group *group,
					enum vfio_group_type);
	void		(*detach_group)(void *iommu_data,
					struct iommu_group *group);
	int		(*pin_pages)(void *iommu_data,
				     struct iommu_group *group,
				     dma_addr_t user_iova,
				     int npage, int prot,
				     struct page **pages);
	void		(*unpin_pages)(void *iommu_data,
				       dma_addr_t user_iova, int npage);
	void		(*register_device)(void *iommu_data,
					   struct vfio_device *vdev);
	void		(*unregister_device)(void *iommu_data,
					     struct vfio_device *vdev);
	int		(*dma_rw)(void *iommu_data, dma_addr_t user_iova,
				  void *data, size_t count, bool write);
	struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
						   struct iommu_group *group);
	void		(*notify)(void *iommu_data,
				  enum vfio_iommu_notify_type event);
};

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);

struct vfio_container *vfio_container_from_file(struct file *filep);
int vfio_group_use_container(struct vfio_group *group);
void vfio_group_unuse_container(struct vfio_group *group);
int vfio_container_attach_group(struct vfio_container *container,
				struct vfio_group *group);
void vfio_group_detach_container(struct vfio_group *group);
void vfio_device_container_register(struct vfio_device *device);
void vfio_device_container_unregister(struct vfio_device *device);
int vfio_device_container_pin_pages(struct vfio_device *device,
				    dma_addr_t iova, int npage,
				    int prot, struct page **pages);
void vfio_device_container_unpin_pages(struct vfio_device *device,
				       dma_addr_t iova, int npage);
int vfio_device_container_dma_rw(struct vfio_device *device,
				 dma_addr_t iova, void *data,
				 size_t len, bool write);

int __init vfio_container_init(void);
void vfio_container_cleanup(void);
#else
static inline struct vfio_container *
vfio_container_from_file(struct file *filep)
{
	return NULL;
}

static inline int vfio_group_use_container(struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_unuse_container(struct vfio_group *group)
{
}

static inline int vfio_container_attach_group(struct vfio_container *container,
					      struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_detach_container(struct vfio_group *group)
{
}

static inline void vfio_device_container_register(struct vfio_device *device)
{
}

static inline void vfio_device_container_unregister(struct vfio_device *device)
{
}

static inline int vfio_device_container_pin_pages(struct vfio_device *device,
						  dma_addr_t iova, int npage,
						  int prot, struct page **pages)
{
	return -EOPNOTSUPP;
}

static inline void vfio_device_container_unpin_pages(struct vfio_device *device,
						     dma_addr_t iova, int npage)
{
}

static inline int vfio_device_container_dma_rw(struct vfio_device *device,
					       dma_addr_t iova, void *data,
					       size_t len, bool write)
{
	return -EOPNOTSUPP;
}

static inline int vfio_container_init(void)
{
	return 0;
}
static inline void vfio_container_cleanup(void)
{
}
#endif

#if IS_ENABLED(CONFIG_IOMMUFD)
int vfio_iommufd_bind(struct vfio_device *device, struct iommufd_ctx *ictx);
void vfio_iommufd_unbind(struct vfio_device *device);
#else
static inline int vfio_iommufd_bind(struct vfio_device *device,
				    struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}

static inline void vfio_iommufd_unbind(struct vfio_device *device)
{
}
#endif

#if IS_ENABLED(CONFIG_VFIO_VIRQFD)
int __init vfio_virqfd_init(void);
void vfio_virqfd_exit(void);
#else
static inline int __init vfio_virqfd_init(void)
{
	return 0;
}
static inline void vfio_virqfd_exit(void)
{
}
#endif

#ifdef CONFIG_VFIO_NOIOMMU
extern bool vfio_noiommu __read_mostly;
#else
enum { vfio_noiommu = false };
#endif

#endif