1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
4 * Author: Alex Williamson <alex.williamson@redhat.com>
5 */
6#ifndef __VFIO_VFIO_H__
7#define __VFIO_VFIO_H__
8
9#include <linux/file.h>
10#include <linux/device.h>
11#include <linux/cdev.h>
12#include <linux/module.h>
13
14struct iommufd_ctx;
15struct iommu_group;
16struct vfio_device;
17struct vfio_container;
18
/*
 * Registration reference pair: try_get returns false when a reference can
 * no longer be taken (device going away); put releases it.
 */
void vfio_device_put_registration(struct vfio_device *device);
bool vfio_device_try_get_registration(struct vfio_device *device);
/*
 * Open/close the device's file state. @iommufd is the iommufd context the
 * device is bound to; NULL presumably selects the legacy container path —
 * confirm against the callers in the vfio core.
 */
int vfio_device_open(struct vfio_device *device,
		     struct iommufd_ctx *iommufd, struct kvm *kvm);
void vfio_device_close(struct vfio_device *device,
		       struct iommufd_ctx *iommufd);

/* file_operations backing the per-device file descriptor */
extern const struct file_operations vfio_device_fops;
27
/* How a group's devices relate to the IOMMU: real, faked, or absent. */
enum vfio_group_type {
	/*
	 * Physical device with IOMMU backing.
	 */
	VFIO_IOMMU,

	/*
	 * Virtual device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI. The user of these devices must not be able to
	 * directly trigger unmediated DMA.
	 */
	VFIO_EMULATED_IOMMU,

	/*
	 * Physical device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI. Users can trigger unmediated DMA by the device,
	 * usage is highly dangerous, requires an explicit opt-in and will
	 * taint the kernel.
	 */
	VFIO_NO_IOMMU,
};
51
/* Per-iommu_group state exposed to userspace as a VFIO group. */
struct vfio_group {
	struct device dev;		/* embedded device for the group node */
	struct cdev cdev;		/* char device backing the group fd */
	/*
	 * When drivers is non-zero a driver is attached to the struct device
	 * that provided the iommu_group and thus the iommu_group is a valid
	 * pointer. When drivers is 0 the driver is being detached. Once users
	 * reaches 0 then the iommu_group is invalid.
	 */
	refcount_t drivers;
	unsigned int container_users;	/* opens currently using the container */
	struct iommu_group *iommu_group;
	struct vfio_container *container;
	struct list_head device_list;	/* vfio_devices belonging to the group */
	struct mutex device_lock;	/* presumably guards device_list — confirm */
	struct list_head vfio_next;	/* link in the core's group list */
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
	struct list_head container_next; /* link in the container's group list */
#endif
	enum vfio_group_type type;
	struct kvm *kvm;		/* associated KVM, when set by userspace */
	struct mutex group_lock;
	struct file *opened_file;	/* non-NULL while the group fd is open */
	struct blocking_notifier_head notifier;
	struct iommufd_ctx *iommufd;	/* set when the group is bound via iommufd */
};
78
/* Group membership and lifecycle entry points used by the vfio core. */
int vfio_device_set_group(struct vfio_device *device,
			  enum vfio_group_type type);
void vfio_device_remove_group(struct vfio_device *device);
void vfio_device_group_register(struct vfio_device *device);
void vfio_device_group_unregister(struct vfio_device *device);
/* use/unuse bracket a device's reliance on the group's IOMMU backing */
int vfio_device_group_use_iommu(struct vfio_device *device);
void vfio_device_group_unuse_iommu(struct vfio_device *device);
void vfio_device_group_close(struct vfio_device *device);
bool vfio_device_has_container(struct vfio_device *device);
/* module init/teardown for the group subsystem */
int __init vfio_group_init(void);
void vfio_group_cleanup(void);
90
91#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
/* Events delivered through the backend driver's ->notify() callback. */
enum vfio_iommu_notify_type {
	VFIO_IOMMU_CONTAINER_CLOSE = 0,	/* the owning container is closing */
};
96
/**
 * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
 * @name: human-readable backend name
 * @owner: module providing this backend, for reference counting
 * @open: allocate backend private data for a new container; @arg is the
 *        ioctl argument selecting the IOMMU model
 * @release: free the private data returned by @open
 * @ioctl: handle a container ioctl against this backend
 * @attach_group: attach an iommu_group of the given vfio_group_type
 * @detach_group: undo @attach_group
 * @pin_pages: pin @npage pages at @user_iova with @prot access, returning
 *             them in @pages
 * @unpin_pages: release pages previously pinned at @user_iova
 * @register_device: notify the backend of a new vfio_device
 * @unregister_device: notify the backend a vfio_device is going away
 * @dma_rw: copy @count bytes between @data and the IOVA mapping at
 *          @user_iova; @write selects the direction
 * @group_iommu_domain: return the iommu_domain backing @group
 * @notify: deliver a vfio_iommu_notify_type event to the backend
 */
struct vfio_iommu_driver_ops {
	char *name;
	struct module *owner;
	void *(*open)(unsigned long arg);
	void (*release)(void *iommu_data);
	long (*ioctl)(void *iommu_data, unsigned int cmd,
		      unsigned long arg);
	int (*attach_group)(void *iommu_data,
			    struct iommu_group *group,
			    enum vfio_group_type);
	void (*detach_group)(void *iommu_data,
			     struct iommu_group *group);
	int (*pin_pages)(void *iommu_data,
			 struct iommu_group *group,
			 dma_addr_t user_iova,
			 int npage, int prot,
			 struct page **pages);
	void (*unpin_pages)(void *iommu_data,
			    dma_addr_t user_iova, int npage);
	void (*register_device)(void *iommu_data,
				struct vfio_device *vdev);
	void (*unregister_device)(void *iommu_data,
				  struct vfio_device *vdev);
	int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
		      void *data, size_t count, bool write);
	struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
						   struct iommu_group *group);
	void (*notify)(void *iommu_data,
		       enum vfio_iommu_notify_type event);
};
130
/* A registered IOMMU backend: its ops and its link in the driver list. */
struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;	/* list linkage */
};
135
/* Register/unregister an IOMMU backend with the container layer. */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);

/* Resolve a container fd's file to its vfio_container, NULL if not one. */
struct vfio_container *vfio_container_from_file(struct file *filep);
/* use/unuse bracket a group's dependence on its container */
int vfio_group_use_container(struct vfio_group *group);
void vfio_group_unuse_container(struct vfio_group *group);
int vfio_container_attach_group(struct vfio_container *container,
				struct vfio_group *group);
void vfio_group_detach_container(struct vfio_group *group);
void vfio_device_container_register(struct vfio_device *device);
void vfio_device_container_unregister(struct vfio_device *device);
/* Forward pin/unpin/dma_rw to the container's IOMMU backend. */
int vfio_device_container_pin_pages(struct vfio_device *device,
				    dma_addr_t iova, int npage,
				    int prot, struct page **pages);
void vfio_device_container_unpin_pages(struct vfio_device *device,
				       dma_addr_t iova, int npage);
int vfio_device_container_dma_rw(struct vfio_device *device,
				 dma_addr_t iova, void *data,
				 size_t len, bool write);

/* module init/teardown for the container subsystem */
int __init vfio_container_init(void);
void vfio_container_cleanup(void);
158#else
/*
 * CONFIG_VFIO_CONTAINER=n stubs: the legacy container path is compiled
 * out, so lookups return NULL, operations that need a container fail
 * with -EOPNOTSUPP, and the notification hooks become no-ops.
 */
static inline struct vfio_container *
vfio_container_from_file(struct file *filep)
{
	return NULL;
}

static inline int vfio_group_use_container(struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_unuse_container(struct vfio_group *group)
{
}

static inline int vfio_container_attach_group(struct vfio_container *container,
					      struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_detach_container(struct vfio_group *group)
{
}

static inline void vfio_device_container_register(struct vfio_device *device)
{
}

static inline void vfio_device_container_unregister(struct vfio_device *device)
{
}

static inline int vfio_device_container_pin_pages(struct vfio_device *device,
						  dma_addr_t iova, int npage,
						  int prot, struct page **pages)
{
	return -EOPNOTSUPP;
}

static inline void vfio_device_container_unpin_pages(struct vfio_device *device,
						     dma_addr_t iova, int npage)
{
}

static inline int vfio_device_container_dma_rw(struct vfio_device *device,
					       dma_addr_t iova, void *data,
					       size_t len, bool write)
{
	return -EOPNOTSUPP;
}

/* Nothing to set up or tear down without the container subsystem. */
static inline int vfio_container_init(void)
{
	return 0;
}
static inline void vfio_container_cleanup(void)
{
}
218#endif
219
#if IS_ENABLED(CONFIG_IOMMUFD)
/* Bind/unbind a device to an iommufd context (native iommufd path). */
int vfio_iommufd_bind(struct vfio_device *device, struct iommufd_ctx *ictx);
void vfio_iommufd_unbind(struct vfio_device *device);
#else
/* CONFIG_IOMMUFD=n: binding to an iommufd context is unsupported. */
static inline int vfio_iommufd_bind(struct vfio_device *device,
				    struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}

static inline void vfio_iommufd_unbind(struct vfio_device *device)
{
}
#endif
234
#if IS_ENABLED(CONFIG_VFIO_VIRQFD)
/* Init/exit for the virqfd (eventfd IRQ bypass) support. */
int __init vfio_virqfd_init(void);
void vfio_virqfd_exit(void);
#else
/* CONFIG_VFIO_VIRQFD=n: nothing to set up, init trivially succeeds. */
static inline int __init vfio_virqfd_init(void)
{
	return 0;
}
static inline void vfio_virqfd_exit(void)
{
}
#endif
247
#ifdef CONFIG_VFIO_NOIOMMU
/* Opt-in flag for the dangerous no-IOMMU mode (see VFIO_NO_IOMMU above). */
extern bool vfio_noiommu __read_mostly;
#else
/* Constant false so "if (vfio_noiommu)" compiles away when disabled. */
enum { vfio_noiommu = false };
#endif

#endif /* __VFIO_VFIO_H__ */