Linux Audio

Check our new training course

Linux kernel drivers training

Mar 31-Apr 9, 2025, special US time zones
Register
Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2023 Intel Corporation.
  4 */
  5#include <linux/vfio.h>
  6#include <linux/iommufd.h>
  7
  8#include "vfio.h"
  9
 10static dev_t device_devt;
 11
/*
 * Initialize @device's character device: derive its dev_t from the
 * major reserved in vfio_cdev_init() plus the per-device index as the
 * minor, and set up the cdev with the shared vfio_device_fops.
 */
void vfio_init_device_cdev(struct vfio_device *device)
{
	device->device.devt = MKDEV(MAJOR(device_devt), device->index);
	cdev_init(&device->cdev, &vfio_device_fops);
	device->cdev.owner = THIS_MODULE;
}
 18
 19/*
 20 * device access via the fd opened by this function is blocked until
 21 * .open_device() is called successfully during BIND_IOMMUFD.
 22 */
 23int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
 24{
 25	struct vfio_device *device = container_of(inode->i_cdev,
 26						  struct vfio_device, cdev);
 27	struct vfio_device_file *df;
 28	int ret;
 29
 30	/* Paired with the put in vfio_device_fops_release() */
 31	if (!vfio_device_try_get_registration(device))
 32		return -ENODEV;
 33
 34	df = vfio_allocate_device_file(device);
 35	if (IS_ERR(df)) {
 36		ret = PTR_ERR(df);
 37		goto err_put_registration;
 38	}
 39
 40	filep->private_data = df;
 41
 
 
 
 
 
 
 
 42	return 0;
 43
 44err_put_registration:
 45	vfio_device_put_registration(device);
 46	return ret;
 47}
 48
/*
 * Take a reference on the KVM currently associated with this device
 * file, if any.  kvm_ref_lock stabilizes df->kvm against concurrent
 * updates while the reference is obtained.
 */
static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
{
	spin_lock(&df->kvm_ref_lock);
	vfio_device_get_kvm_safe(df->device, df->kvm);
	spin_unlock(&df->kvm_ref_lock);
}
 55
/*
 * VFIO_DEVICE_BIND_IOMMUFD: associate this cdev fd with an iommufd
 * context, open the device and unblock access through the fd.  On
 * success the iommufd device id is returned in @arg->out_devid.
 */
long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
				struct vfio_device_bind_iommufd __user *arg)
{
	struct vfio_device *device = df->device;
	struct vfio_device_bind_iommufd bind;
	unsigned long minsz;
	int ret;

	/* out_devid is copied back with sizeof(df->devid) below */
	static_assert(__same_type(arg->out_devid, df->devid));

	minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);

	if (copy_from_user(&bind, arg, minsz))
		return -EFAULT;

	/* No flags are defined for this ioctl yet */
	if (bind.argsz < minsz || bind.flags || bind.iommufd < 0)
		return -EINVAL;

	/* BIND_IOMMUFD only allowed for cdev fds */
	if (df->group)
		return -EINVAL;

	/* Exclude concurrent use of this device via the group path */
	ret = vfio_device_block_group(device);
	if (ret)
		return ret;

	mutex_lock(&device->dev_set->lock);
	/* one device cannot be bound twice */
	if (df->access_granted) {
		ret = -EINVAL;
		goto out_unlock;
	}

	df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
	if (IS_ERR(df->iommufd)) {
		ret = PTR_ERR(df->iommufd);
		df->iommufd = NULL;
		goto out_unlock;
	}

	/*
	 * Before the device open, get the KVM pointer currently
	 * associated with the device file (if there is) and obtain
	 * a reference.  This reference is held until device closed.
	 * Save the pointer in the device for use by drivers.
	 */
	vfio_df_get_kvm_safe(df);

	ret = vfio_df_open(df);
	if (ret)
		goto out_put_kvm;

	/* Report the iommufd device id back to userspace */
	ret = copy_to_user(&arg->out_devid, &df->devid,
			   sizeof(df->devid)) ? -EFAULT : 0;
	if (ret)
		goto out_close_device;

	device->cdev_opened = true;
	/*
	 * Paired with smp_load_acquire() in vfio_device_fops::ioctl/
	 * read/write/mmap
	 */
	smp_store_release(&df->access_granted, true);
	mutex_unlock(&device->dev_set->lock);
	return 0;

out_close_device:
	vfio_df_close(df);
out_put_kvm:
	vfio_device_put_kvm(device);
	iommufd_ctx_put(df->iommufd);
	df->iommufd = NULL;
out_unlock:
	mutex_unlock(&device->dev_set->lock);
	vfio_device_unblock_group(device);
	return ret;
}
133
/*
 * Undo a successful VFIO_DEVICE_BIND_IOMMUFD at fd close time: close
 * the device, drop the KVM and iommufd context references and lift
 * the group block.  No-op if the fd was never successfully bound.
 */
void vfio_df_unbind_iommufd(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;

	/*
	 * In the time of close, there is no contention with another one
	 * changing this flag.  So read df->access_granted without lock
	 * and no smp_load_acquire() is ok.
	 */
	if (!df->access_granted)
		return;

	mutex_lock(&device->dev_set->lock);
	vfio_df_close(df);
	vfio_device_put_kvm(device);
	iommufd_ctx_put(df->iommufd);
	device->cdev_opened = false;
	mutex_unlock(&device->dev_set->lock);
	vfio_device_unblock_group(device);
}
154
/*
 * VFIO_DEVICE_ATTACH_IOMMUFD_PT: attach the device to the IOAS/HWPT
 * identified by @arg->pt_id.  The pt_id is copied back to userspace
 * on success.
 */
int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
			    struct vfio_device_attach_iommufd_pt __user *arg)
{
	struct vfio_device *device = df->device;
	struct vfio_device_attach_iommufd_pt attach;
	unsigned long minsz;
	int ret;

	minsz = offsetofend(struct vfio_device_attach_iommufd_pt, pt_id);

	if (copy_from_user(&attach, arg, minsz))
		return -EFAULT;

	/* No flags are defined for this ioctl yet */
	if (attach.argsz < minsz || attach.flags)
		return -EINVAL;

	mutex_lock(&device->dev_set->lock);
	ret = device->ops->attach_ioas(device, &attach.pt_id);
	if (ret)
		goto out_unlock;

	if (copy_to_user(&arg->pt_id, &attach.pt_id, sizeof(attach.pt_id))) {
		ret = -EFAULT;
		goto out_detach;
	}
	mutex_unlock(&device->dev_set->lock);

	return 0;

out_detach:
	/* Undo the attach if pt_id could not be reported back */
	device->ops->detach_ioas(device);
out_unlock:
	mutex_unlock(&device->dev_set->lock);
	return ret;
}
190
191int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
192			    struct vfio_device_detach_iommufd_pt __user *arg)
193{
194	struct vfio_device *device = df->device;
195	struct vfio_device_detach_iommufd_pt detach;
196	unsigned long minsz;
197
198	minsz = offsetofend(struct vfio_device_detach_iommufd_pt, flags);
199
200	if (copy_from_user(&detach, arg, minsz))
201		return -EFAULT;
202
203	if (detach.argsz < minsz || detach.flags)
204		return -EINVAL;
205
206	mutex_lock(&device->dev_set->lock);
207	device->ops->detach_ioas(device);
208	mutex_unlock(&device->dev_set->lock);
209
210	return 0;
211}
212
213static char *vfio_device_devnode(const struct device *dev, umode_t *mode)
214{
215	return kasprintf(GFP_KERNEL, "vfio/devices/%s", dev_name(dev));
216}
217
218int vfio_cdev_init(struct class *device_class)
219{
220	device_class->devnode = vfio_device_devnode;
221	return alloc_chrdev_region(&device_devt, 0,
222				   MINORMASK + 1, "vfio-dev");
223}
224
/* Release the char device region reserved by vfio_cdev_init(). */
void vfio_cdev_cleanup(void)
{
	unregister_chrdev_region(device_devt, MINORMASK + 1);
}
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2023 Intel Corporation.
  4 */
  5#include <linux/vfio.h>
  6#include <linux/iommufd.h>
  7
  8#include "vfio.h"
  9
 10static dev_t device_devt;
 11
 12void vfio_init_device_cdev(struct vfio_device *device)
 13{
 14	device->device.devt = MKDEV(MAJOR(device_devt), device->index);
 15	cdev_init(&device->cdev, &vfio_device_fops);
 16	device->cdev.owner = THIS_MODULE;
 17}
 18
/*
 * device access via the fd opened by this function is blocked until
 * .open_device() is called successfully during BIND_IOMMUFD.
 */
int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
{
	/* The cdev is embedded in the vfio_device; recover the device */
	struct vfio_device *device = container_of(inode->i_cdev,
						  struct vfio_device, cdev);
	struct vfio_device_file *df;
	int ret;

	/* Paired with the put in vfio_device_fops_release() */
	if (!vfio_device_try_get_registration(device))
		return -ENODEV;

	df = vfio_allocate_device_file(device);
	if (IS_ERR(df)) {
		ret = PTR_ERR(df);
		goto err_put_registration;
	}

	filep->private_data = df;

	/*
	 * Use the pseudo fs inode on the device to link all mmaps
	 * to the same address space, allowing us to unmap all vmas
	 * associated to this device using unmap_mapping_range().
	 */
	filep->f_mapping = device->inode->i_mapping;

	return 0;

err_put_registration:
	vfio_device_put_registration(device);
	return ret;
}
 55
/*
 * Obtain a reference on the KVM associated with this device file, if
 * any, under kvm_ref_lock so df->kvm cannot change underneath us.
 */
static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
{
	spin_lock(&df->kvm_ref_lock);
	vfio_device_get_kvm_safe(df->device, df->kvm);
	spin_unlock(&df->kvm_ref_lock);
}
 62
/*
 * VFIO_DEVICE_BIND_IOMMUFD handler: bind the cdev fd to an iommufd
 * context, open the device and grant access through this fd, copying
 * the iommufd device id back via @arg->out_devid on success.
 */
long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
				struct vfio_device_bind_iommufd __user *arg)
{
	struct vfio_device *device = df->device;
	struct vfio_device_bind_iommufd bind;
	unsigned long minsz;
	int ret;

	/* out_devid must match df->devid, which is copied back below */
	static_assert(__same_type(arg->out_devid, df->devid));

	minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);

	if (copy_from_user(&bind, arg, minsz))
		return -EFAULT;

	/* No flags defined yet; iommufd must be a plausible fd number */
	if (bind.argsz < minsz || bind.flags || bind.iommufd < 0)
		return -EINVAL;

	/* BIND_IOMMUFD only allowed for cdev fds */
	if (df->group)
		return -EINVAL;

	/* Exclude concurrent use of this device via the group path */
	ret = vfio_device_block_group(device);
	if (ret)
		return ret;

	mutex_lock(&device->dev_set->lock);
	/* one device cannot be bound twice */
	if (df->access_granted) {
		ret = -EINVAL;
		goto out_unlock;
	}

	df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
	if (IS_ERR(df->iommufd)) {
		ret = PTR_ERR(df->iommufd);
		df->iommufd = NULL;
		goto out_unlock;
	}

	/*
	 * Before the device open, get the KVM pointer currently
	 * associated with the device file (if there is) and obtain
	 * a reference.  This reference is held until device closed.
	 * Save the pointer in the device for use by drivers.
	 */
	vfio_df_get_kvm_safe(df);

	ret = vfio_df_open(df);
	if (ret)
		goto out_put_kvm;

	ret = copy_to_user(&arg->out_devid, &df->devid,
			   sizeof(df->devid)) ? -EFAULT : 0;
	if (ret)
		goto out_close_device;

	device->cdev_opened = true;
	/*
	 * Paired with smp_load_acquire() in vfio_device_fops::ioctl/
	 * read/write/mmap
	 */
	smp_store_release(&df->access_granted, true);
	mutex_unlock(&device->dev_set->lock);
	return 0;

out_close_device:
	vfio_df_close(df);
out_put_kvm:
	vfio_device_put_kvm(device);
	iommufd_ctx_put(df->iommufd);
	df->iommufd = NULL;
out_unlock:
	mutex_unlock(&device->dev_set->lock);
	vfio_device_unblock_group(device);
	return ret;
}
140
/*
 * Tear down the iommufd binding at fd close: close the device, drop
 * the KVM and iommufd references and unblock the group path.  Does
 * nothing if BIND_IOMMUFD never succeeded on this fd.
 */
void vfio_df_unbind_iommufd(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;

	/*
	 * In the time of close, there is no contention with another one
	 * changing this flag.  So read df->access_granted without lock
	 * and no smp_load_acquire() is ok.
	 */
	if (!df->access_granted)
		return;

	mutex_lock(&device->dev_set->lock);
	vfio_df_close(df);
	vfio_device_put_kvm(device);
	iommufd_ctx_put(df->iommufd);
	device->cdev_opened = false;
	mutex_unlock(&device->dev_set->lock);
	vfio_device_unblock_group(device);
}
161
162int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
163			    struct vfio_device_attach_iommufd_pt __user *arg)
164{
165	struct vfio_device *device = df->device;
166	struct vfio_device_attach_iommufd_pt attach;
167	unsigned long minsz;
168	int ret;
169
170	minsz = offsetofend(struct vfio_device_attach_iommufd_pt, pt_id);
171
172	if (copy_from_user(&attach, arg, minsz))
173		return -EFAULT;
174
175	if (attach.argsz < minsz || attach.flags)
176		return -EINVAL;
177
178	mutex_lock(&device->dev_set->lock);
179	ret = device->ops->attach_ioas(device, &attach.pt_id);
180	if (ret)
181		goto out_unlock;
182
183	if (copy_to_user(&arg->pt_id, &attach.pt_id, sizeof(attach.pt_id))) {
184		ret = -EFAULT;
185		goto out_detach;
186	}
187	mutex_unlock(&device->dev_set->lock);
188
189	return 0;
190
191out_detach:
192	device->ops->detach_ioas(device);
193out_unlock:
194	mutex_unlock(&device->dev_set->lock);
195	return ret;
196}
197
/*
 * VFIO_DEVICE_DETACH_IOMMUFD_PT: detach the device from its current
 * IOAS/HWPT.
 */
int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
			    struct vfio_device_detach_iommufd_pt __user *arg)
{
	struct vfio_device *device = df->device;
	struct vfio_device_detach_iommufd_pt detach;
	unsigned long minsz;

	minsz = offsetofend(struct vfio_device_detach_iommufd_pt, flags);

	if (copy_from_user(&detach, arg, minsz))
		return -EFAULT;

	/* No flags are defined for this ioctl yet */
	if (detach.argsz < minsz || detach.flags)
		return -EINVAL;

	mutex_lock(&device->dev_set->lock);
	device->ops->detach_ioas(device);
	mutex_unlock(&device->dev_set->lock);

	return 0;
}
219
/* Name device nodes under the vfio/devices/ subdirectory of /dev */
static char *vfio_device_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/devices/%s", dev_name(dev));
}
224
/*
 * Install the devnode callback on the vfio device class and reserve
 * a "vfio-dev" char device region of MINORMASK + 1 minors.
 */
int vfio_cdev_init(struct class *device_class)
{
	device_class->devnode = vfio_device_devnode;
	return alloc_chrdev_region(&device_devt, 0,
				   MINORMASK + 1, "vfio-dev");
}
231
/* Return the chrdev region obtained in vfio_cdev_init(). */
void vfio_cdev_cleanup(void)
{
	unregister_chrdev_region(device_devt, MINORMASK + 1);
}