v6.2 (virt/kvm/vfio.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * VFIO-KVM bridge pseudo device
  4 *
  5 * Copyright (C) 2013 Red Hat, Inc.  All rights reserved.
  6 *     Author: Alex Williamson <alex.williamson@redhat.com>
  7 */
  8
  9#include <linux/errno.h>
 10#include <linux/file.h>
 11#include <linux/kvm_host.h>
 12#include <linux/list.h>
 13#include <linux/module.h>
 14#include <linux/mutex.h>
 15#include <linux/slab.h>
 16#include <linux/uaccess.h>
 17#include <linux/vfio.h>
 18#include "vfio.h"
 19
 20#ifdef CONFIG_SPAPR_TCE_IOMMU
 21#include <asm/kvm_ppc.h>
 22#endif
 23
 24struct kvm_vfio_group {
 25	struct list_head node;
 26	struct file *file;
 27#ifdef CONFIG_SPAPR_TCE_IOMMU
 28	struct iommu_group *iommu_group;
 29#endif
 30};
 31
 32struct kvm_vfio {
 33	struct list_head group_list;
 34	struct mutex lock;
 35	bool noncoherent;
 36};
 37
 38static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
 39{
 40	void (*fn)(struct file *file, struct kvm *kvm);
 41
 42	fn = symbol_get(vfio_file_set_kvm);
 43	if (!fn)
 44		return;
 45
 46	fn(file, kvm);
 47
 48	symbol_put(vfio_file_set_kvm);
 49}
 50
 51static bool kvm_vfio_file_enforced_coherent(struct file *file)
 52{
 53	bool (*fn)(struct file *file);
 54	bool ret;
 55
 56	fn = symbol_get(vfio_file_enforced_coherent);
 57	if (!fn)
 58		return false;
 59
 60	ret = fn(file);
 61
 62	symbol_put(vfio_file_enforced_coherent);
 63
 64	return ret;
 65}
 66
 67static bool kvm_vfio_file_is_group(struct file *file)
 68{
 69	bool (*fn)(struct file *file);
 70	bool ret;
 71
 72	fn = symbol_get(vfio_file_is_group);
 73	if (!fn)
 74		return false;
 75
 76	ret = fn(file);
 77
 78	symbol_put(vfio_file_is_group);
 79
 80	return ret;
 81}
 82
 83#ifdef CONFIG_SPAPR_TCE_IOMMU
 84static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
 85{
 86	struct iommu_group *(*fn)(struct file *file);
 87	struct iommu_group *ret;
 88
 89	fn = symbol_get(vfio_file_iommu_group);
 90	if (!fn)
 91		return NULL;
 92
 93	ret = fn(file);
 94
 95	symbol_put(vfio_file_iommu_group);
 96
 97	return ret;
 98}
 99
100static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
101					     struct kvm_vfio_group *kvg)
102{
103	if (WARN_ON_ONCE(!kvg->iommu_group))
104		return;
105
106	kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group);
107	iommu_group_put(kvg->iommu_group);
108	kvg->iommu_group = NULL;
109}
110#endif
111
112/*
113 * Groups can use the same or different IOMMU domains.  If the same then
114 * adding a new group may change the coherency of groups we've previously
115 * been told about.  We don't want to care about any of that so we retest
116 * each group and bail as soon as we find one that's noncoherent.  This
117 * means we only ever [un]register_noncoherent_dma once for the whole device.
118 */
119static void kvm_vfio_update_coherency(struct kvm_device *dev)
120{
121	struct kvm_vfio *kv = dev->private;
122	bool noncoherent = false;
123	struct kvm_vfio_group *kvg;
124
125	mutex_lock(&kv->lock);
126
127	list_for_each_entry(kvg, &kv->group_list, node) {
128		if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
129			noncoherent = true;
130			break;
131		}
132	}
133
134	if (noncoherent != kv->noncoherent) {
135		kv->noncoherent = noncoherent;
136
137		if (kv->noncoherent)
138			kvm_arch_register_noncoherent_dma(dev->kvm);
139		else
140			kvm_arch_unregister_noncoherent_dma(dev->kvm);
141	}
142
143	mutex_unlock(&kv->lock);
144}
145
146static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
147{
148	struct kvm_vfio *kv = dev->private;
149	struct kvm_vfio_group *kvg;
150	struct file *filp;
151	int ret;
152
153	filp = fget(fd);
154	if (!filp)
155		return -EBADF;
156
157	/* Ensure the FD is a vfio group FD. */
158	if (!kvm_vfio_file_is_group(filp)) {
159		ret = -EINVAL;
160		goto err_fput;
161	}
162
163	mutex_lock(&kv->lock);
164
165	list_for_each_entry(kvg, &kv->group_list, node) {
166		if (kvg->file == filp) {
167			ret = -EEXIST;
168			goto err_unlock;
169		}
170	}
171
172	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
173	if (!kvg) {
174		ret = -ENOMEM;
175		goto err_unlock;
176	}
177
178	kvg->file = filp;
179	list_add_tail(&kvg->node, &kv->group_list);
180
181	kvm_arch_start_assignment(dev->kvm);
182
183	mutex_unlock(&kv->lock);
184
185	kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
186	kvm_vfio_update_coherency(dev);
187
188	return 0;
189err_unlock:
190	mutex_unlock(&kv->lock);
191err_fput:
192	fput(filp);
193	return ret;
194}
195
196static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
197{
198	struct kvm_vfio *kv = dev->private;
199	struct kvm_vfio_group *kvg;
200	struct fd f;
201	int ret;
202
203	f = fdget(fd);
204	if (!f.file)
205		return -EBADF;
206
207	ret = -ENOENT;
208
209	mutex_lock(&kv->lock);
210
211	list_for_each_entry(kvg, &kv->group_list, node) {
212		if (kvg->file != f.file)
213			continue;
214
215		list_del(&kvg->node);
216		kvm_arch_end_assignment(dev->kvm);
217#ifdef CONFIG_SPAPR_TCE_IOMMU
218		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
219#endif
220		kvm_vfio_file_set_kvm(kvg->file, NULL);
221		fput(kvg->file);
222		kfree(kvg);
223		ret = 0;
224		break;
225	}
226
227	mutex_unlock(&kv->lock);
228
229	fdput(f);
230
231	kvm_vfio_update_coherency(dev);
232
233	return ret;
234}
235
236#ifdef CONFIG_SPAPR_TCE_IOMMU
237static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
238					void __user *arg)
239{
240	struct kvm_vfio_spapr_tce param;
241	struct kvm_vfio *kv = dev->private;
242	struct kvm_vfio_group *kvg;
243	struct fd f;
244	int ret;
245
246	if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
247		return -EFAULT;
248
249	f = fdget(param.groupfd);
250	if (!f.file)
251		return -EBADF;
252
253	ret = -ENOENT;
254
255	mutex_lock(&kv->lock);
256
257	list_for_each_entry(kvg, &kv->group_list, node) {
258		if (kvg->file != f.file)
259			continue;
260
261		if (!kvg->iommu_group) {
262			kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file);
263			if (WARN_ON_ONCE(!kvg->iommu_group)) {
264				ret = -EIO;
265				goto err_fdput;
266			}
267		}
268
269		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
270						       kvg->iommu_group);
271		break;
272	}
273
274err_fdput:
275	mutex_unlock(&kv->lock);
276	fdput(f);
277	return ret;
278}
279#endif
280
281static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
282			      void __user *arg)
283{
284	int32_t __user *argp = arg;
285	int32_t fd;
286
287	switch (attr) {
288	case KVM_DEV_VFIO_GROUP_ADD:
289		if (get_user(fd, argp))
290			return -EFAULT;
291		return kvm_vfio_group_add(dev, fd);
292
293	case KVM_DEV_VFIO_GROUP_DEL:
294		if (get_user(fd, argp))
295			return -EFAULT;
296		return kvm_vfio_group_del(dev, fd);
297
298#ifdef CONFIG_SPAPR_TCE_IOMMU
299	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
300		return kvm_vfio_group_set_spapr_tce(dev, arg);
301#endif
302	}
303
304	return -ENXIO;
305}
306
307static int kvm_vfio_set_attr(struct kvm_device *dev,
308			     struct kvm_device_attr *attr)
309{
310	switch (attr->group) {
311	case KVM_DEV_VFIO_GROUP:
312		return kvm_vfio_set_group(dev, attr->attr,
313					  u64_to_user_ptr(attr->addr));
314	}
315
316	return -ENXIO;
317}
318
319static int kvm_vfio_has_attr(struct kvm_device *dev,
320			     struct kvm_device_attr *attr)
321{
322	switch (attr->group) {
323	case KVM_DEV_VFIO_GROUP:
324		switch (attr->attr) {
325		case KVM_DEV_VFIO_GROUP_ADD:
326		case KVM_DEV_VFIO_GROUP_DEL:
327#ifdef CONFIG_SPAPR_TCE_IOMMU
328		case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
329#endif
330			return 0;
331		}
332
333		break;
334	}
335
336	return -ENXIO;
337}
338
339static void kvm_vfio_release(struct kvm_device *dev)
340{
341	struct kvm_vfio *kv = dev->private;
342	struct kvm_vfio_group *kvg, *tmp;
343
344	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
345#ifdef CONFIG_SPAPR_TCE_IOMMU
346		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
347#endif
348		kvm_vfio_file_set_kvm(kvg->file, NULL);
349		fput(kvg->file);
350		list_del(&kvg->node);
351		kfree(kvg);
352		kvm_arch_end_assignment(dev->kvm);
353	}
354
355	kvm_vfio_update_coherency(dev);
356
357	kfree(kv);
358	kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */
359}
360
361static int kvm_vfio_create(struct kvm_device *dev, u32 type);
362
363static struct kvm_device_ops kvm_vfio_ops = {
364	.name = "kvm-vfio",
365	.create = kvm_vfio_create,
366	.release = kvm_vfio_release,
367	.set_attr = kvm_vfio_set_attr,
368	.has_attr = kvm_vfio_has_attr,
369};
370
371static int kvm_vfio_create(struct kvm_device *dev, u32 type)
372{
373	struct kvm_device *tmp;
374	struct kvm_vfio *kv;
375
376	/* Only one VFIO "device" per VM */
377	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
378		if (tmp->ops == &kvm_vfio_ops)
379			return -EBUSY;
380
381	kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
382	if (!kv)
383		return -ENOMEM;
384
385	INIT_LIST_HEAD(&kv->group_list);
386	mutex_init(&kv->lock);
387
388	dev->private = kv;
389
390	return 0;
391}
392
393int kvm_vfio_ops_init(void)
394{
395	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
396}
397
398void kvm_vfio_ops_exit(void)
399{
400	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
401}
v4.17 (virt/kvm/vfio.c)
  1/*
  2 * VFIO-KVM bridge pseudo device
  3 *
  4 * Copyright (C) 2013 Red Hat, Inc.  All rights reserved.
  5 *     Author: Alex Williamson <alex.williamson@redhat.com>
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 */
 11
 12#include <linux/errno.h>
 13#include <linux/file.h>
 14#include <linux/kvm_host.h>
 15#include <linux/list.h>
 16#include <linux/module.h>
 17#include <linux/mutex.h>
 18#include <linux/slab.h>
 19#include <linux/uaccess.h>
 20#include <linux/vfio.h>
 21#include "vfio.h"
 22
 23#ifdef CONFIG_SPAPR_TCE_IOMMU
 24#include <asm/kvm_ppc.h>
 25#endif
 26
 27struct kvm_vfio_group {
 28	struct list_head node;
 29	struct vfio_group *vfio_group;
 30};
 31
 32struct kvm_vfio {
 33	struct list_head group_list;
 34	struct mutex lock;
 35	bool noncoherent;
 36};
 37
 38static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
 39{
 40	struct vfio_group *vfio_group;
 41	struct vfio_group *(*fn)(struct file *);
 42
 43	fn = symbol_get(vfio_group_get_external_user);
 44	if (!fn)
 45		return ERR_PTR(-EINVAL);
 46
 47	vfio_group = fn(filep);
 48
 49	symbol_put(vfio_group_get_external_user);
 50
 51	return vfio_group;
 52}
 53
 54static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
 55					       struct file *filep)
 56{
 57	bool ret, (*fn)(struct vfio_group *, struct file *);
 58
 59	fn = symbol_get(vfio_external_group_match_file);
 60	if (!fn)
 61		return false;
 62
 63	ret = fn(group, filep);
 64
 65	symbol_put(vfio_external_group_match_file);
 66
 67	return ret;
 68}
 69
 70static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
 71{
 72	void (*fn)(struct vfio_group *);
 73
 74	fn = symbol_get(vfio_group_put_external_user);
 75	if (!fn)
 76		return;
 77
 78	fn(vfio_group);
 79
 80	symbol_put(vfio_group_put_external_user);
 81}
 82
 83static void kvm_vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
 84{
 85	void (*fn)(struct vfio_group *, struct kvm *);
 86
 87	fn = symbol_get(vfio_group_set_kvm);
 88	if (!fn)
 89		return;
 90
 91	fn(group, kvm);
 92
 93	symbol_put(vfio_group_set_kvm);
 94}
 95
 96static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
 97{
 98	long (*fn)(struct vfio_group *, unsigned long);
 99	long ret;
100
101	fn = symbol_get(vfio_external_check_extension);
102	if (!fn)
103		return false;
104
105	ret = fn(vfio_group, VFIO_DMA_CC_IOMMU);
106
107	symbol_put(vfio_external_check_extension);
108
109	return ret > 0;
110}
111
112#ifdef CONFIG_SPAPR_TCE_IOMMU
113static int kvm_vfio_external_user_iommu_id(struct vfio_group *vfio_group)
114{
115	int (*fn)(struct vfio_group *);
116	int ret = -EINVAL;
117
118	fn = symbol_get(vfio_external_user_iommu_id);
119	if (!fn)
120		return ret;
121
122	ret = fn(vfio_group);
123
124	symbol_put(vfio_external_user_iommu_id);
125
126	return ret;
127}
128
129static struct iommu_group *kvm_vfio_group_get_iommu_group(
130		struct vfio_group *group)
131{
132	int group_id = kvm_vfio_external_user_iommu_id(group);
133
134	if (group_id < 0)
135		return NULL;
136
137	return iommu_group_get_by_id(group_id);
138}
139
140static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
141		struct vfio_group *vfio_group)
142{
143	struct iommu_group *grp = kvm_vfio_group_get_iommu_group(vfio_group);
144
145	if (WARN_ON_ONCE(!grp))
146		return;
147
148	kvm_spapr_tce_release_iommu_group(kvm, grp);
149	iommu_group_put(grp);
150}
151#endif
152
153/*
154 * Groups can use the same or different IOMMU domains.  If the same then
155 * adding a new group may change the coherency of groups we've previously
156 * been told about.  We don't want to care about any of that so we retest
157 * each group and bail as soon as we find one that's noncoherent.  This
158 * means we only ever [un]register_noncoherent_dma once for the whole device.
159 */
160static void kvm_vfio_update_coherency(struct kvm_device *dev)
161{
162	struct kvm_vfio *kv = dev->private;
163	bool noncoherent = false;
164	struct kvm_vfio_group *kvg;
165
166	mutex_lock(&kv->lock);
167
168	list_for_each_entry(kvg, &kv->group_list, node) {
169		if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) {
170			noncoherent = true;
171			break;
172		}
173	}
174
175	if (noncoherent != kv->noncoherent) {
176		kv->noncoherent = noncoherent;
177
178		if (kv->noncoherent)
179			kvm_arch_register_noncoherent_dma(dev->kvm);
180		else
181			kvm_arch_unregister_noncoherent_dma(dev->kvm);
182	}
183
184	mutex_unlock(&kv->lock);
185}
186
187static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
188{
189	struct kvm_vfio *kv = dev->private;
190	struct vfio_group *vfio_group;
191	struct kvm_vfio_group *kvg;
192	int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
193	struct fd f;
194	int32_t fd;
195	int ret;
196
197	switch (attr) {
198	case KVM_DEV_VFIO_GROUP_ADD:
199		if (get_user(fd, argp))
200			return -EFAULT;
201
202		f = fdget(fd);
203		if (!f.file)
204			return -EBADF;
205
206		vfio_group = kvm_vfio_group_get_external_user(f.file);
207		fdput(f);
208
209		if (IS_ERR(vfio_group))
210			return PTR_ERR(vfio_group);
211
212		mutex_lock(&kv->lock);
213
214		list_for_each_entry(kvg, &kv->group_list, node) {
215			if (kvg->vfio_group == vfio_group) {
216				mutex_unlock(&kv->lock);
217				kvm_vfio_group_put_external_user(vfio_group);
218				return -EEXIST;
219			}
220		}
221
222		kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
223		if (!kvg) {
224			mutex_unlock(&kv->lock);
225			kvm_vfio_group_put_external_user(vfio_group);
226			return -ENOMEM;
227		}
228
229		list_add_tail(&kvg->node, &kv->group_list);
230		kvg->vfio_group = vfio_group;
231
232		kvm_arch_start_assignment(dev->kvm);
233
234		mutex_unlock(&kv->lock);
235
236		kvm_vfio_group_set_kvm(vfio_group, dev->kvm);
237
238		kvm_vfio_update_coherency(dev);
239
240		return 0;
241
242	case KVM_DEV_VFIO_GROUP_DEL:
243		if (get_user(fd, argp))
244			return -EFAULT;
245
246		f = fdget(fd);
247		if (!f.file)
248			return -EBADF;
249
250		ret = -ENOENT;
251
252		mutex_lock(&kv->lock);
253
254		list_for_each_entry(kvg, &kv->group_list, node) {
255			if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
256								f.file))
257				continue;
258
259			list_del(&kvg->node);
260			kvm_arch_end_assignment(dev->kvm);
261#ifdef CONFIG_SPAPR_TCE_IOMMU
262			kvm_spapr_tce_release_vfio_group(dev->kvm,
263							 kvg->vfio_group);
264#endif
265			kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
266			kvm_vfio_group_put_external_user(kvg->vfio_group);
267			kfree(kvg);
268			ret = 0;
269			break;
270		}
271
272		mutex_unlock(&kv->lock);
273
274		fdput(f);
275
276		kvm_vfio_update_coherency(dev);
277
278		return ret;
279
280#ifdef CONFIG_SPAPR_TCE_IOMMU
281	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: {
282		struct kvm_vfio_spapr_tce param;
283		struct kvm_vfio *kv = dev->private;
284		struct vfio_group *vfio_group;
285		struct kvm_vfio_group *kvg;
286		struct fd f;
287		struct iommu_group *grp;
288
289		if (copy_from_user(&param, (void __user *)arg,
290				sizeof(struct kvm_vfio_spapr_tce)))
291			return -EFAULT;
292
293		f = fdget(param.groupfd);
294		if (!f.file)
295			return -EBADF;
296
297		vfio_group = kvm_vfio_group_get_external_user(f.file);
298		fdput(f);
299
300		if (IS_ERR(vfio_group))
301			return PTR_ERR(vfio_group);
302
303		grp = kvm_vfio_group_get_iommu_group(vfio_group);
304		if (WARN_ON_ONCE(!grp)) {
305			kvm_vfio_group_put_external_user(vfio_group);
306			return -EIO;
307		}
308
309		ret = -ENOENT;
310
311		mutex_lock(&kv->lock);
312
313		list_for_each_entry(kvg, &kv->group_list, node) {
314			if (kvg->vfio_group != vfio_group)
315				continue;
316
317			ret = kvm_spapr_tce_attach_iommu_group(dev->kvm,
318					param.tablefd, grp);
319			break;
320		}
321
322		mutex_unlock(&kv->lock);
323
324		iommu_group_put(grp);
325		kvm_vfio_group_put_external_user(vfio_group);
326
327		return ret;
328	}
329#endif /* CONFIG_SPAPR_TCE_IOMMU */
330	}
331
332	return -ENXIO;
333}
334
335static int kvm_vfio_set_attr(struct kvm_device *dev,
336			     struct kvm_device_attr *attr)
337{
338	switch (attr->group) {
339	case KVM_DEV_VFIO_GROUP:
340		return kvm_vfio_set_group(dev, attr->attr, attr->addr);
341	}
342
343	return -ENXIO;
344}
345
346static int kvm_vfio_has_attr(struct kvm_device *dev,
347			     struct kvm_device_attr *attr)
348{
349	switch (attr->group) {
350	case KVM_DEV_VFIO_GROUP:
351		switch (attr->attr) {
352		case KVM_DEV_VFIO_GROUP_ADD:
353		case KVM_DEV_VFIO_GROUP_DEL:
354#ifdef CONFIG_SPAPR_TCE_IOMMU
355		case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
356#endif
357			return 0;
358		}
359
360		break;
361	}
362
363	return -ENXIO;
364}
365
366static void kvm_vfio_destroy(struct kvm_device *dev)
367{
368	struct kvm_vfio *kv = dev->private;
369	struct kvm_vfio_group *kvg, *tmp;
370
371	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
372#ifdef CONFIG_SPAPR_TCE_IOMMU
373		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
374#endif
375		kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
376		kvm_vfio_group_put_external_user(kvg->vfio_group);
377		list_del(&kvg->node);
378		kfree(kvg);
379		kvm_arch_end_assignment(dev->kvm);
380	}
381
382	kvm_vfio_update_coherency(dev);
383
384	kfree(kv);
385	kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
386}
387
388static int kvm_vfio_create(struct kvm_device *dev, u32 type);
389
390static struct kvm_device_ops kvm_vfio_ops = {
391	.name = "kvm-vfio",
392	.create = kvm_vfio_create,
393	.destroy = kvm_vfio_destroy,
394	.set_attr = kvm_vfio_set_attr,
395	.has_attr = kvm_vfio_has_attr,
396};
397
398static int kvm_vfio_create(struct kvm_device *dev, u32 type)
399{
400	struct kvm_device *tmp;
401	struct kvm_vfio *kv;
402
403	/* Only one VFIO "device" per VM */
404	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
405		if (tmp->ops == &kvm_vfio_ops)
406			return -EBUSY;
407
408	kv = kzalloc(sizeof(*kv), GFP_KERNEL);
409	if (!kv)
410		return -ENOMEM;
411
412	INIT_LIST_HEAD(&kv->group_list);
413	mutex_init(&kv->lock);
414
415	dev->private = kv;
416
417	return 0;
418}
419
420int kvm_vfio_ops_init(void)
421{
422	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
423}
424
425void kvm_vfio_ops_exit(void)
426{
427	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
428}