Linux Audio

Check our new training course

Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * VFIO platform devices interrupt handling
  4 *
  5 * Copyright (C) 2013 - Virtual Open Systems
  6 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 
 
 
 
 
 
 
 
 
  7 */
  8
  9#include <linux/eventfd.h>
 10#include <linux/interrupt.h>
 11#include <linux/slab.h>
 12#include <linux/types.h>
 13#include <linux/vfio.h>
 14#include <linux/irq.h>
 15
 16#include "vfio_platform_private.h"
 17
 18static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx)
 19{
 20	unsigned long flags;
 21
 22	spin_lock_irqsave(&irq_ctx->lock, flags);
 23
 24	if (!irq_ctx->masked) {
 25		disable_irq_nosync(irq_ctx->hwirq);
 26		irq_ctx->masked = true;
 27	}
 28
 29	spin_unlock_irqrestore(&irq_ctx->lock, flags);
 30}
 31
 32static int vfio_platform_mask_handler(void *opaque, void *unused)
 33{
 34	struct vfio_platform_irq *irq_ctx = opaque;
 35
 36	vfio_platform_mask(irq_ctx);
 37
 38	return 0;
 39}
 40
 41static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev,
 42				      unsigned index, unsigned start,
 43				      unsigned count, uint32_t flags,
 44				      void *data)
 45{
 46	if (start != 0 || count != 1)
 47		return -EINVAL;
 48
 49	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
 50		return -EINVAL;
 51
 52	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
 53		int32_t fd = *(int32_t *)data;
 54
 55		if (fd >= 0)
 56			return vfio_virqfd_enable((void *) &vdev->irqs[index],
 57						  vfio_platform_mask_handler,
 58						  NULL, NULL,
 59						  &vdev->irqs[index].mask, fd);
 60
 61		vfio_virqfd_disable(&vdev->irqs[index].mask);
 62		return 0;
 63	}
 64
 65	if (flags & VFIO_IRQ_SET_DATA_NONE) {
 66		vfio_platform_mask(&vdev->irqs[index]);
 67
 68	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
 69		uint8_t mask = *(uint8_t *)data;
 70
 71		if (mask)
 72			vfio_platform_mask(&vdev->irqs[index]);
 73	}
 74
 75	return 0;
 76}
 77
 78static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx)
 79{
 80	unsigned long flags;
 81
 82	spin_lock_irqsave(&irq_ctx->lock, flags);
 83
 84	if (irq_ctx->masked) {
 85		enable_irq(irq_ctx->hwirq);
 86		irq_ctx->masked = false;
 87	}
 88
 89	spin_unlock_irqrestore(&irq_ctx->lock, flags);
 90}
 91
 92static int vfio_platform_unmask_handler(void *opaque, void *unused)
 93{
 94	struct vfio_platform_irq *irq_ctx = opaque;
 95
 96	vfio_platform_unmask(irq_ctx);
 97
 98	return 0;
 99}
100
101static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
102					unsigned index, unsigned start,
103					unsigned count, uint32_t flags,
104					void *data)
105{
106	if (start != 0 || count != 1)
107		return -EINVAL;
108
109	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
110		return -EINVAL;
111
112	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
113		int32_t fd = *(int32_t *)data;
114
115		if (fd >= 0)
116			return vfio_virqfd_enable((void *) &vdev->irqs[index],
117						  vfio_platform_unmask_handler,
118						  NULL, NULL,
119						  &vdev->irqs[index].unmask,
120						  fd);
121
122		vfio_virqfd_disable(&vdev->irqs[index].unmask);
123		return 0;
124	}
125
126	if (flags & VFIO_IRQ_SET_DATA_NONE) {
127		vfio_platform_unmask(&vdev->irqs[index]);
128
129	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
130		uint8_t unmask = *(uint8_t *)data;
131
132		if (unmask)
133			vfio_platform_unmask(&vdev->irqs[index]);
134	}
135
136	return 0;
137}
138
/*
 * Hard IRQ handler for automasked (level-triggered, per irq_init) lines.
 *
 * The line is disabled ("automasked") before the eventfd is signalled so
 * a level interrupt cannot refire before userspace services the device;
 * userspace re-enables it via the unmask path.  Returns IRQ_HANDLED if
 * the interrupt was taken, IRQ_NONE if it arrived while already masked.
 */
static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (!irq_ctx->masked) {
		ret = IRQ_HANDLED;

		/* automask maskable interrupts */
		disable_irq_nosync(irq_ctx->hwirq);
		irq_ctx->masked = true;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);

	/* Signal the eventfd outside the spinlock. */
	if (ret == IRQ_HANDLED)
		eventfd_signal(irq_ctx->trigger);

	return ret;
}
162
163static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
164{
165	struct vfio_platform_irq *irq_ctx = dev_id;
166
167	eventfd_signal(irq_ctx->trigger);
168
169	return IRQ_HANDLED;
170}
171
/*
 * (Re)bind the hardware interrupt to an eventfd trigger.
 *
 * Any previously installed handler/eventfd is torn down first; with
 * fd < 0 only that teardown is performed.  IRQ_NOAUTOEN keeps
 * request_irq() from enabling the line, so a line that userspace left
 * masked stays masked across re-registration (see the final enable_irq).
 */
static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
			    int fd, irq_handler_t handler)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	struct eventfd_ctx *trigger;
	int ret;

	if (irq->trigger) {
		/* Tear down the previous handler/eventfd pairing. */
		irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN);
		free_irq(irq->hwirq, irq);
		kfree(irq->name);
		eventfd_ctx_put(irq->trigger);
		irq->trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;
	irq->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-irq[%d](%s)",
			      irq->hwirq, vdev->name);

	if (!irq->name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(irq->name);
		return PTR_ERR(trigger);
	}

	irq->trigger = trigger;

	/* Register the handler with the line left disabled. */
	irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
	ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
	if (ret) {
		kfree(irq->name);
		eventfd_ctx_put(trigger);
		irq->trigger = NULL;
		return ret;
	}

	/* Honour a mask requested before (re)registration. */
	if (!irq->masked)
		enable_irq(irq->hwirq);

	return 0;
}
216
217static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
218					 unsigned index, unsigned start,
219					 unsigned count, uint32_t flags,
220					 void *data)
221{
222	struct vfio_platform_irq *irq = &vdev->irqs[index];
223	irq_handler_t handler;
224
225	if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED)
226		handler = vfio_automasked_irq_handler;
227	else
228		handler = vfio_irq_handler;
229
230	if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
231		return vfio_set_trigger(vdev, index, -1, handler);
232
233	if (start != 0 || count != 1)
234		return -EINVAL;
235
236	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
237		int32_t fd = *(int32_t *)data;
238
239		return vfio_set_trigger(vdev, index, fd, handler);
240	}
241
242	if (flags & VFIO_IRQ_SET_DATA_NONE) {
243		handler(irq->hwirq, irq);
244
245	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
246		uint8_t trigger = *(uint8_t *)data;
247
248		if (trigger)
249			handler(irq->hwirq, irq);
250	}
251
252	return 0;
253}
254
255int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
256				 uint32_t flags, unsigned index, unsigned start,
257				 unsigned count, void *data)
258{
259	int (*func)(struct vfio_platform_device *vdev, unsigned index,
260		    unsigned start, unsigned count, uint32_t flags,
261		    void *data) = NULL;
262
263	switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
264	case VFIO_IRQ_SET_ACTION_MASK:
265		func = vfio_platform_set_irq_mask;
266		break;
267	case VFIO_IRQ_SET_ACTION_UNMASK:
268		func = vfio_platform_set_irq_unmask;
269		break;
270	case VFIO_IRQ_SET_ACTION_TRIGGER:
271		func = vfio_platform_set_irq_trigger;
272		break;
273	}
274
275	if (!func)
276		return -ENOTTY;
277
278	return func(vdev, index, start, count, flags, data);
279}
280
281int vfio_platform_irq_init(struct vfio_platform_device *vdev)
282{
283	int cnt = 0, i;
284
285	while (vdev->get_irq(vdev, cnt) >= 0)
286		cnt++;
287
288	vdev->irqs = kcalloc(cnt, sizeof(struct vfio_platform_irq),
289			     GFP_KERNEL_ACCOUNT);
290	if (!vdev->irqs)
291		return -ENOMEM;
292
293	for (i = 0; i < cnt; i++) {
294		int hwirq = vdev->get_irq(vdev, i);
295
296		if (hwirq < 0)
297			goto err;
298
299		spin_lock_init(&vdev->irqs[i].lock);
300
301		vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
302
303		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
304			vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
305						| VFIO_IRQ_INFO_AUTOMASKED;
306
307		vdev->irqs[i].count = 1;
308		vdev->irqs[i].hwirq = hwirq;
309		vdev->irqs[i].masked = false;
310	}
311
312	vdev->num_irqs = cnt;
313
314	return 0;
315err:
316	kfree(vdev->irqs);
317	return -EINVAL;
318}
319
320void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
321{
322	int i;
323
324	for (i = 0; i < vdev->num_irqs; i++)
325		vfio_set_trigger(vdev, i, -1, NULL);
326
327	vdev->num_irqs = 0;
328	kfree(vdev->irqs);
329}
v4.10.11
 
  1/*
  2 * VFIO platform devices interrupt handling
  3 *
  4 * Copyright (C) 2013 - Virtual Open Systems
  5 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License, version 2, as
  9 * published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope that it will be useful,
 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 * GNU General Public License for more details.
 15 */
 16
 17#include <linux/eventfd.h>
 18#include <linux/interrupt.h>
 19#include <linux/slab.h>
 20#include <linux/types.h>
 21#include <linux/vfio.h>
 22#include <linux/irq.h>
 23
 24#include "vfio_platform_private.h"
 25
 26static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx)
 27{
 28	unsigned long flags;
 29
 30	spin_lock_irqsave(&irq_ctx->lock, flags);
 31
 32	if (!irq_ctx->masked) {
 33		disable_irq_nosync(irq_ctx->hwirq);
 34		irq_ctx->masked = true;
 35	}
 36
 37	spin_unlock_irqrestore(&irq_ctx->lock, flags);
 38}
 39
 40static int vfio_platform_mask_handler(void *opaque, void *unused)
 41{
 42	struct vfio_platform_irq *irq_ctx = opaque;
 43
 44	vfio_platform_mask(irq_ctx);
 45
 46	return 0;
 47}
 48
 49static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev,
 50				      unsigned index, unsigned start,
 51				      unsigned count, uint32_t flags,
 52				      void *data)
 53{
 54	if (start != 0 || count != 1)
 55		return -EINVAL;
 56
 57	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
 58		return -EINVAL;
 59
 60	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
 61		int32_t fd = *(int32_t *)data;
 62
 63		if (fd >= 0)
 64			return vfio_virqfd_enable((void *) &vdev->irqs[index],
 65						  vfio_platform_mask_handler,
 66						  NULL, NULL,
 67						  &vdev->irqs[index].mask, fd);
 68
 69		vfio_virqfd_disable(&vdev->irqs[index].mask);
 70		return 0;
 71	}
 72
 73	if (flags & VFIO_IRQ_SET_DATA_NONE) {
 74		vfio_platform_mask(&vdev->irqs[index]);
 75
 76	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
 77		uint8_t mask = *(uint8_t *)data;
 78
 79		if (mask)
 80			vfio_platform_mask(&vdev->irqs[index]);
 81	}
 82
 83	return 0;
 84}
 85
 86static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx)
 87{
 88	unsigned long flags;
 89
 90	spin_lock_irqsave(&irq_ctx->lock, flags);
 91
 92	if (irq_ctx->masked) {
 93		enable_irq(irq_ctx->hwirq);
 94		irq_ctx->masked = false;
 95	}
 96
 97	spin_unlock_irqrestore(&irq_ctx->lock, flags);
 98}
 99
/* virqfd callback: userspace signalled the "unmask" eventfd. */
static int vfio_platform_unmask_handler(void *opaque, void *unused)
{
	vfio_platform_unmask(opaque);
	return 0;
}
108
109static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
110					unsigned index, unsigned start,
111					unsigned count, uint32_t flags,
112					void *data)
113{
114	if (start != 0 || count != 1)
115		return -EINVAL;
116
117	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
118		return -EINVAL;
119
120	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
121		int32_t fd = *(int32_t *)data;
122
123		if (fd >= 0)
124			return vfio_virqfd_enable((void *) &vdev->irqs[index],
125						  vfio_platform_unmask_handler,
126						  NULL, NULL,
127						  &vdev->irqs[index].unmask,
128						  fd);
129
130		vfio_virqfd_disable(&vdev->irqs[index].unmask);
131		return 0;
132	}
133
134	if (flags & VFIO_IRQ_SET_DATA_NONE) {
135		vfio_platform_unmask(&vdev->irqs[index]);
136
137	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
138		uint8_t unmask = *(uint8_t *)data;
139
140		if (unmask)
141			vfio_platform_unmask(&vdev->irqs[index]);
142	}
143
144	return 0;
145}
146
/*
 * Hard IRQ handler for automasked (level-triggered, per irq_init) lines.
 *
 * The line is disabled ("automasked") before the eventfd is signalled so
 * a level interrupt cannot refire before userspace services the device;
 * userspace re-enables it via the unmask path.  Returns IRQ_HANDLED if
 * the interrupt was taken, IRQ_NONE if it arrived while already masked.
 */
static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (!irq_ctx->masked) {
		ret = IRQ_HANDLED;

		/* automask maskable interrupts */
		disable_irq_nosync(irq_ctx->hwirq);
		irq_ctx->masked = true;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);

	/* Signal the eventfd outside the spinlock. */
	if (ret == IRQ_HANDLED)
		eventfd_signal(irq_ctx->trigger, 1);

	return ret;
}
170
171static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
172{
173	struct vfio_platform_irq *irq_ctx = dev_id;
174
175	eventfd_signal(irq_ctx->trigger, 1);
176
177	return IRQ_HANDLED;
178}
179
/*
 * (Re)bind the hardware interrupt to an eventfd trigger.
 *
 * Any previously installed handler/eventfd is torn down first; with
 * fd < 0 only that teardown is performed.  IRQ_NOAUTOEN keeps
 * request_irq() from enabling the line, so a line that userspace left
 * masked stays masked across re-registration (see the final enable_irq).
 */
static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
			    int fd, irq_handler_t handler)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	struct eventfd_ctx *trigger;
	int ret;

	if (irq->trigger) {
		/* Tear down the previous handler/eventfd pairing. */
		irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN);
		free_irq(irq->hwirq, irq);
		kfree(irq->name);
		eventfd_ctx_put(irq->trigger);
		irq->trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
						irq->hwirq, vdev->name);
	if (!irq->name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(irq->name);
		return PTR_ERR(trigger);
	}

	irq->trigger = trigger;

	/* Register the handler with the line left disabled. */
	irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
	ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
	if (ret) {
		kfree(irq->name);
		eventfd_ctx_put(trigger);
		irq->trigger = NULL;
		return ret;
	}

	/* Honour a mask requested before (re)registration. */
	if (!irq->masked)
		enable_irq(irq->hwirq);

	return 0;
}
225
226static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
227					 unsigned index, unsigned start,
228					 unsigned count, uint32_t flags,
229					 void *data)
230{
231	struct vfio_platform_irq *irq = &vdev->irqs[index];
232	irq_handler_t handler;
233
234	if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED)
235		handler = vfio_automasked_irq_handler;
236	else
237		handler = vfio_irq_handler;
238
239	if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
240		return vfio_set_trigger(vdev, index, -1, handler);
241
242	if (start != 0 || count != 1)
243		return -EINVAL;
244
245	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
246		int32_t fd = *(int32_t *)data;
247
248		return vfio_set_trigger(vdev, index, fd, handler);
249	}
250
251	if (flags & VFIO_IRQ_SET_DATA_NONE) {
252		handler(irq->hwirq, irq);
253
254	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
255		uint8_t trigger = *(uint8_t *)data;
256
257		if (trigger)
258			handler(irq->hwirq, irq);
259	}
260
261	return 0;
262}
263
264int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
265				 uint32_t flags, unsigned index, unsigned start,
266				 unsigned count, void *data)
267{
268	int (*func)(struct vfio_platform_device *vdev, unsigned index,
269		    unsigned start, unsigned count, uint32_t flags,
270		    void *data) = NULL;
271
272	switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
273	case VFIO_IRQ_SET_ACTION_MASK:
274		func = vfio_platform_set_irq_mask;
275		break;
276	case VFIO_IRQ_SET_ACTION_UNMASK:
277		func = vfio_platform_set_irq_unmask;
278		break;
279	case VFIO_IRQ_SET_ACTION_TRIGGER:
280		func = vfio_platform_set_irq_trigger;
281		break;
282	}
283
284	if (!func)
285		return -ENOTTY;
286
287	return func(vdev, index, start, count, flags, data);
288}
289
290int vfio_platform_irq_init(struct vfio_platform_device *vdev)
291{
292	int cnt = 0, i;
293
294	while (vdev->get_irq(vdev, cnt) >= 0)
295		cnt++;
296
297	vdev->irqs = kcalloc(cnt, sizeof(struct vfio_platform_irq), GFP_KERNEL);
 
298	if (!vdev->irqs)
299		return -ENOMEM;
300
301	for (i = 0; i < cnt; i++) {
302		int hwirq = vdev->get_irq(vdev, i);
303
304		if (hwirq < 0)
305			goto err;
306
307		spin_lock_init(&vdev->irqs[i].lock);
308
309		vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
310
311		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
312			vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
313						| VFIO_IRQ_INFO_AUTOMASKED;
314
315		vdev->irqs[i].count = 1;
316		vdev->irqs[i].hwirq = hwirq;
317		vdev->irqs[i].masked = false;
318	}
319
320	vdev->num_irqs = cnt;
321
322	return 0;
323err:
324	kfree(vdev->irqs);
325	return -EINVAL;
326}
327
328void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
329{
330	int i;
331
332	for (i = 0; i < vdev->num_irqs; i++)
333		vfio_set_trigger(vdev, i, -1, NULL);
334
335	vdev->num_irqs = 0;
336	kfree(vdev->irqs);
337}