// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/eventfd.h>
#include <linux/eventpoll.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>

#include "io-wq.h"
#include "eventfd.h"

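/*
 * State for an eventfd registered against an io_uring instance. It is
 * looked up under RCU from the CQE posting paths and freed via RCU once
 * the last reference has been dropped.
 */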
struct io_ev_fd {
	struct eventfd_ctx	*cq_ev_fd;
	unsigned int		eventfd_async;
	/* protected by ->completion_lock */
	unsigned		last_cq_tail;
	refcount_t		refs;
	atomic_t		ops;
	struct rcu_head		rcu;
};

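/* bits for io_ev_fd::ops */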
enum {
	IO_EVENTFD_OP_SIGNAL_BIT,
};

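/* RCU callback: drop the eventfd context reference and free the io_ev_fd */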
static void io_eventfd_free(struct rcu_head *rcu)
{
	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);

	eventfd_ctx_put(ev_fd->cq_ev_fd);
	kfree(ev_fd);
}

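/* Drop a reference; the final put frees the io_ev_fd after an RCU grace period */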
static void io_eventfd_put(struct io_ev_fd *ev_fd)
{
	if (refcount_dec_and_test(&ev_fd->refs))
		call_rcu(&ev_fd->rcu, io_eventfd_free);
}

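/*
 * RCU callback used when the eventfd could not be signaled directly from
 * the posting context: signal it with EPOLL_URING_WAKE and drop the
 * deferred reference.
 */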
static void io_eventfd_do_signal(struct rcu_head *rcu)
{
	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);

	eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
	io_eventfd_put(ev_fd);
}

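/* Undo io_eventfd_grab(): optionally drop the reference, then exit the RCU read section */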
static void io_eventfd_release(struct io_ev_fd *ev_fd, bool put_ref)
{
	if (put_ref)
		io_eventfd_put(ev_fd);
	rcu_read_unlock();
}

/*
 * Returns true if the caller should put the ev_fd reference, false if not.
 */
static bool __io_eventfd_signal(struct io_ev_fd *ev_fd)
{
	if (eventfd_signal_allowed()) {
		eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
		return true;
	}
	if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops)) {
		call_rcu_hurry(&ev_fd->rcu, io_eventfd_do_signal);
		return false;
	}
	return true;
}

/*
 * Trigger if eventfd_async isn't set, or if it's set and the caller is
 * an async worker. If ev_fd isn't valid, obviously return false.
 */
static bool io_eventfd_trigger(struct io_ev_fd *ev_fd)
{
	if (ev_fd)
		return !ev_fd->eventfd_async || io_wq_current_is_worker();
	return false;
}

/*
 * On success, returns with an ev_fd reference grabbed and the RCU read
 * lock held.
 */
static struct io_ev_fd *io_eventfd_grab(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd;

	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		return NULL;

	rcu_read_lock();

	/*
	 * rcu_dereference ctx->io_ev_fd once and use it for both the trigger
	 * check and the eventfd_signal.
	 */
	ev_fd = rcu_dereference(ctx->io_ev_fd);

	/*
	 * Check that an eventfd is still registered, in case an
	 * io_eventfd_unregister() call completed before the RCU read lock
	 * was taken.
	 */
	if (io_eventfd_trigger(ev_fd) && refcount_inc_not_zero(&ev_fd->refs))
		return ev_fd;

	rcu_read_unlock();
	return NULL;
}

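/* Signal the registered eventfd, if one exists and triggering is allowed */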
void io_eventfd_signal(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd;

	ev_fd = io_eventfd_grab(ctx);
	if (ev_fd)
		io_eventfd_release(ev_fd, __io_eventfd_signal(ev_fd));
}

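/*
 * Signal the registered eventfd only if new CQEs have been posted since
 * the last time it was signaled.
 */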
void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd;

	ev_fd = io_eventfd_grab(ctx);
	if (ev_fd) {
		bool skip, put_ref = true;

		/*
		 * Eventfd should only get triggered when at least one event
		 * has been posted. Some applications rely on the eventfd
		 * notification count only changing IFF a new CQE has been
		 * added to the CQ ring. There's no dependency on a 1:1
		 * relationship between how many times this function is called
		 * (and hence the eventfd count) and the number of CQEs posted
		 * to the CQ ring.
		 */
		spin_lock(&ctx->completion_lock);
		skip = ctx->cached_cq_tail == ev_fd->last_cq_tail;
		ev_fd->last_cq_tail = ctx->cached_cq_tail;
		spin_unlock(&ctx->completion_lock);

		if (!skip)
			put_ref = __io_eventfd_signal(ev_fd);

		io_eventfd_release(ev_fd, put_ref);
	}
}

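/*
 * Register an eventfd for CQE notifications. The file descriptor is copied
 * from userspace; if eventfd_async is set, the eventfd is only signaled
 * when completions are posted from io-wq async worker context.
 */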
int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
			unsigned int eventfd_async)
{
	struct io_ev_fd *ev_fd;
	__s32 __user *fds = arg;
	int fd;

	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
					lockdep_is_held(&ctx->uring_lock));
	if (ev_fd)
		return -EBUSY;

	if (copy_from_user(&fd, fds, sizeof(*fds)))
		return -EFAULT;

	ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
	if (!ev_fd)
		return -ENOMEM;

	ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
	if (IS_ERR(ev_fd->cq_ev_fd)) {
		int ret = PTR_ERR(ev_fd->cq_ev_fd);

		kfree(ev_fd);
		return ret;
	}

	spin_lock(&ctx->completion_lock);
	ev_fd->last_cq_tail = ctx->cached_cq_tail;
	spin_unlock(&ctx->completion_lock);

	ev_fd->eventfd_async = eventfd_async;
	ctx->has_evfd = true;
	refcount_set(&ev_fd->refs, 1);
	atomic_set(&ev_fd->ops, 0);
	rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
	return 0;
}

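/*
 * Unregister the eventfd. The io_ev_fd itself is freed via RCU once the
 * last reference is dropped, so concurrent lockless readers remain safe.
 */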
int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd;

	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
					lockdep_is_held(&ctx->uring_lock));
	if (ev_fd) {
		ctx->has_evfd = false;
		rcu_assign_pointer(ctx->io_ev_fd, NULL);
		io_eventfd_put(ev_fd);
		return 0;
	}

	return -ENXIO;
}