v6.8 (io_uring/io_uring.h)
#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important; it just needs to not collide with a
	 * valid error code, while staying within the -MAX_ERRNO range so it
	 * remains valid internally.
	 */
	IOU_REQUEUE		= -3072,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate
	 * to the poll runner that multishot should be removed and the
	 * result set in req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};
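
/*
 * Illustrative sketch, not from the kernel tree: how a hypothetical opcode
 * issue handler would use the codes above. Returning IOU_OK tells the core
 * that req->cqe already holds the result and a CQE should be posted;
 * IOU_ISSUE_SKIP_COMPLETE would instead defer the completion to a later
 * path (e.g. async punt or poll retry). Modelled loosely on the nop opcode.
 */
static int io_example_issue(struct io_kiocb *req, unsigned int issue_flags)
{
	/* record result 0 and no cflags; the core turns this into a CQE */
	req->cqe.res = 0;
	req->cqe.flags = 0;
	return IOU_OK;
}
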
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
void io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				     int start, int end);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

void *io_mem_alloc(size_t size);
void io_mem_free(void *ptr);

enum {
	IO_EVENTFD_OP_SIGNAL_BIT,
	IO_EVENTFD_OP_FREE_BIT,
};

void io_eventfd_ops(struct rcu_head *rcu);
void io_activate_pollwq(struct io_ring_ctx *ctx);

#if defined(CONFIG_PROVE_LOCKING)
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been set up with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (ctx->submitter_task->flags & PF_EXITING)
			lockdep_assert(current_work());
		else
			lockdep_assert(current == ctx->submitter_task);
	}
}
#else
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
}
#endif

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

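/*
 * Illustrative sketch, not from the kernel tree: walking a request link
 * chain with io_for_each_link(). 'head' is assumed to be the first request
 * of a REQ_F_LINK chain; the macro simply follows ->link until NULL.
 */
static inline bool io_example_link_contains(struct io_kiocb *head,
					    struct io_kiocb *needle)
{
	struct io_kiocb *pos;

	io_for_each_link(pos, head) {
		if (pos == needle)
			return true;
	}
	return false;
}
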
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
					struct io_uring_cqe **ret,
					bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->big_cqe.extra1, req->big_cqe.extra2);

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		/* sizeof(*cqe) == sizeof(req->big_cqe): the 16-byte payload */
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}
	return true;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock in that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

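/*
 * Illustrative sketch, not from the kernel tree: the usual bracket around
 * code that must run under ->uring_lock regardless of how the request was
 * issued. For inline or SQPOLL submission the helpers are no-ops; only an
 * io-wq worker (IO_URING_F_UNLOCKED) actually takes and drops the mutex.
 */
static void io_example_locked_op(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_ring_submit_lock(ctx, issue_flags);
	/* ... touch state protected by ->uring_lock ... */
	io_ring_submit_unlock(ctx, issue_flags);
}
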
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

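/*
 * Illustrative userspace counterpart, not from the kernel tree: the
 * smp_store_release() in io_commit_cqring() pairs with an acquire load of
 * cq.tail when userspace reaps completions, so the CQE contents become
 * visible before the new tail does. Simplified liburing-style reap; a real
 * consumer must also publish the consumed head back with a release store.
 */
#include <stdatomic.h>

static inline bool example_reap_cqe(const _Atomic unsigned *ktail,
				    unsigned *khead, unsigned mask,
				    const struct io_uring_cqe *cqes,
				    struct io_uring_cqe *out)
{
	unsigned tail = atomic_load_explicit(ktail, memory_order_acquire);

	if (*khead == tail)
		return false;			/* ring is empty */
	*out = cqes[*khead & mask];		/* safe to read after acquire */
	(*khead)++;
	return true;
}
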
static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

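/*
 * Illustrative userspace counterpart, not from the kernel tree: the
 * smp_load_acquire() on sq.tail above pairs with a release store when
 * userspace publishes new SQEs (needs <stdatomic.h>), guaranteeing the SQE
 * writes are visible to the kernel before the tail moves.
 */
static inline void example_publish_sqes(_Atomic unsigned *ktail,
					unsigned new_tail)
{
	/* all SQE stores up to new_tail must happen before this release */
	atomic_store_explicit(ktail, new_tail, memory_order_release);
}
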
static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME)) {
		__set_current_state(TASK_RUNNING);
		resume_user_mode_work(NULL);
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ts->locked) {
		mutex_lock(&ctx->uring_lock);
		ts->locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}
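
/*
 * Illustrative sketch, not from the kernel tree: the submit-path pattern
 * around io_alloc_req(). Failure means the request cache could not be
 * refilled (allocation failure); *req is only valid when true is returned,
 * and ->uring_lock must be held across the call.
 */
static inline struct io_kiocb *io_example_get_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	if (unlikely(!io_alloc_req(ctx, &req)))
		return NULL;
	return req;
}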

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
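
/*
 * Illustrative sketch, not from the kernel tree: indexing the SQE array
 * with uring_sqe_size(). Under IORING_SETUP_SQE128 every slot is 128 bytes,
 * so byte arithmetic is needed rather than pointer arithmetic on
 * struct io_uring_sqe. 'head' is assumed to be already masked.
 */
static inline struct io_uring_sqe *io_example_sqe_at(struct io_ring_ctx *ctx,
						     unsigned head)
{
	return (struct io_uring_sqe *)((char *)ctx->sq_sqes +
				       head * uring_sqe_size(ctx));
}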
#endif
v6.13.7 (io_uring/io_uring.h)
#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important; it just needs to not collide with a
	 * valid error code, while staying within the -MAX_ERRNO range so it
	 * remains valid internally.
	 */
	IOU_REQUEUE		= -3072,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate
	 * to the poll runner that multishot should be removed and the
	 * result set in req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned cq_tail;
	unsigned cq_min_tail;
	unsigned nr_timeouts;
	int hit_timeout;
	ktime_t min_timeout;
	ktime_t timeout;
	struct hrtimer t;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ktime_t napi_busy_poll_dt;
	bool napi_prefer_busy_poll;
#endif
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
	struct io_ring_ctx *ctx = iowq->ctx;
	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
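
/*
 * Worked example, not from the kernel tree: the signed distance above is
 * wrap-around safe. With 32-bit unsigned ring counters, e.g.
 *
 *	cq.tail = 0x00000002, iowq->cq_tail = 0xfffffffe:
 *	0x00000002 - 0xfffffffe = 0x00000004, so dist = 4 >= 0 and we wake.
 *
 * The comparison only misbehaves if the two counters drift more than 2^31
 * apart, which would require the waiter to sleep across ~2 billion
 * completions.
 */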

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
			 unsigned int cq_entries, size_t *sq_offset);
int io_uring_fill_params(unsigned entries, struct io_uring_params *p);
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
				 unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				     int start, int end);
void io_req_queue_iowq(struct io_kiocb *req);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
			bool cancel_all);

void io_activate_pollwq(struct io_ring_ctx *ctx);

static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		lockdep_assert_held(&ctx->uring_lock);

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been set up with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (!percpu_ref_is_dying(&ctx->refs))
			lockdep_assert(current == ctx->submitter_task);
	}
#endif
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
	    ctx->submit_state.cq_flush)
		__io_submit_flush_completions(ctx);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
					struct io_uring_cqe **ret,
					bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		/* sizeof(*cqe) == sizeof(req->big_cqe): the 16-byte payload */
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, cqe);
	return true;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock in that case.
	 */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	/*
	 * SQPOLL must use the actual sqring head, as using the cached_sq_head
	 * is race prone if the SQPOLL thread has grabbed entries but not yet
	 * committed them to the ring. For !SQPOLL, this doesn't matter, but
	 * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
	 * just read the actual sqring head unconditionally.
	 */
	return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

static inline int io_run_task_work(void)
{
	bool ret = false;

	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER) {
		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
			__set_current_state(TASK_RUNNING);
			resume_user_mode_work(NULL);
		}
		if (current->io_uring) {
			unsigned int count = 0;

			__set_current_state(TASK_RUNNING);
			tctx_task_work_run(current->io_uring, UINT_MAX, &count);
			if (count)
				ret = true;
		}
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		ret = true;
	}

	return ret;
}
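
/*
 * Illustrative sketch, not from the kernel tree: a typical wait-side
 * pattern around io_run_task_work(). Pending task_work is drained before
 * deciding whether to sleep, since running it may post the very
 * completions the caller is waiting for.
 */
static inline void io_example_drain_task_work(void)
{
	while (io_run_task_work())
		;
}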

static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
{
	return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || io_local_work_pending(ctx);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

/*
 * Terminate the request if either of these conditions is true:
 *
 * 1) It's being executed by the original task, but that task is marked
 *    with PF_EXITING as it's exiting.
 * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
 *    our fallback task_work.
 */
static inline bool io_should_terminate_tw(void)
{
	return current->flags & (PF_KTHREAD | PF_EXITING);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}

static inline bool io_file_can_poll(struct io_kiocb *req)
{
	if (req->flags & REQ_F_CAN_POLL)
		return true;
	if (req->file && file_can_poll(req->file)) {
		req->flags |= REQ_F_CAN_POLL;
		return true;
	}
	return false;
}

static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
{
	if (ctx->clockid == CLOCK_MONOTONIC)
		return ktime_get();

	return ktime_get_with_offset(ctx->clock_offset);
}
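
/*
 * Illustrative sketch, not from the kernel tree: io_get_time() keeps wait
 * timeouts on the clock the ring was configured with (CLOCK_MONOTONIC by
 * default, or a registered clockid). Converting a relative timeout into an
 * absolute one against that clock:
 */
static inline ktime_t io_example_abs_timeout(struct io_ring_ctx *ctx,
					     ktime_t rel)
{
	return ktime_add_safe(io_get_time(ctx), rel);
}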

enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
	       io_local_work_pending(ctx);
}
#endif