#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important; it just must not be an otherwise
	 * valid error code, while staying within the -MAX_ERRNO range so
	 * it remains valid internally.
	 */
	IOU_REQUEUE		= -3072,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
	 * the poll runner that multishot should be removed and the result
	 * set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned cq_tail;
	unsigned cq_min_tail;
	unsigned nr_timeouts;
	int hit_timeout;
	ktime_t min_timeout;
	ktime_t timeout;
	struct hrtimer t;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ktime_t napi_busy_poll_dt;
	bool napi_prefer_busy_poll;
#endif
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
	struct io_ring_ctx *ctx = iowq->ctx;
	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
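
/*
 * Wrap-safety note on the distance check above: cq.tail and iowq->cq_tail
 * are free-running u32 counters, so the signed subtraction stays correct
 * across wraparound. For example, a wait target of UINT_MAX - 1 with the
 * ring tail already wrapped to 2 yields (int)(2 - (UINT_MAX - 1)) == 4,
 * which is >= 0, i.e. the target has been reached.
 */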

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
			 unsigned int cq_entries, size_t *sq_offset);
int io_uring_fill_params(unsigned entries, struct io_uring_params *p);
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
				 unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);
void io_req_queue_iowq(struct io_kiocb *req);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
			bool cancel_all);

void io_activate_pollwq(struct io_ring_ctx *ctx);

static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		lockdep_assert_held(&ctx->uring_lock);

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been set up with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (!percpu_ref_is_dying(&ctx->refs))
			lockdep_assert(current == ctx->submitter_task);
	}
#endif
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
	    ctx->submit_state.cq_flush)
		__io_submit_flush_completions(ctx);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)
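
/*
 * Illustrative use of io_for_each_link() (a sketch of the pattern the
 * cancelation match helpers follow): walk a request and every request
 * linked behind it.
 *
 *	struct io_kiocb *pos;
 *
 *	io_for_each_link(pos, head) {
 *		if (pos->flags & REQ_F_INFLIGHT)
 *			return true;
 *	}
 */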

static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}
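
/*
 * Note on the CQE32 case above: with IORING_SETUP_CQE32 each completion
 * occupies two struct io_uring_cqe slots in the ring, so the cache cursor
 * has to step past both slots while cached_cq_tail still advances by one
 * logical CQE.
 */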

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, cqe);
	return true;
}
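
/*
 * The big_cqe copy above uses sizeof(*cqe), which works because the two
 * extra u64 words of a CQE32 entry are exactly the size of one regular
 * 16-byte CQE: sizeof(*cqe) and sizeof(req->big_cqe) are the same value
 * here.
 */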

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}
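
/*
 * Note: a failed request must always post a CQE, even if the submitter
 * asked for it to be skipped on success (IOSQE_CQE_SKIP_SUCCESS). The
 * skip intent is carried over to the remaining CQEs of the link instead.
 */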

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
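
/*
 * Typical pairing (an illustrative sketch, not a specific opcode handler):
 * an issue path that must touch uring_lock-protected state regardless of
 * whether it runs inline, from SQPOLL, or from an io-wq worker:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	... access ctx->file_table or other protected state ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */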

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}
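
/*
 * The store-release above pairs with the acquire load that userspace (e.g.
 * liburing) performs on cq.tail, ensuring the CQE contents are visible
 * before the updated tail value is.
 */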

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks; io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	/*
	 * SQPOLL must use the actual sqring head, as using the cached_sq_head
	 * is race prone if the SQPOLL thread has grabbed entries but not yet
	 * committed them to the ring. For !SQPOLL, this doesn't matter, but
	 * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
	 * just read the actual sqring head unconditionally.
	 */
	return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}
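
/*
 * The min() clamp above guards against a buggy or malicious userspace
 * publishing an sq tail further ahead than the ring size allows; the
 * kernel never consumes more than sq_entries SQEs in one batch.
 */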

static inline int io_run_task_work(void)
{
	bool ret = false;

	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER) {
		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
			__set_current_state(TASK_RUNNING);
			resume_user_mode_work(NULL);
		}
		if (current->io_uring) {
			unsigned int count = 0;

			__set_current_state(TASK_RUNNING);
			tctx_task_work_run(current->io_uring, UINT_MAX, &count);
			if (count)
				ret = true;
		}
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		ret = true;
	}

	return ret;
}

static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
{
	return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || io_local_work_pending(ctx);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}
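
/*
 * Task references are acquired in batches: io_get_task_refs() consumes
 * from a per-task cache of pre-acquired refs, and only when the cache
 * goes negative does io_task_refs_refill() take the slow path to grab
 * a fresh batch.
 */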

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}
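
/*
 * Illustrative caller pattern (a sketch; the submission path does the
 * equivalent under ->uring_lock):
 *
 *	struct io_kiocb *req;
 *
 *	if (unlikely(!io_alloc_req(ctx, &req)))
 *		return -EAGAIN;
 *	... initialize and issue req ...
 */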

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

/*
 * Terminate the request if either of these conditions is true:
 *
 * 1) It's being executed by the original task, but that task is marked
 *    with PF_EXITING as it's exiting.
 * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
 *    our fallback task_work.
 */
static inline bool io_should_terminate_tw(void)
{
	return current->flags & (PF_KTHREAD | PF_EXITING);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
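
/*
 * With IORING_SETUP_SQE128 the SQ slots are 128 bytes each, so code
 * indexing the SQE array doubles the index rather than changing the
 * pointer type. A sketch of the lookup done by the SQE fetch path:
 *
 *	if (ctx->flags & IORING_SETUP_SQE128)
 *		head <<= 1;
 *	sqe = &ctx->sq_sqes[head];
 */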

static inline bool io_file_can_poll(struct io_kiocb *req)
{
	if (req->flags & REQ_F_CAN_POLL)
		return true;
	if (req->file && file_can_poll(req->file)) {
		req->flags |= REQ_F_CAN_POLL;
		return true;
	}
	return false;
}

static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
{
	if (ctx->clockid == CLOCK_MONOTONIC)
		return ktime_get();

	return ktime_get_with_offset(ctx->clock_offset);
}
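
/*
 * ctx->clockid and ctx->clock_offset default to CLOCK_MONOTONIC and can be
 * changed via ring registration (IORING_REGISTER_CLOCK), so waiters all
 * sample time through this one helper.
 */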

enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
	       io_local_work_pending(ctx);
}
#endif