#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
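	/*
	 * Issue return codes: IOU_OK means the request completed inline and
	 * the core posts its CQE; IOU_ISSUE_SKIP_COMPLETE means completion
	 * is handled elsewhere (it aliases -EIOCBQUEUED, so it can never
	 * collide with a real errno handed back to userspace).
	 */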
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important; it just needs to not be an otherwise
	 * valid error code, yet less than -MAX_ERRNO and valid internally.
	 */
	IOU_REQUEUE		= -3072,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
	 * the poll runner that multishot should be removed and the result
	 * set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};
36
37bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
38void io_req_cqe_overflow(struct io_kiocb *req);
39int io_run_task_work_sig(struct io_ring_ctx *ctx);
40void io_req_defer_failed(struct io_kiocb *req, s32 res);
41void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
42bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
43bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
44void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
45
46struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
47
48struct file *io_file_get_normal(struct io_kiocb *req, int fd);
49struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
50 unsigned issue_flags);
51
52void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
53bool io_alloc_async_data(struct io_kiocb *req);
54void io_req_task_queue(struct io_kiocb *req);
55void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
56void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
57void io_req_task_queue_fail(struct io_kiocb *req, int ret);
58void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
59void tctx_task_work(struct callback_head *cb);
60__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
61int io_uring_alloc_task_context(struct task_struct *task,
62 struct io_ring_ctx *ctx);
63
64int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
65 int start, int end);
66
67int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
68int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
69int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
70void __io_submit_flush_completions(struct io_ring_ctx *ctx);
71int io_req_prep_async(struct io_kiocb *req);
72
73struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
74void io_wq_submit_work(struct io_wq_work *work);
75
76void io_free_req(struct io_kiocb *req);
77void io_queue_next(struct io_kiocb *req);
78void io_task_refs_refill(struct io_uring_task *tctx);
79bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
80
81bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
82 bool cancel_all);
83
84void *io_mem_alloc(size_t size);
85void io_mem_free(void *ptr);
86
87enum {
88 IO_EVENTFD_OP_SIGNAL_BIT,
89 IO_EVENTFD_OP_FREE_BIT,
90};
91
92void io_eventfd_ops(struct rcu_head *rcu);
93void io_activate_pollwq(struct io_ring_ctx *ctx);
94
95#if defined(CONFIG_PROVE_LOCKING)
96static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
97{
98 lockdep_assert(in_task());
99
100 if (ctx->flags & IORING_SETUP_IOPOLL) {
101 lockdep_assert_held(&ctx->uring_lock);
102 } else if (!ctx->task_complete) {
103 lockdep_assert_held(&ctx->completion_lock);
104 } else if (ctx->submitter_task) {
105 /*
106 * ->submitter_task may be NULL and we can still post a CQE,
107 * if the ring has been setup with IORING_SETUP_R_DISABLED.
108 * Not from an SQE, as those cannot be submitted, but via
109 * updating tagged resources.
110 */
111 if (ctx->submitter_task->flags & PF_EXITING)
112 lockdep_assert(current_work());
113 else
114 lockdep_assert(current == ctx->submitter_task);
115 }
116}
117#else
118static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
119{
120}
121#endif
122
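/* Queue task_work for this request with default (no) flags. */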
static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

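/* Walk a request and every request link-chained behind it. */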
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

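/*
 * Reserve the next CQE slot from the cached range, refilling the cache from
 * the CQ ring once it runs dry (the overflow argument is forwarded to
 * io_cqe_cache_refill()). CQE32 rings consume two slots per completion.
 */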
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->big_cqe.extra1, req->big_cqe.extra2);

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(req->big_cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}
	return true;
}

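/*
 * Mark a request as failed. A failed request must post its CQE even if
 * CQE_SKIP was requested, so drop the skip here and move it over to the
 * remaining link CQEs instead.
 */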
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

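/* Record the result and flags that will eventually land in the CQE. */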
static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

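/*
 * io_ring_submit_unlock() pairs with io_ring_submit_lock() below: the
 * uring_lock is taken/dropped only for unlocked (async worker) issue
 * contexts. Typical handler usage (sketch):
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	... touch state protected by ->uring_lock ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */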
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

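/* Wake anyone sleeping on the ring's own poll waitqueue (ring fd pollers). */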
static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

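/*
 * The SQ ring is full once the unconsumed distance between the userspace
 * tail and our cached head equals the ring size.
 */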
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME)) {
		__set_current_state(TASK_RUNNING);
		resume_user_mode_work(NULL);
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

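/* True if generic task_work or ring-local (DEFER_TASKRUN) work is queued. */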
static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}

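/* From task_work context: take the uring_lock if needed, and remember we hold it. */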
static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ts->locked) {
		mutex_lock(&ctx->uring_lock);
		ts->locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

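/* Take the slow flush path only if a rarely-used feature needs post-commit work. */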
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

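/* Take nr task refs from the per-task cache, refilling it when it runs dry. */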
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

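/* Pop a request off the ctx free list; the caller must know it is non-empty. */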
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb,
				comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

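/* DEFER_TASKRUN rings may only have their task_work run by the submitter task. */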
static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

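/* Set the result and punt the completion itself to task_work. */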
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
#endif