1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqe (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
29 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes and to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
40 * Copyright (c) 2018-2019 Christoph Hellwig
41 */
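/*
 * Application-side sketch of the pairing described above; a minimal,
 * illustrative example assuming a liburing-style mmap()ed ring, with
 * made-up field names (ktail/khead/kflags) rather than the real uapi
 * layout. See liburing for the canonical implementation.
 *
 *	Reaping CQEs:
 *		tail = smp_load_acquire(cq->ktail);	(pairs with kernel tail store)
 *		while (head != tail)
 *			handle(&cq->cqes[head++ & cq->ring_mask]);
 *		smp_store_release(cq->khead, head);	(pairs with kernel head load)
 *
 *	Submitting SQEs (fill sqes[tail & sq->ring_mask] before the release):
 *		smp_store_release(sq->ktail, tail + 1);	(pairs with io_get_sqring)
 *		smp_mb();
 *		if (READ_ONCE(*sq->kflags) & IORING_SQ_NEED_WAKEUP)
 *			io_uring_enter(..., IORING_ENTER_SQ_WAKEUP);
 */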
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <net/compat.h>
47#include <linux/refcount.h>
48#include <linux/uio.h>
49#include <linux/bits.h>
50
51#include <linux/sched/signal.h>
52#include <linux/fs.h>
53#include <linux/file.h>
54#include <linux/fdtable.h>
55#include <linux/mm.h>
56#include <linux/mman.h>
57#include <linux/percpu.h>
58#include <linux/slab.h>
59#include <linux/bvec.h>
60#include <linux/net.h>
61#include <net/sock.h>
62#include <net/af_unix.h>
63#include <net/scm.h>
64#include <linux/anon_inodes.h>
65#include <linux/sched/mm.h>
66#include <linux/uaccess.h>
67#include <linux/nospec.h>
68#include <linux/highmem.h>
69#include <linux/fsnotify.h>
70#include <linux/fadvise.h>
71#include <linux/task_work.h>
72#include <linux/io_uring.h>
73#include <linux/audit.h>
74#include <linux/security.h>
75
76#define CREATE_TRACE_POINTS
77#include <trace/events/io_uring.h>
78
79#include <uapi/linux/io_uring.h>
80
81#include "io-wq.h"
82
83#include "io_uring.h"
84#include "opdef.h"
85#include "refs.h"
86#include "tctx.h"
87#include "sqpoll.h"
88#include "fdinfo.h"
89#include "kbuf.h"
90#include "rsrc.h"
91#include "cancel.h"
92#include "net.h"
93#include "notif.h"
94
95#include "timeout.h"
96#include "poll.h"
97#include "alloc_cache.h"
98
99#define IORING_MAX_ENTRIES 32768
100#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
101
102#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
103 IORING_REGISTER_LAST + IORING_OP_LAST)
104
105#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
106 IOSQE_IO_HARDLINK | IOSQE_ASYNC)
107
108#define SQE_VALID_FLAGS (SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
109 IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
110
111#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
112 REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
113 REQ_F_ASYNC_DATA)
114
115#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
116 IO_REQ_CLEAN_FLAGS)
117
118#define IO_TCTX_REFS_CACHE_NR (1U << 10)
119
120#define IO_COMPL_BATCH 32
121#define IO_REQ_ALLOC_BATCH 8
122
123enum {
124 IO_CHECK_CQ_OVERFLOW_BIT,
125 IO_CHECK_CQ_DROPPED_BIT,
126};
127
128enum {
129 IO_EVENTFD_OP_SIGNAL_BIT,
130 IO_EVENTFD_OP_FREE_BIT,
131};
132
133struct io_defer_entry {
134 struct list_head list;
135 struct io_kiocb *req;
136 u32 seq;
137};
138
139/* requests with any of those set should undergo io_disarm_next() */
140#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
141#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
142
143static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
144 struct task_struct *task,
145 bool cancel_all);
146
147static void io_dismantle_req(struct io_kiocb *req);
148static void io_clean_op(struct io_kiocb *req);
149static void io_queue_sqe(struct io_kiocb *req);
150static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
151static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
152static __cold void io_fallback_tw(struct io_uring_task *tctx);
153
154static struct kmem_cache *req_cachep;
155
156struct sock *io_uring_get_socket(struct file *file)
157{
158#if defined(CONFIG_UNIX)
159 if (io_is_uring_fops(file)) {
160 struct io_ring_ctx *ctx = file->private_data;
161
162 return ctx->ring_sock->sk;
163 }
164#endif
165 return NULL;
166}
167EXPORT_SYMBOL(io_uring_get_socket);
168
169static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
170{
171 if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
172 ctx->submit_state.cqes_count)
173 __io_submit_flush_completions(ctx);
174}
175
176static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
177{
178 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
179}
180
181static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
182{
183 return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
184}
185
186static bool io_match_linked(struct io_kiocb *head)
187{
188 struct io_kiocb *req;
189
190 io_for_each_link(req, head) {
191 if (req->flags & REQ_F_INFLIGHT)
192 return true;
193 }
194 return false;
195}
196
197/*
198 * As io_match_task() but protected against racing with linked timeouts.
199 * User must not hold timeout_lock.
200 */
201bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
202 bool cancel_all)
203{
204 bool matched;
205
206 if (task && head->task != task)
207 return false;
208 if (cancel_all)
209 return true;
210
211 if (head->flags & REQ_F_LINK_TIMEOUT) {
212 struct io_ring_ctx *ctx = head->ctx;
213
214 /* protect against races with linked timeouts */
215 spin_lock_irq(&ctx->timeout_lock);
216 matched = io_match_linked(head);
217 spin_unlock_irq(&ctx->timeout_lock);
218 } else {
219 matched = io_match_linked(head);
220 }
221 return matched;
222}
223
224static inline void req_fail_link_node(struct io_kiocb *req, int res)
225{
226 req_set_fail(req);
227 io_req_set_res(req, res, 0);
228}
229
230static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
231{
232 wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
233}
234
235static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
236{
237 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
238
239 complete(&ctx->ref_comp);
240}
241
242static __cold void io_fallback_req_func(struct work_struct *work)
243{
244 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
245 fallback_work.work);
246 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
247 struct io_kiocb *req, *tmp;
248 bool locked = false;
249
250 percpu_ref_get(&ctx->refs);
251 llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
252 req->io_task_work.func(req, &locked);
253
254 if (locked) {
255 io_submit_flush_completions(ctx);
256 mutex_unlock(&ctx->uring_lock);
257 }
258 percpu_ref_put(&ctx->refs);
259}
260
261static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
262{
263 unsigned hash_buckets = 1U << bits;
264 size_t hash_size = hash_buckets * sizeof(table->hbs[0]);
265
266 table->hbs = kmalloc(hash_size, GFP_KERNEL);
267 if (!table->hbs)
268 return -ENOMEM;
269
270 table->hash_bits = bits;
271 init_hash_table(table, hash_buckets);
272 return 0;
273}
274
275static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
276{
277 struct io_ring_ctx *ctx;
278 int hash_bits;
279
280 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
281 if (!ctx)
282 return NULL;
283
284 xa_init(&ctx->io_bl_xa);
285
286 /*
287 * Use 5 bits less than the max cq entries; that should give us around
288 * 32 entries per hash list if totally full and uniformly spread, but
289 * don't keep too many buckets, so as not to over-consume memory.
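 * For example (illustrative numbers): p->cq_entries == 4096 gives
 * hash_bits == ilog2(4096) - 5 == 7, i.e. 128 buckets holding roughly
 * 32 entries each when the CQ is completely full.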
290 */
291 hash_bits = ilog2(p->cq_entries) - 5;
292 hash_bits = clamp(hash_bits, 1, 8);
293 if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
294 goto err;
295 if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
296 goto err;
297
298 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
299 if (!ctx->dummy_ubuf)
300 goto err;
301 /* set an invalid range, so io_import_fixed() fails when it meets it */
302 ctx->dummy_ubuf->ubuf = -1UL;
303
304 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
305 0, GFP_KERNEL))
306 goto err;
307
308 ctx->flags = p->flags;
309 init_waitqueue_head(&ctx->sqo_sq_wait);
310 INIT_LIST_HEAD(&ctx->sqd_list);
311 INIT_LIST_HEAD(&ctx->cq_overflow_list);
312 INIT_LIST_HEAD(&ctx->io_buffers_cache);
313 io_alloc_cache_init(&ctx->apoll_cache);
314 io_alloc_cache_init(&ctx->netmsg_cache);
315 init_completion(&ctx->ref_comp);
316 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
317 mutex_init(&ctx->uring_lock);
318 init_waitqueue_head(&ctx->cq_wait);
319 spin_lock_init(&ctx->completion_lock);
320 spin_lock_init(&ctx->timeout_lock);
321 INIT_WQ_LIST(&ctx->iopoll_list);
322 INIT_LIST_HEAD(&ctx->io_buffers_pages);
323 INIT_LIST_HEAD(&ctx->io_buffers_comp);
324 INIT_LIST_HEAD(&ctx->defer_list);
325 INIT_LIST_HEAD(&ctx->timeout_list);
326 INIT_LIST_HEAD(&ctx->ltimeout_list);
327 spin_lock_init(&ctx->rsrc_ref_lock);
328 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
329 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
330 init_task_work(&ctx->rsrc_put_tw, io_rsrc_put_tw);
331 init_llist_head(&ctx->rsrc_put_llist);
332 init_llist_head(&ctx->work_llist);
333 INIT_LIST_HEAD(&ctx->tctx_list);
334 ctx->submit_state.free_list.next = NULL;
335 INIT_WQ_LIST(&ctx->locked_free_list);
336 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
337 INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
338 return ctx;
339err:
340 kfree(ctx->dummy_ubuf);
341 kfree(ctx->cancel_table.hbs);
342 kfree(ctx->cancel_table_locked.hbs);
343 kfree(ctx->io_bl);
344 xa_destroy(&ctx->io_bl_xa);
345 kfree(ctx);
346 return NULL;
347}
348
349static void io_account_cq_overflow(struct io_ring_ctx *ctx)
350{
351 struct io_rings *r = ctx->rings;
352
353 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
354 ctx->cq_extra--;
355}
356
357static bool req_need_defer(struct io_kiocb *req, u32 seq)
358{
359 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
360 struct io_ring_ctx *ctx = req->ctx;
361
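		/*
		 * Drain until every request submitted before this one has
		 * posted its CQE: cq_extra adjusts for CQEs that don't map
		 * 1:1 onto submitted SQEs, so the drain target is
		 * cached_cq_tail catching up to seq + cq_extra.
		 */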
362 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
363 }
364
365 return false;
366}
367
368static inline void io_req_track_inflight(struct io_kiocb *req)
369{
370 if (!(req->flags & REQ_F_INFLIGHT)) {
371 req->flags |= REQ_F_INFLIGHT;
372 atomic_inc(&req->task->io_uring->inflight_tracked);
373 }
374}
375
376static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
377{
378 if (WARN_ON_ONCE(!req->link))
379 return NULL;
380
381 req->flags &= ~REQ_F_ARM_LTIMEOUT;
382 req->flags |= REQ_F_LINK_TIMEOUT;
383
384 /* linked timeouts should have two refs once prep'ed */
385 io_req_set_refcount(req);
386 __io_req_set_refcount(req->link, 2);
387 return req->link;
388}
389
390static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
391{
392 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
393 return NULL;
394 return __io_prep_linked_timeout(req);
395}
396
397static noinline void __io_arm_ltimeout(struct io_kiocb *req)
398{
399 io_queue_linked_timeout(__io_prep_linked_timeout(req));
400}
401
402static inline void io_arm_ltimeout(struct io_kiocb *req)
403{
404 if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
405 __io_arm_ltimeout(req);
406}
407
408static void io_prep_async_work(struct io_kiocb *req)
409{
410 const struct io_op_def *def = &io_op_defs[req->opcode];
411 struct io_ring_ctx *ctx = req->ctx;
412
413 if (!(req->flags & REQ_F_CREDS)) {
414 req->flags |= REQ_F_CREDS;
415 req->creds = get_current_cred();
416 }
417
418 req->work.list.next = NULL;
419 req->work.flags = 0;
420 req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
421 if (req->flags & REQ_F_FORCE_ASYNC)
422 req->work.flags |= IO_WQ_WORK_CONCURRENT;
423
424 if (req->file && !io_req_ffs_set(req))
425 req->flags |= io_file_get_flags(req->file) << REQ_F_SUPPORT_NOWAIT_BIT;
426
427 if (req->flags & REQ_F_ISREG) {
428 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
429 io_wq_hash_work(&req->work, file_inode(req->file));
430 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
431 if (def->unbound_nonreg_file)
432 req->work.flags |= IO_WQ_WORK_UNBOUND;
433 }
434}
435
436static void io_prep_async_link(struct io_kiocb *req)
437{
438 struct io_kiocb *cur;
439
440 if (req->flags & REQ_F_LINK_TIMEOUT) {
441 struct io_ring_ctx *ctx = req->ctx;
442
443 spin_lock_irq(&ctx->timeout_lock);
444 io_for_each_link(cur, req)
445 io_prep_async_work(cur);
446 spin_unlock_irq(&ctx->timeout_lock);
447 } else {
448 io_for_each_link(cur, req)
449 io_prep_async_work(cur);
450 }
451}
452
453void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
454{
455 struct io_kiocb *link = io_prep_linked_timeout(req);
456 struct io_uring_task *tctx = req->task->io_uring;
457
458 BUG_ON(!tctx);
459 BUG_ON(!tctx->io_wq);
460
461 /* init ->work of the whole link before punting */
462 io_prep_async_link(req);
463
464 /*
465 * Not expected to happen, but if we do have a bug where this _can_
466 * happen, catch it here and ensure the request is marked as
467 * canceled. That will make io-wq go through the usual work cancel
468 * procedure rather than attempt to run this request (or create a new
469 * worker for it).
470 */
471 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
472 req->work.flags |= IO_WQ_WORK_CANCEL;
473
474 trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
475 io_wq_enqueue(tctx->io_wq, &req->work);
476 if (link)
477 io_queue_linked_timeout(link);
478}
479
480static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
481{
482 while (!list_empty(&ctx->defer_list)) {
483 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
484 struct io_defer_entry, list);
485
486 if (req_need_defer(de->req, de->seq))
487 break;
488 list_del_init(&de->list);
489 io_req_task_queue(de->req);
490 kfree(de);
491 }
492}
493
494
495static void io_eventfd_ops(struct rcu_head *rcu)
496{
497 struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
498 int ops = atomic_xchg(&ev_fd->ops, 0);
499
500 if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
501 eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
502
503 /* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
504 * ordering in a race but if references are 0 we know we have to free
505 * it regardless.
506 */
507 if (atomic_dec_and_test(&ev_fd->refs)) {
508 eventfd_ctx_put(ev_fd->cq_ev_fd);
509 kfree(ev_fd);
510 }
511}
512
513static void io_eventfd_signal(struct io_ring_ctx *ctx)
514{
515 struct io_ev_fd *ev_fd = NULL;
516
517 rcu_read_lock();
518 /*
519 * rcu_dereference ctx->io_ev_fd once and use it both for checking
520 * and for eventfd_signal
521 */
522 ev_fd = rcu_dereference(ctx->io_ev_fd);
523
524 /*
525 * Check again if ev_fd exists in case an io_eventfd_unregister call
526 * completed between the NULL check of ctx->io_ev_fd at the start of
527 * the function and rcu_read_lock.
528 */
529 if (unlikely(!ev_fd))
530 goto out;
531 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
532 goto out;
533 if (ev_fd->eventfd_async && !io_wq_current_is_worker())
534 goto out;
535
536 if (likely(eventfd_signal_allowed())) {
537 eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
538 } else {
539 atomic_inc(&ev_fd->refs);
540 if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
541 call_rcu_hurry(&ev_fd->rcu, io_eventfd_ops);
542 else
543 atomic_dec(&ev_fd->refs);
544 }
545
546out:
547 rcu_read_unlock();
548}
549
550static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
551{
552 bool skip;
553
554 spin_lock(&ctx->completion_lock);
555
556 /*
557 * Eventfd should only get triggered when at least one event has been
558 * posted. Some applications rely on the eventfd notification count
559 * only changing IFF a new CQE has been added to the CQ ring. There's
560 * no dependency on a 1:1 relationship between how many times this
561 * function is called (and hence the eventfd count) and the number of CQEs
562 * posted to the CQ ring.
563 */
564 skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
565 ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
566 spin_unlock(&ctx->completion_lock);
567 if (skip)
568 return;
569
570 io_eventfd_signal(ctx);
571}
572
573void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
574{
575 if (ctx->off_timeout_used)
576 io_flush_timeouts(ctx);
577 if (ctx->drain_active) {
578 spin_lock(&ctx->completion_lock);
579 io_queue_deferred(ctx);
580 spin_unlock(&ctx->completion_lock);
581 }
582 if (ctx->has_evfd)
583 io_eventfd_flush_signal(ctx);
584}
585
586static inline void __io_cq_lock(struct io_ring_ctx *ctx)
587 __acquires(ctx->completion_lock)
588{
589 if (!ctx->task_complete)
590 spin_lock(&ctx->completion_lock);
591}
592
593static inline void __io_cq_unlock(struct io_ring_ctx *ctx)
594{
595 if (!ctx->task_complete)
596 spin_unlock(&ctx->completion_lock);
597}
598
599static inline void io_cq_lock(struct io_ring_ctx *ctx)
600 __acquires(ctx->completion_lock)
601{
602 spin_lock(&ctx->completion_lock);
603}
604
605static inline void io_cq_unlock(struct io_ring_ctx *ctx)
606 __releases(ctx->completion_lock)
607{
608 spin_unlock(&ctx->completion_lock);
609}
610
611/* keep it inlined for io_submit_flush_completions() */
612static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
613 __releases(ctx->completion_lock)
614{
615 io_commit_cqring(ctx);
616 __io_cq_unlock(ctx);
617 io_commit_cqring_flush(ctx);
618 io_cqring_wake(ctx);
619}
620
621void io_cq_unlock_post(struct io_ring_ctx *ctx)
622 __releases(ctx->completion_lock)
623{
624 io_commit_cqring(ctx);
625 spin_unlock(&ctx->completion_lock);
626 io_commit_cqring_flush(ctx);
627 io_cqring_wake(ctx);
628}
629
630/* Drop any backlogged overflow CQEs without posting them to the CQ ring */
631static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
632{
633 struct io_overflow_cqe *ocqe;
634 LIST_HEAD(list);
635
636 io_cq_lock(ctx);
637 list_splice_init(&ctx->cq_overflow_list, &list);
638 clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
639 io_cq_unlock(ctx);
640
641 while (!list_empty(&list)) {
642 ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
643 list_del(&ocqe->list);
644 kfree(ocqe);
645 }
646}
647
648/* Flush backlogged overflow CQEs into the CQ ring, as far as there is room */
649static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
650{
651 size_t cqe_size = sizeof(struct io_uring_cqe);
652
653 if (__io_cqring_events(ctx) == ctx->cq_entries)
654 return;
655
656 if (ctx->flags & IORING_SETUP_CQE32)
657 cqe_size <<= 1;
658
659 io_cq_lock(ctx);
660 while (!list_empty(&ctx->cq_overflow_list)) {
661 struct io_uring_cqe *cqe = io_get_cqe_overflow(ctx, true);
662 struct io_overflow_cqe *ocqe;
663
664 if (!cqe)
665 break;
666 ocqe = list_first_entry(&ctx->cq_overflow_list,
667 struct io_overflow_cqe, list);
668 memcpy(cqe, &ocqe->cqe, cqe_size);
669 list_del(&ocqe->list);
670 kfree(ocqe);
671 }
672
673 if (list_empty(&ctx->cq_overflow_list)) {
674 clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
675 atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
676 }
677 io_cq_unlock_post(ctx);
678}
679
680static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
681{
682 /* iopoll syncs against uring_lock, not completion_lock */
683 if (ctx->flags & IORING_SETUP_IOPOLL)
684 mutex_lock(&ctx->uring_lock);
685 __io_cqring_overflow_flush(ctx);
686 if (ctx->flags & IORING_SETUP_IOPOLL)
687 mutex_unlock(&ctx->uring_lock);
688}
689
690static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
691{
692 if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
693 io_cqring_do_overflow_flush(ctx);
694}
695
696void __io_put_task(struct task_struct *task, int nr)
697{
698 struct io_uring_task *tctx = task->io_uring;
699
700 percpu_counter_sub(&tctx->inflight, nr);
701 if (unlikely(atomic_read(&tctx->in_idle)))
702 wake_up(&tctx->wait);
703 put_task_struct_many(task, nr);
704}
705
706void io_task_refs_refill(struct io_uring_task *tctx)
707{
708 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
709
710 percpu_counter_add(&tctx->inflight, refill);
711 refcount_add(refill, &current->usage);
712 tctx->cached_refs += refill;
713}
714
715static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
716{
717 struct io_uring_task *tctx = task->io_uring;
718 unsigned int refs = tctx->cached_refs;
719
720 if (refs) {
721 tctx->cached_refs = 0;
722 percpu_counter_sub(&tctx->inflight, refs);
723 put_task_struct_many(task, refs);
724 }
725}
726
727static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
728 s32 res, u32 cflags, u64 extra1, u64 extra2)
729{
730 struct io_overflow_cqe *ocqe;
731 size_t ocq_size = sizeof(struct io_overflow_cqe);
732 bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
733
734 lockdep_assert_held(&ctx->completion_lock);
735
736 if (is_cqe32)
737 ocq_size += sizeof(struct io_uring_cqe);
738
739 ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
740 trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
741 if (!ocqe) {
742 /*
743 * If we're in ring overflow flush mode, or in task cancel mode,
744 * or cannot allocate an overflow entry, then we need to drop it
745 * on the floor.
746 */
747 io_account_cq_overflow(ctx);
748 set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
749 return false;
750 }
751 if (list_empty(&ctx->cq_overflow_list)) {
752 set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
753 atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
754
755 }
756 ocqe->cqe.user_data = user_data;
757 ocqe->cqe.res = res;
758 ocqe->cqe.flags = cflags;
759 if (is_cqe32) {
760 ocqe->cqe.big_cqe[0] = extra1;
761 ocqe->cqe.big_cqe[1] = extra2;
762 }
763 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
764 return true;
765}
766
767bool io_req_cqe_overflow(struct io_kiocb *req)
768{
769 if (!(req->flags & REQ_F_CQE32_INIT)) {
770 req->extra1 = 0;
771 req->extra2 = 0;
772 }
773 return io_cqring_event_overflow(req->ctx, req->cqe.user_data,
774 req->cqe.res, req->cqe.flags,
775 req->extra1, req->extra2);
776}
777
778/*
779 * writes to the cq entry need to come after reading head; the
780 * control dependency is enough as we're using WRITE_ONCE to
781 * fill the cq entry
782 */
783struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
784{
785 struct io_rings *rings = ctx->rings;
786 unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
787 unsigned int free, queued, len;
788
789 /*
790 * Posting into the CQ when there are pending overflowed CQEs may break
791 * ordering guarantees, which will affect links, F_MORE users and more.
792 * Force overflow the completion.
793 */
794 if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
795 return NULL;
796
797 /* userspace may cheat by modifying the tail, be safe and do min */
798 queued = min(__io_cqring_events(ctx), ctx->cq_entries);
799 free = ctx->cq_entries - queued;
800 /* we need a contiguous range, limit based on the current array offset */
801 len = min(free, ctx->cq_entries - off);
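	/*
	 * Worked example with illustrative numbers: cq_entries == 8,
	 * cached_cq_tail == 6 and cq.head == 3 give off == 6, queued == 3,
	 * free == 5 and len == min(5, 8 - 6) == 2, i.e. only the two slots
	 * left before the array wraps are handed out by this call.
	 */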
802 if (!len)
803 return NULL;
804
805 if (ctx->flags & IORING_SETUP_CQE32) {
806 off <<= 1;
807 len <<= 1;
808 }
809
810 ctx->cqe_cached = &rings->cqes[off];
811 ctx->cqe_sentinel = ctx->cqe_cached + len;
812
813 ctx->cached_cq_tail++;
814 ctx->cqe_cached++;
815 if (ctx->flags & IORING_SETUP_CQE32)
816 ctx->cqe_cached++;
817 return &rings->cqes[off];
818}
819
820static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
821 u32 cflags)
822{
823 struct io_uring_cqe *cqe;
824
825 ctx->cq_extra++;
826
827 /*
828 * If we can't get a cq entry, userspace overflowed the
829 * submission (by quite a lot). Increment the overflow count in
830 * the ring.
831 */
832 cqe = io_get_cqe(ctx);
833 if (likely(cqe)) {
834 trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
835
836 WRITE_ONCE(cqe->user_data, user_data);
837 WRITE_ONCE(cqe->res, res);
838 WRITE_ONCE(cqe->flags, cflags);
839
840 if (ctx->flags & IORING_SETUP_CQE32) {
841 WRITE_ONCE(cqe->big_cqe[0], 0);
842 WRITE_ONCE(cqe->big_cqe[1], 0);
843 }
844 return true;
845 }
846 return false;
847}
848
849static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
850 __must_hold(&ctx->uring_lock)
851{
852 struct io_submit_state *state = &ctx->submit_state;
853 unsigned int i;
854
855 lockdep_assert_held(&ctx->uring_lock);
856 for (i = 0; i < state->cqes_count; i++) {
857 struct io_uring_cqe *cqe = &state->cqes[i];
858
859 if (!io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags)) {
860 if (ctx->task_complete) {
861 spin_lock(&ctx->completion_lock);
862 io_cqring_event_overflow(ctx, cqe->user_data,
863 cqe->res, cqe->flags, 0, 0);
864 spin_unlock(&ctx->completion_lock);
865 } else {
866 io_cqring_event_overflow(ctx, cqe->user_data,
867 cqe->res, cqe->flags, 0, 0);
868 }
869 }
870 }
871 state->cqes_count = 0;
872}
873
874static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
875 bool allow_overflow)
876{
877 bool filled;
878
879 io_cq_lock(ctx);
880 filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
881 if (!filled && allow_overflow)
882 filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
883
884 io_cq_unlock_post(ctx);
885 return filled;
886}
887
888bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
889{
890 return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
891}
892
893bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
894 bool allow_overflow)
895{
896 struct io_uring_cqe *cqe;
897 unsigned int length;
898
899 if (!defer)
900 return __io_post_aux_cqe(ctx, user_data, res, cflags, allow_overflow);
901
902 length = ARRAY_SIZE(ctx->submit_state.cqes);
903
904 lockdep_assert_held(&ctx->uring_lock);
905
906 if (ctx->submit_state.cqes_count == length) {
907 __io_cq_lock(ctx);
908 __io_flush_post_cqes(ctx);
909 /* no need to flush - flush is deferred */
910 __io_cq_unlock_post(ctx);
911 }
912
913 /* For deferred completions this is not as strict as it is otherwise,
914 * however its main job is to prevent unbounded posted completions,
915 * and in that it works just as well.
916 */
917 if (!allow_overflow && test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
918 return false;
919
920 cqe = &ctx->submit_state.cqes[ctx->submit_state.cqes_count++];
921 cqe->user_data = user_data;
922 cqe->res = res;
923 cqe->flags = cflags;
924 return true;
925}
926
927static void __io_req_complete_post(struct io_kiocb *req)
928{
929 struct io_ring_ctx *ctx = req->ctx;
930
931 io_cq_lock(ctx);
932 if (!(req->flags & REQ_F_CQE_SKIP))
933 io_fill_cqe_req(ctx, req);
934
935 /*
936 * If we're the last reference to this request, add to our locked
937 * free_list cache.
938 */
939 if (req_ref_put_and_test(req)) {
940 if (req->flags & IO_REQ_LINK_FLAGS) {
941 if (req->flags & IO_DISARM_MASK)
942 io_disarm_next(req);
943 if (req->link) {
944 io_req_task_queue(req->link);
945 req->link = NULL;
946 }
947 }
948 io_req_put_rsrc(req);
949 /*
950 * Selected buffer deallocation in io_clean_op() assumes that
951 * we don't hold ->completion_lock. Clean them here to avoid
952 * deadlocks.
953 */
954 io_put_kbuf_comp(req);
955 io_dismantle_req(req);
956 io_put_task(req->task, 1);
957 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
958 ctx->locked_free_nr++;
959 }
960 io_cq_unlock_post(ctx);
961}
962
963void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
964{
965 if (req->ctx->task_complete && (issue_flags & IO_URING_F_IOWQ)) {
966 req->io_task_work.func = io_req_task_complete;
967 io_req_task_work_add(req);
968 } else if (!(issue_flags & IO_URING_F_UNLOCKED) ||
969 !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
970 __io_req_complete_post(req);
971 } else {
972 struct io_ring_ctx *ctx = req->ctx;
973
974 mutex_lock(&ctx->uring_lock);
975 __io_req_complete_post(req);
976 mutex_unlock(&ctx->uring_lock);
977 }
978}
979
980void io_req_defer_failed(struct io_kiocb *req, s32 res)
981 __must_hold(&ctx->uring_lock)
982{
983 const struct io_op_def *def = &io_op_defs[req->opcode];
984
985 lockdep_assert_held(&req->ctx->uring_lock);
986
987 req_set_fail(req);
988 io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
989 if (def->fail)
990 def->fail(req);
991 io_req_complete_defer(req);
992}
993
994/*
995 * Don't initialise the fields below on every allocation, but do that in
996 * advance and keep them valid across allocations.
997 */
998static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
999{
1000 req->ctx = ctx;
1001 req->link = NULL;
1002 req->async_data = NULL;
1003 /* not necessary, but safer to zero */
1004 req->cqe.res = 0;
1005}
1006
1007static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1008 struct io_submit_state *state)
1009{
1010 spin_lock(&ctx->completion_lock);
1011 wq_list_splice(&ctx->locked_free_list, &state->free_list);
1012 ctx->locked_free_nr = 0;
1013 spin_unlock(&ctx->completion_lock);
1014}
1015
1016/*
1017 * A request might get retired back into the request caches even before opcode
1018 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
1019 * Because of that, io_alloc_req() should be called only under ->uring_lock
1020 * and with extra caution to not get a request that is still worked on.
1021 */
1022__cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
1023 __must_hold(&ctx->uring_lock)
1024{
1025 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1026 void *reqs[IO_REQ_ALLOC_BATCH];
1027 int ret, i;
1028
1029 /*
1030 * If we have more than a batch's worth of requests in our IRQ side
1031 * locked cache, grab the lock and move them over to our submission
1032 * side cache.
1033 */
1034 if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
1035 io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
1036 if (!io_req_cache_empty(ctx))
1037 return true;
1038 }
1039
1040 ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
1041
1042 /*
1043 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1044 * retry single alloc to be on the safe side.
1045 */
1046 if (unlikely(ret <= 0)) {
1047 reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1048 if (!reqs[0])
1049 return false;
1050 ret = 1;
1051 }
1052
1053 percpu_ref_get_many(&ctx->refs, ret);
1054 for (i = 0; i < ret; i++) {
1055 struct io_kiocb *req = reqs[i];
1056
1057 io_preinit_req(req, ctx);
1058 io_req_add_to_cache(req, ctx);
1059 }
1060 return true;
1061}
1062
1063static inline void io_dismantle_req(struct io_kiocb *req)
1064{
1065 unsigned int flags = req->flags;
1066
1067 if (unlikely(flags & IO_REQ_CLEAN_FLAGS))
1068 io_clean_op(req);
1069 if (!(flags & REQ_F_FIXED_FILE))
1070 io_put_file(req->file);
1071}
1072
1073__cold void io_free_req(struct io_kiocb *req)
1074{
1075 struct io_ring_ctx *ctx = req->ctx;
1076
1077 io_req_put_rsrc(req);
1078 io_dismantle_req(req);
1079 io_put_task(req->task, 1);
1080
1081 spin_lock(&ctx->completion_lock);
1082 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
1083 ctx->locked_free_nr++;
1084 spin_unlock(&ctx->completion_lock);
1085}
1086
1087static void __io_req_find_next_prep(struct io_kiocb *req)
1088{
1089 struct io_ring_ctx *ctx = req->ctx;
1090
1091 spin_lock(&ctx->completion_lock);
1092 io_disarm_next(req);
1093 spin_unlock(&ctx->completion_lock);
1094}
1095
1096static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
1097{
1098 struct io_kiocb *nxt;
1099
1100 /*
1101 * If LINK is set, we have dependent requests in this chain. If we
1102 * didn't fail this request, queue the first one up, moving any other
1103 * dependencies to the next request. In case of failure, fail the rest
1104 * of the chain.
1105 */
1106 if (unlikely(req->flags & IO_DISARM_MASK))
1107 __io_req_find_next_prep(req);
1108 nxt = req->link;
1109 req->link = NULL;
1110 return nxt;
1111}
1112
1113static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
1114{
1115 if (!ctx)
1116 return;
1117 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1118 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1119 if (*locked) {
1120 io_submit_flush_completions(ctx);
1121 mutex_unlock(&ctx->uring_lock);
1122 *locked = false;
1123 }
1124 percpu_ref_put(&ctx->refs);
1125}
1126
1127static unsigned int handle_tw_list(struct llist_node *node,
1128 struct io_ring_ctx **ctx, bool *locked,
1129 struct llist_node *last)
1130{
1131 unsigned int count = 0;
1132
1133 while (node != last) {
1134 struct llist_node *next = node->next;
1135 struct io_kiocb *req = container_of(node, struct io_kiocb,
1136 io_task_work.node);
1137
1138 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
1139
1140 if (req->ctx != *ctx) {
1141 ctx_flush_and_put(*ctx, locked);
1142 *ctx = req->ctx;
1143 /* if not contended, grab and improve batching */
1144 *locked = mutex_trylock(&(*ctx)->uring_lock);
1145 percpu_ref_get(&(*ctx)->refs);
1146 }
1147 req->io_task_work.func(req, locked);
1148 node = next;
1149 count++;
1150 }
1151
1152 return count;
1153}
1154
1155/**
1156 * io_llist_xchg - swap all entries in a lock-less list
1157 * @head: the head of lock-less list to delete all entries
1158 * @new: new entry as the head of the list
1159 *
1160 * If the list is empty, return NULL; otherwise, return a pointer to the first entry.
1161 * The order of entries returned is from the newest to the oldest added one.
1162 */
1163static inline struct llist_node *io_llist_xchg(struct llist_head *head,
1164 struct llist_node *new)
1165{
1166 return xchg(&head->first, new);
1167}
1168
1169/**
1170 * io_llist_cmpxchg - possibly swap all entries in a lock-less list
1171 * @head: the head of lock-less list to delete all entries
1172 * @old: expected old value of the first entry of the list
1173 * @new: new entry as the head of the list
1174 *
1175 * Perform a cmpxchg on the first entry of the list.
1176 */
1177
1178static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
1179 struct llist_node *old,
1180 struct llist_node *new)
1181{
1182 return cmpxchg(&head->first, old, new);
1183}
1184
1185void tctx_task_work(struct callback_head *cb)
1186{
1187 bool uring_locked = false;
1188 struct io_ring_ctx *ctx = NULL;
1189 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
1190 task_work);
1191 struct llist_node fake = {};
1192 struct llist_node *node;
1193 unsigned int loops = 1;
1194 unsigned int count;
1195
1196 if (unlikely(current->flags & PF_EXITING)) {
1197 io_fallback_tw(tctx);
1198 return;
1199 }
1200
1201 node = io_llist_xchg(&tctx->task_list, &fake);
1202 count = handle_tw_list(node, &ctx, &uring_locked, NULL);
1203 node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
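	/*
	 * &fake acts as a boundary marker: the cmpxchg above clears the list
	 * only if nothing new was queued while the batch ran. If it failed,
	 * fresh work sits in front of &fake, so swap the list out again and
	 * run entries up to &fake, repeating until the list is seen empty.
	 */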
1204 while (node != &fake) {
1205 loops++;
1206 node = io_llist_xchg(&tctx->task_list, &fake);
1207 count += handle_tw_list(node, &ctx, &uring_locked, &fake);
1208 node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
1209 }
1210
1211 ctx_flush_and_put(ctx, &uring_locked);
1212
1213 /* relaxed read is enough as only the task itself sets ->in_idle */
1214 if (unlikely(atomic_read(&tctx->in_idle)))
1215 io_uring_drop_tctx_refs(current);
1216
1217 trace_io_uring_task_work_run(tctx, count, loops);
1218}
1219
1220static __cold void io_fallback_tw(struct io_uring_task *tctx)
1221{
1222 struct llist_node *node = llist_del_all(&tctx->task_list);
1223 struct io_kiocb *req;
1224
1225 while (node) {
1226 req = container_of(node, struct io_kiocb, io_task_work.node);
1227 node = node->next;
1228 if (llist_add(&req->io_task_work.node,
1229 &req->ctx->fallback_llist))
1230 schedule_delayed_work(&req->ctx->fallback_work, 1);
1231 }
1232}
1233
1234static void io_req_local_work_add(struct io_kiocb *req)
1235{
1236 struct io_ring_ctx *ctx = req->ctx;
1237
1238 percpu_ref_get(&ctx->refs);
1239
1240 if (!llist_add(&req->io_task_work.node, &ctx->work_llist)) {
1241 percpu_ref_put(&ctx->refs);
1242 return;
1243 }
1244 /* need it for the following io_cqring_wake() */
1245 smp_mb__after_atomic();
1246
1247 if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
1248 io_move_task_work_from_local(ctx);
1249 percpu_ref_put(&ctx->refs);
1250 return;
1251 }
1252
1253 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1254 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1255
1256 if (ctx->has_evfd)
1257 io_eventfd_signal(ctx);
1258 __io_cqring_wake(ctx);
1259 percpu_ref_put(&ctx->refs);
1260}
1261
1262void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
1263{
1264 struct io_uring_task *tctx = req->task->io_uring;
1265 struct io_ring_ctx *ctx = req->ctx;
1266
1267 if (allow_local && ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
1268 io_req_local_work_add(req);
1269 return;
1270 }
1271
1272 /* task_work already pending, we're done */
1273 if (!llist_add(&req->io_task_work.node, &tctx->task_list))
1274 return;
1275
1276 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1277 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1278
1279 if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
1280 return;
1281
1282 io_fallback_tw(tctx);
1283}
1284
1285static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
1286{
1287 struct llist_node *node;
1288
1289 node = llist_del_all(&ctx->work_llist);
1290 while (node) {
1291 struct io_kiocb *req = container_of(node, struct io_kiocb,
1292 io_task_work.node);
1293
1294 node = node->next;
1295 __io_req_task_work_add(req, false);
1296 }
1297}
1298
1299int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked)
1300{
1301 struct llist_node *node;
1302 struct llist_node fake;
1303 struct llist_node *current_final = NULL;
1304 int ret;
1305 unsigned int loops = 1;
1306
1307 if (unlikely(ctx->submitter_task != current))
1308 return -EEXIST;
1309
1310 node = io_llist_xchg(&ctx->work_llist, &fake);
1311 ret = 0;
1312again:
1313 while (node != current_final) {
1314 struct llist_node *next = node->next;
1315 struct io_kiocb *req = container_of(node, struct io_kiocb,
1316 io_task_work.node);
1317 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
1318 req->io_task_work.func(req, locked);
1319 ret++;
1320 node = next;
1321 }
1322
1323 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1324 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1325
1326 node = io_llist_cmpxchg(&ctx->work_llist, &fake, NULL);
1327 if (node != &fake) {
1328 loops++;
1329 current_final = &fake;
1330 node = io_llist_xchg(&ctx->work_llist, &fake);
1331 goto again;
1332 }
1333
1334 if (*locked)
1335 io_submit_flush_completions(ctx);
1336 trace_io_uring_local_work_run(ctx, ret, loops);
1337 return ret;
1338
1339}
1340
1341int io_run_local_work(struct io_ring_ctx *ctx)
1342{
1343 bool locked;
1344 int ret;
1345
1346 if (llist_empty(&ctx->work_llist))
1347 return 0;
1348
1349 __set_current_state(TASK_RUNNING);
1350 locked = mutex_trylock(&ctx->uring_lock);
1351 ret = __io_run_local_work(ctx, &locked);
1352 if (locked)
1353 mutex_unlock(&ctx->uring_lock);
1354
1355 return ret;
1356}
1357
1358static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
1359{
1360 io_tw_lock(req->ctx, locked);
1361 io_req_defer_failed(req, req->cqe.res);
1362}
1363
1364void io_req_task_submit(struct io_kiocb *req, bool *locked)
1365{
1366 io_tw_lock(req->ctx, locked);
1367 /* req->task == current here, checking PF_EXITING is safe */
1368 if (likely(!(req->task->flags & PF_EXITING)))
1369 io_queue_sqe(req);
1370 else
1371 io_req_defer_failed(req, -EFAULT);
1372}
1373
1374void io_req_task_queue_fail(struct io_kiocb *req, int ret)
1375{
1376 io_req_set_res(req, ret, 0);
1377 req->io_task_work.func = io_req_task_cancel;
1378 io_req_task_work_add(req);
1379}
1380
1381void io_req_task_queue(struct io_kiocb *req)
1382{
1383 req->io_task_work.func = io_req_task_submit;
1384 io_req_task_work_add(req);
1385}
1386
1387void io_queue_next(struct io_kiocb *req)
1388{
1389 struct io_kiocb *nxt = io_req_find_next(req);
1390
1391 if (nxt)
1392 io_req_task_queue(nxt);
1393}
1394
1395void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
1396 __must_hold(&ctx->uring_lock)
1397{
1398 struct task_struct *task = NULL;
1399 int task_refs = 0;
1400
1401 do {
1402 struct io_kiocb *req = container_of(node, struct io_kiocb,
1403 comp_list);
1404
1405 if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
1406 if (req->flags & REQ_F_REFCOUNT) {
1407 node = req->comp_list.next;
1408 if (!req_ref_put_and_test(req))
1409 continue;
1410 }
1411 if ((req->flags & REQ_F_POLLED) && req->apoll) {
1412 struct async_poll *apoll = req->apoll;
1413
1414 if (apoll->double_poll)
1415 kfree(apoll->double_poll);
1416 if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
1417 kfree(apoll);
1418 req->flags &= ~REQ_F_POLLED;
1419 }
1420 if (req->flags & IO_REQ_LINK_FLAGS)
1421 io_queue_next(req);
1422 if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
1423 io_clean_op(req);
1424 }
1425 if (!(req->flags & REQ_F_FIXED_FILE))
1426 io_put_file(req->file);
1427
1428 io_req_put_rsrc_locked(req, ctx);
1429
1430 if (req->task != task) {
1431 if (task)
1432 io_put_task(task, task_refs);
1433 task = req->task;
1434 task_refs = 0;
1435 }
1436 task_refs++;
1437 node = req->comp_list.next;
1438 io_req_add_to_cache(req, ctx);
1439 } while (node);
1440
1441 if (task)
1442 io_put_task(task, task_refs);
1443}
1444
1445static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
1446 __must_hold(&ctx->uring_lock)
1447{
1448 struct io_wq_work_node *node, *prev;
1449 struct io_submit_state *state = &ctx->submit_state;
1450
1451 __io_cq_lock(ctx);
1452 /* must come first to preserve CQE ordering in failure cases */
1453 if (state->cqes_count)
1454 __io_flush_post_cqes(ctx);
1455 wq_list_for_each(node, prev, &state->compl_reqs) {
1456 struct io_kiocb *req = container_of(node, struct io_kiocb,
1457 comp_list);
1458
1459 if (!(req->flags & REQ_F_CQE_SKIP) &&
1460 unlikely(!__io_fill_cqe_req(ctx, req))) {
1461 if (ctx->task_complete) {
1462 spin_lock(&ctx->completion_lock);
1463 io_req_cqe_overflow(req);
1464 spin_unlock(&ctx->completion_lock);
1465 } else {
1466 io_req_cqe_overflow(req);
1467 }
1468 }
1469 }
1470 __io_cq_unlock_post(ctx);
1471
1472 if (!wq_list_empty(&ctx->submit_state.compl_reqs)) {
1473 io_free_batch_list(ctx, state->compl_reqs.first);
1474 INIT_WQ_LIST(&state->compl_reqs);
1475 }
1476}
1477
1478/*
1479 * Drop reference to request, return next in chain (if there is one) if this
1480 * was the last reference to this request.
1481 */
1482static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
1483{
1484 struct io_kiocb *nxt = NULL;
1485
1486 if (req_ref_put_and_test(req)) {
1487 if (unlikely(req->flags & IO_REQ_LINK_FLAGS))
1488 nxt = io_req_find_next(req);
1489 io_free_req(req);
1490 }
1491 return nxt;
1492}
1493
1494static unsigned io_cqring_events(struct io_ring_ctx *ctx)
1495{
1496 /* See comment at the top of this file */
1497 smp_rmb();
1498 return __io_cqring_events(ctx);
1499}
1500
1501/*
1502 * We can't just wait for polled events to come to us; we have to actively
1503 * find and complete them.
1504 */
1505static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
1506{
1507 if (!(ctx->flags & IORING_SETUP_IOPOLL))
1508 return;
1509
1510 mutex_lock(&ctx->uring_lock);
1511 while (!wq_list_empty(&ctx->iopoll_list)) {
1512 /* let it sleep and repeat later if we can't complete a request */
1513 if (io_do_iopoll(ctx, true) == 0)
1514 break;
1515 /*
1516 * Ensure we allow local-to-the-cpu processing to take place; in this
1517 * case we need to ensure that we reap all events.
1518 * Also let task_work, etc. progress by releasing the mutex.
1519 */
1520 if (need_resched()) {
1521 mutex_unlock(&ctx->uring_lock);
1522 cond_resched();
1523 mutex_lock(&ctx->uring_lock);
1524 }
1525 }
1526 mutex_unlock(&ctx->uring_lock);
1527}
1528
1529static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
1530{
1531 unsigned int nr_events = 0;
1532 int ret = 0;
1533 unsigned long check_cq;
1534
1535 if (!io_allowed_run_tw(ctx))
1536 return -EEXIST;
1537
1538 check_cq = READ_ONCE(ctx->check_cq);
1539 if (unlikely(check_cq)) {
1540 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
1541 __io_cqring_overflow_flush(ctx);
1542 /*
1543 * Similarly do not spin if we have not informed the user of any
1544 * dropped CQE.
1545 */
1546 if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
1547 return -EBADR;
1548 }
1549 /*
1550 * Don't enter poll loop if we already have events pending.
1551 * If we do, we can potentially be spinning for commands that
1552 * already triggered a CQE (eg in error).
1553 */
1554 if (io_cqring_events(ctx))
1555 return 0;
1556
1557 do {
1558 /*
1559 * If a submit got punted to a workqueue, we can have the
1560 * application entering polling for a command before it gets
1561 * issued. That app will hold the uring_lock for the duration
1562 * of the poll right here, so we need to take a breather every
1563 * now and then to ensure that the issue has a chance to add
1564 * the poll to the issued list. Otherwise we can spin here
1565 * forever, while the workqueue is stuck trying to acquire the
1566 * very same mutex.
1567 */
1568 if (wq_list_empty(&ctx->iopoll_list) ||
1569 io_task_work_pending(ctx)) {
1570 u32 tail = ctx->cached_cq_tail;
1571
1572 (void) io_run_local_work_locked(ctx);
1573
1574 if (task_work_pending(current) ||
1575 wq_list_empty(&ctx->iopoll_list)) {
1576 mutex_unlock(&ctx->uring_lock);
1577 io_run_task_work();
1578 mutex_lock(&ctx->uring_lock);
1579 }
1580 /* some requests don't go through iopoll_list */
1581 if (tail != ctx->cached_cq_tail ||
1582 wq_list_empty(&ctx->iopoll_list))
1583 break;
1584 }
1585 ret = io_do_iopoll(ctx, !min);
1586 if (ret < 0)
1587 break;
1588 nr_events += ret;
1589 ret = 0;
1590 } while (nr_events < min && !need_resched());
1591
1592 return ret;
1593}
1594
1595void io_req_task_complete(struct io_kiocb *req, bool *locked)
1596{
1597 if (*locked)
1598 io_req_complete_defer(req);
1599 else
1600 io_req_complete_post(req, IO_URING_F_UNLOCKED);
1601}
1602
1603/*
1604 * After the iocb has been issued, it's safe to be found on the poll list.
1605 * Adding the kiocb to the list AFTER submission ensures that we don't
1606 * find it from an io_do_iopoll() thread before the issuer is done
1607 * accessing the kiocb cookie.
1608 */
1609static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
1610{
1611 struct io_ring_ctx *ctx = req->ctx;
1612 const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
1613
1614 /* workqueue context doesn't hold uring_lock, grab it now */
1615 if (unlikely(needs_lock))
1616 mutex_lock(&ctx->uring_lock);
1617
1618 /*
1619 * Track whether we have multiple files in our lists. This will impact
1620 * how we do polling eventually, not spinning if we're on potentially
1621 * different devices.
1622 */
1623 if (wq_list_empty(&ctx->iopoll_list)) {
1624 ctx->poll_multi_queue = false;
1625 } else if (!ctx->poll_multi_queue) {
1626 struct io_kiocb *list_req;
1627
1628 list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
1629 comp_list);
1630 if (list_req->file != req->file)
1631 ctx->poll_multi_queue = true;
1632 }
1633
1634 /*
1635 * For fast devices, IO may have already completed. If it has, add
1636 * it to the front so we find it first.
1637 */
1638 if (READ_ONCE(req->iopoll_completed))
1639 wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
1640 else
1641 wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
1642
1643 if (unlikely(needs_lock)) {
1644 /*
1645 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
1646 * in sq thread task context or in io worker task context. If
1647 * current task context is sq thread, we don't need to check
1648 * whether should wake up sq thread.
1649 */
1650 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
1651 wq_has_sleeper(&ctx->sq_data->wait))
1652 wake_up(&ctx->sq_data->wait);
1653
1654 mutex_unlock(&ctx->uring_lock);
1655 }
1656}
1657
1658static bool io_bdev_nowait(struct block_device *bdev)
1659{
1660 return !bdev || bdev_nowait(bdev);
1661}
1662
1663/*
1664 * If we tracked the file through the SCM inflight mechanism, we could support
1665 * any file. For now, just ensure that anything potentially problematic is done
1666 * inline.
1667 */
1668static bool __io_file_supports_nowait(struct file *file, umode_t mode)
1669{
1670 if (S_ISBLK(mode)) {
1671 if (IS_ENABLED(CONFIG_BLOCK) &&
1672 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
1673 return true;
1674 return false;
1675 }
1676 if (S_ISSOCK(mode))
1677 return true;
1678 if (S_ISREG(mode)) {
1679 if (IS_ENABLED(CONFIG_BLOCK) &&
1680 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
1681 !io_is_uring_fops(file))
1682 return true;
1683 return false;
1684 }
1685
1686 /* any ->read/write should understand O_NONBLOCK */
1687 if (file->f_flags & O_NONBLOCK)
1688 return true;
1689 return file->f_mode & FMODE_NOWAIT;
1690}
1691
1692/*
1693 * Compute the FFS_* flags (FFS_ISREG, FFS_NOWAIT) that describe this file;
1694 * they are stored alongside the fixed-file pointer and folded into the
1695 * request flags for nowait handling.
1696 */
1697unsigned int io_file_get_flags(struct file *file)
1698{
1699 umode_t mode = file_inode(file)->i_mode;
1700 unsigned int res = 0;
1701
1702 if (S_ISREG(mode))
1703 res |= FFS_ISREG;
1704 if (__io_file_supports_nowait(file, mode))
1705 res |= FFS_NOWAIT;
1706 return res;
1707}
1708
1709bool io_alloc_async_data(struct io_kiocb *req)
1710{
1711 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
1712 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
1713 if (req->async_data) {
1714 req->flags |= REQ_F_ASYNC_DATA;
1715 return false;
1716 }
1717 return true;
1718}
1719
1720int io_req_prep_async(struct io_kiocb *req)
1721{
1722 const struct io_op_def *def = &io_op_defs[req->opcode];
1723
1724 /* assign early for deferred execution for non-fixed file */
1725 if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE))
1726 req->file = io_file_get_normal(req, req->cqe.fd);
1727 if (!def->prep_async)
1728 return 0;
1729 if (WARN_ON_ONCE(req_has_async_data(req)))
1730 return -EFAULT;
1731 if (!io_op_defs[req->opcode].manual_alloc) {
1732 if (io_alloc_async_data(req))
1733 return -EAGAIN;
1734 }
1735 return def->prep_async(req);
1736}
1737
1738static u32 io_get_sequence(struct io_kiocb *req)
1739{
1740 u32 seq = req->ctx->cached_sq_head;
1741 struct io_kiocb *cur;
1742
1743 /* need original cached_sq_head, but it was increased for each req */
1744 io_for_each_link(cur, req)
1745 seq--;
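	/*
	 * E.g. (illustrative numbers): a three-request link submitted when
	 * cached_sq_head was 10 leaves it at 13 here; walking the three
	 * linked requests brings seq back down to the original 10.
	 */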
1746 return seq;
1747}
1748
1749static __cold void io_drain_req(struct io_kiocb *req)
1750 __must_hold(&ctx->uring_lock)
1751{
1752 struct io_ring_ctx *ctx = req->ctx;
1753 struct io_defer_entry *de;
1754 int ret;
1755 u32 seq = io_get_sequence(req);
1756
1757 /* Still need to defer if there is a pending req in the defer list. */
1758 spin_lock(&ctx->completion_lock);
1759 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
1760 spin_unlock(&ctx->completion_lock);
1761queue:
1762 ctx->drain_active = false;
1763 io_req_task_queue(req);
1764 return;
1765 }
1766 spin_unlock(&ctx->completion_lock);
1767
1768 io_prep_async_link(req);
1769 de = kmalloc(sizeof(*de), GFP_KERNEL);
1770 if (!de) {
1771 ret = -ENOMEM;
1772 io_req_defer_failed(req, ret);
1773 return;
1774 }
1775
1776 spin_lock(&ctx->completion_lock);
1777 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
1778 spin_unlock(&ctx->completion_lock);
1779 kfree(de);
1780 goto queue;
1781 }
1782
1783 trace_io_uring_defer(req);
1784 de->req = req;
1785 de->seq = seq;
1786 list_add_tail(&de->list, &ctx->defer_list);
1787 spin_unlock(&ctx->completion_lock);
1788}
1789
1790static void io_clean_op(struct io_kiocb *req)
1791{
1792 if (req->flags & REQ_F_BUFFER_SELECTED) {
1793 spin_lock(&req->ctx->completion_lock);
1794 io_put_kbuf_comp(req);
1795 spin_unlock(&req->ctx->completion_lock);
1796 }
1797
1798 if (req->flags & REQ_F_NEED_CLEANUP) {
1799 const struct io_op_def *def = &io_op_defs[req->opcode];
1800
1801 if (def->cleanup)
1802 def->cleanup(req);
1803 }
1804 if ((req->flags & REQ_F_POLLED) && req->apoll) {
1805 kfree(req->apoll->double_poll);
1806 kfree(req->apoll);
1807 req->apoll = NULL;
1808 }
1809 if (req->flags & REQ_F_INFLIGHT) {
1810 struct io_uring_task *tctx = req->task->io_uring;
1811
1812 atomic_dec(&tctx->inflight_tracked);
1813 }
1814 if (req->flags & REQ_F_CREDS)
1815 put_cred(req->creds);
1816 if (req->flags & REQ_F_ASYNC_DATA) {
1817 kfree(req->async_data);
1818 req->async_data = NULL;
1819 }
1820 req->flags &= ~IO_REQ_CLEAN_FLAGS;
1821}
1822
1823static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
1824{
1825 if (req->file || !io_op_defs[req->opcode].needs_file)
1826 return true;
1827
1828 if (req->flags & REQ_F_FIXED_FILE)
1829 req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
1830 else
1831 req->file = io_file_get_normal(req, req->cqe.fd);
1832
1833 return !!req->file;
1834}
1835
1836static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
1837{
1838 const struct io_op_def *def = &io_op_defs[req->opcode];
1839 const struct cred *creds = NULL;
1840 int ret;
1841
1842 if (unlikely(!io_assign_file(req, issue_flags)))
1843 return -EBADF;
1844
1845 if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
1846 creds = override_creds(req->creds);
1847
1848 if (!def->audit_skip)
1849 audit_uring_entry(req->opcode);
1850
1851 ret = def->issue(req, issue_flags);
1852
1853 if (!def->audit_skip)
1854 audit_uring_exit(!ret, ret);
1855
1856 if (creds)
1857 revert_creds(creds);
1858
1859 if (ret == IOU_OK) {
1860 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1861 io_req_complete_defer(req);
1862 else
1863 io_req_complete_post(req, issue_flags);
1864 } else if (ret != IOU_ISSUE_SKIP_COMPLETE)
1865 return ret;
1866
1867 /* If the op doesn't have a file, we're not polling for it */
1868 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
1869 io_iopoll_req_issued(req, issue_flags);
1870
1871 return 0;
1872}
1873
1874int io_poll_issue(struct io_kiocb *req, bool *locked)
1875{
1876 io_tw_lock(req->ctx, locked);
1877 return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
1878 IO_URING_F_COMPLETE_DEFER);
1879}
1880
1881struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
1882{
1883 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1884
1885 req = io_put_req_find_next(req);
1886 return req ? &req->work : NULL;
1887}
1888
1889void io_wq_submit_work(struct io_wq_work *work)
1890{
1891 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1892 const struct io_op_def *def = &io_op_defs[req->opcode];
1893 unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;
1894 bool needs_poll = false;
1895 int ret = 0, err = -ECANCELED;
1896
1897 /* one will be dropped by ->io_wq_free_work() after returning to io-wq */
1898 if (!(req->flags & REQ_F_REFCOUNT))
1899 __io_req_set_refcount(req, 2);
1900 else
1901 req_ref_get(req);
1902
1903 io_arm_ltimeout(req);
1904
1905 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
1906 if (work->flags & IO_WQ_WORK_CANCEL) {
1907fail:
1908 io_req_task_queue_fail(req, err);
1909 return;
1910 }
1911 if (!io_assign_file(req, issue_flags)) {
1912 err = -EBADF;
1913 work->flags |= IO_WQ_WORK_CANCEL;
1914 goto fail;
1915 }
1916
1917 if (req->flags & REQ_F_FORCE_ASYNC) {
1918 bool opcode_poll = def->pollin || def->pollout;
1919
1920 if (opcode_poll && file_can_poll(req->file)) {
1921 needs_poll = true;
1922 issue_flags |= IO_URING_F_NONBLOCK;
1923 }
1924 }
1925
1926 do {
1927 ret = io_issue_sqe(req, issue_flags);
1928 if (ret != -EAGAIN)
1929 break;
1930 /*
1931 * We can get EAGAIN for iopolled IO even though we're
1932 * forcing a sync submission from here, since we can't
1933 * wait for request slots on the block side.
1934 */
1935 if (!needs_poll) {
1936 if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
1937 break;
1938 cond_resched();
1939 continue;
1940 }
1941
1942 if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
1943 return;
1944 /* aborted or ready, in either case retry blocking */
1945 needs_poll = false;
1946 issue_flags &= ~IO_URING_F_NONBLOCK;
1947 } while (1);
1948
1949 /* avoid locking problems by failing it from a clean context */
1950 if (ret < 0)
1951 io_req_task_queue_fail(req, ret);
1952}
1953
1954inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1955 unsigned int issue_flags)
1956{
1957 struct io_ring_ctx *ctx = req->ctx;
1958 struct file *file = NULL;
1959 unsigned long file_ptr;
1960
1961 io_ring_submit_lock(ctx, issue_flags);
1962
1963 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
1964 goto out;
1965 fd = array_index_nospec(fd, ctx->nr_user_files);
1966 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
1967 file = (struct file *) (file_ptr & FFS_MASK);
1968 file_ptr &= ~FFS_MASK;
1969 /* mask in overlapping REQ_F and FFS bits */
1970 req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
1971 io_req_set_rsrc_node(req, ctx, 0);
1972out:
1973 io_ring_submit_unlock(ctx, issue_flags);
1974 return file;
1975}
1976
1977struct file *io_file_get_normal(struct io_kiocb *req, int fd)
1978{
1979 struct file *file = fget(fd);
1980
1981 trace_io_uring_file_get(req, fd);
1982
1983 /* we don't allow fixed io_uring files */
1984 if (file && io_is_uring_fops(file))
1985 io_req_track_inflight(req);
1986 return file;
1987}
1988
1989static void io_queue_async(struct io_kiocb *req, int ret)
1990 __must_hold(&req->ctx->uring_lock)
1991{
1992 struct io_kiocb *linked_timeout;
1993
1994 if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
1995 io_req_defer_failed(req, ret);
1996 return;
1997 }
1998
1999 linked_timeout = io_prep_linked_timeout(req);
2000
2001 switch (io_arm_poll_handler(req, 0)) {
2002 case IO_APOLL_READY:
2003 io_kbuf_recycle(req, 0);
2004 io_req_task_queue(req);
2005 break;
2006 case IO_APOLL_ABORTED:
2007 io_kbuf_recycle(req, 0);
2008 io_queue_iowq(req, NULL);
2009 break;
2010 case IO_APOLL_OK:
2011 break;
2012 }
2013
2014 if (linked_timeout)
2015 io_queue_linked_timeout(linked_timeout);
2016}
2017
2018static inline void io_queue_sqe(struct io_kiocb *req)
2019 __must_hold(&req->ctx->uring_lock)
2020{
2021 int ret;
2022
2023 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
2024
2025 /*
2026 * We async punt it if the file wasn't marked NOWAIT, or if the file
2027 * doesn't support non-blocking read/write attempts
2028 */
2029 if (likely(!ret))
2030 io_arm_ltimeout(req);
2031 else
2032 io_queue_async(req, ret);
2033}
2034
2035static void io_queue_sqe_fallback(struct io_kiocb *req)
2036 __must_hold(&req->ctx->uring_lock)
2037{
2038 if (unlikely(req->flags & REQ_F_FAIL)) {
2039 /*
2040		 * We don't submit; fail them all. For that, replace hardlinks
2041		 * with normal links. An extra REQ_F_LINK is tolerated.
2042 */
2043 req->flags &= ~REQ_F_HARDLINK;
2044 req->flags |= REQ_F_LINK;
2045 io_req_defer_failed(req, req->cqe.res);
2046 } else {
2047 int ret = io_req_prep_async(req);
2048
2049 if (unlikely(ret)) {
2050 io_req_defer_failed(req, ret);
2051 return;
2052 }
2053
2054 if (unlikely(req->ctx->drain_active))
2055 io_drain_req(req);
2056 else
2057 io_queue_iowq(req, NULL);
2058 }
2059}
2060
2061/*
2062 * Check SQE restrictions (opcode and flags).
2063 *
2064 * Returns 'true' if SQE is allowed, 'false' otherwise.
2065 */
2066static inline bool io_check_restriction(struct io_ring_ctx *ctx,
2067 struct io_kiocb *req,
2068 unsigned int sqe_flags)
2069{
2070 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
2071 return false;
2072
2073 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
2074 ctx->restrictions.sqe_flags_required)
2075 return false;
2076
2077 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
2078 ctx->restrictions.sqe_flags_required))
2079 return false;
2080
2081 return true;
2082}
2083
2084static void io_init_req_drain(struct io_kiocb *req)
2085{
2086 struct io_ring_ctx *ctx = req->ctx;
2087 struct io_kiocb *head = ctx->submit_state.link.head;
2088
2089 ctx->drain_active = true;
2090 if (head) {
2091 /*
2092 * If we need to drain a request in the middle of a link, drain
2093 * the head request and the next request/link after the current
2094 * link. Considering sequential execution of links,
2095 * REQ_F_IO_DRAIN will be maintained for every request of our
2096 * link.
2097 */
2098 head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2099 ctx->drain_next = true;
2100 }
2101}
2102
2103static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
2104 const struct io_uring_sqe *sqe)
2105 __must_hold(&ctx->uring_lock)
2106{
2107 const struct io_op_def *def;
2108 unsigned int sqe_flags;
2109 int personality;
2110 u8 opcode;
2111
2112 /* req is partially pre-initialised, see io_preinit_req() */
2113 req->opcode = opcode = READ_ONCE(sqe->opcode);
2114 /* same numerical values with corresponding REQ_F_*, safe to copy */
2115 req->flags = sqe_flags = READ_ONCE(sqe->flags);
2116 req->cqe.user_data = READ_ONCE(sqe->user_data);
2117 req->file = NULL;
2118 req->rsrc_node = NULL;
2119 req->task = current;
2120
2121 if (unlikely(opcode >= IORING_OP_LAST)) {
2122 req->opcode = 0;
2123 return -EINVAL;
2124 }
2125 def = &io_op_defs[opcode];
2126 if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
2127 /* enforce forwards compatibility on users */
2128 if (sqe_flags & ~SQE_VALID_FLAGS)
2129 return -EINVAL;
2130 if (sqe_flags & IOSQE_BUFFER_SELECT) {
2131 if (!def->buffer_select)
2132 return -EOPNOTSUPP;
2133 req->buf_index = READ_ONCE(sqe->buf_group);
2134 }
2135 if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
2136 ctx->drain_disabled = true;
2137 if (sqe_flags & IOSQE_IO_DRAIN) {
2138 if (ctx->drain_disabled)
2139 return -EOPNOTSUPP;
2140 io_init_req_drain(req);
2141 }
2142 }
2143 if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
2144 if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
2145 return -EACCES;
2146 /* knock it to the slow queue path, will be drained there */
2147 if (ctx->drain_active)
2148 req->flags |= REQ_F_FORCE_ASYNC;
2149 /* if there is no link, we're at "next" request and need to drain */
2150 if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
2151 ctx->drain_next = false;
2152 ctx->drain_active = true;
2153 req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2154 }
2155 }
2156
2157 if (!def->ioprio && sqe->ioprio)
2158 return -EINVAL;
2159 if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
2160 return -EINVAL;
2161
2162 if (def->needs_file) {
2163 struct io_submit_state *state = &ctx->submit_state;
2164
2165 req->cqe.fd = READ_ONCE(sqe->fd);
2166
2167 /*
2168 * Plug now if we have more than 2 IO left after this, and the
2169 * target is potentially a read/write to block based storage.
2170 */
2171 if (state->need_plug && def->plug) {
2172 state->plug_started = true;
2173 state->need_plug = false;
2174 blk_start_plug_nr_ios(&state->plug, state->submit_nr);
2175 }
2176 }
2177
2178 personality = READ_ONCE(sqe->personality);
2179 if (personality) {
2180 int ret;
2181
2182 req->creds = xa_load(&ctx->personalities, personality);
2183 if (!req->creds)
2184 return -EINVAL;
2185 get_cred(req->creds);
2186 ret = security_uring_override_creds(req->creds);
2187 if (ret) {
2188 put_cred(req->creds);
2189 return ret;
2190 }
2191 req->flags |= REQ_F_CREDS;
2192 }
2193
2194 return def->prep(req, sqe);
2195}
2196
2197static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
2198 struct io_kiocb *req, int ret)
2199{
2200 struct io_ring_ctx *ctx = req->ctx;
2201 struct io_submit_link *link = &ctx->submit_state.link;
2202 struct io_kiocb *head = link->head;
2203
2204 trace_io_uring_req_failed(sqe, req, ret);
2205
2206 /*
2207 * Avoid breaking links in the middle as it renders links with SQPOLL
2208 * unusable. Instead of failing eagerly, continue assembling the link if
2209 * applicable and mark the head with REQ_F_FAIL. The link flushing code
2210 * should find the flag and handle the rest.
2211 */
2212 req_fail_link_node(req, ret);
2213 if (head && !(head->flags & REQ_F_FAIL))
2214 req_fail_link_node(head, -ECANCELED);
2215
2216 if (!(req->flags & IO_REQ_LINK_FLAGS)) {
2217 if (head) {
2218 link->last->link = req;
2219 link->head = NULL;
2220 req = head;
2221 }
2222 io_queue_sqe_fallback(req);
2223 return ret;
2224 }
2225
2226 if (head)
2227 link->last->link = req;
2228 else
2229 link->head = req;
2230 link->last = req;
2231 return 0;
2232}
2233
2234static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2235 const struct io_uring_sqe *sqe)
2236 __must_hold(&ctx->uring_lock)
2237{
2238 struct io_submit_link *link = &ctx->submit_state.link;
2239 int ret;
2240
2241 ret = io_init_req(ctx, req, sqe);
2242 if (unlikely(ret))
2243 return io_submit_fail_init(sqe, req, ret);
2244
2245 /* don't need @sqe from now on */
2246 trace_io_uring_submit_sqe(req, true);
2247
2248 /*
2249 * If we already have a head request, queue this one for async
2250 * submittal once the head completes. If we don't have a head but
2251 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2252 * submitted sync once the chain is complete. If none of those
2253 * conditions are true (normal request), then just queue it.
2254 */
2255 if (unlikely(link->head)) {
2256 ret = io_req_prep_async(req);
2257 if (unlikely(ret))
2258 return io_submit_fail_init(sqe, req, ret);
2259
2260 trace_io_uring_link(req, link->head);
2261 link->last->link = req;
2262 link->last = req;
2263
2264 if (req->flags & IO_REQ_LINK_FLAGS)
2265 return 0;
2266 /* last request of the link, flush it */
2267 req = link->head;
2268 link->head = NULL;
2269 if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
2270 goto fallback;
2271
2272 } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
2273 REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
2274 if (req->flags & IO_REQ_LINK_FLAGS) {
2275 link->head = req;
2276 link->last = req;
2277 } else {
2278fallback:
2279 io_queue_sqe_fallback(req);
2280 }
2281 return 0;
2282 }
2283
2284 io_queue_sqe(req);
2285 return 0;
2286}
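
/*
 * Illustrative sketch (userspace, not kernel code) of the link-assembly
 * rules implemented by io_submit_sqe() above, using liburing's documented
 * helpers. "ring_fd", "fd" and "buf" are placeholders, and error handling
 * is omitted:
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *
 *	io_uring_queue_init(8, &ring, 0);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
 *	io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);	 head of the chain
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, fd, buf, sizeof(buf), 0);  runs after the read
 *
 *	io_uring_submit(&ring);	 both SQEs reach io_submit_sqes()
 *
 * If the first request fails, the dependent one completes with -ECANCELED,
 * which is the REQ_F_FAIL / fallback path handled above.
 */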
2287
2288/*
2289 * Batched submission is done, ensure local IO is flushed out.
2290 */
2291static void io_submit_state_end(struct io_ring_ctx *ctx)
2292{
2293 struct io_submit_state *state = &ctx->submit_state;
2294
2295 if (unlikely(state->link.head))
2296 io_queue_sqe_fallback(state->link.head);
2297 /* flush only after queuing links as they can generate completions */
2298 io_submit_flush_completions(ctx);
2299 if (state->plug_started)
2300 blk_finish_plug(&state->plug);
2301}
2302
2303/*
2304 * Start submission side cache.
2305 */
2306static void io_submit_state_start(struct io_submit_state *state,
2307 unsigned int max_ios)
2308{
2309 state->plug_started = false;
2310 state->need_plug = max_ios > 2;
2311 state->submit_nr = max_ios;
2312 /* set only head, no need to init link_last in advance */
2313 state->link.head = NULL;
2314}
2315
2316static void io_commit_sqring(struct io_ring_ctx *ctx)
2317{
2318 struct io_rings *rings = ctx->rings;
2319
2320 /*
2321 * Ensure any loads from the SQEs are done at this point,
2322 * since once we write the new head, the application could
2323 * write new data to them.
2324 */
2325 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2326}
2327
2328/*
2329 * Fetch an sqe, if one is available. Note this returns a pointer to memory
2330 * that is mapped by userspace. This means that care needs to be taken to
2331 * ensure that reads are stable, as we cannot rely on userspace always
2332 * being a good citizen. If members of the sqe are validated and then later
2333 * used, it's important that those reads are done through READ_ONCE() to
2334 * prevent a re-load down the line.
2335 */
2336static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2337{
2338 unsigned head, mask = ctx->sq_entries - 1;
2339 unsigned sq_idx = ctx->cached_sq_head++ & mask;
2340
2341 /*
2342 * The cached sq head (or cq tail) serves two purposes:
2343 *
2344	 * 1) allows us to batch the cost of updating the user visible
2345	 *    head.
2346 * 2) allows the kernel side to track the head on its own, even
2347 * though the application is the one updating it.
2348 */
2349 head = READ_ONCE(ctx->sq_array[sq_idx]);
2350 if (likely(head < ctx->sq_entries)) {
2351 /* double index for 128-byte SQEs, twice as long */
2352 if (ctx->flags & IORING_SETUP_SQE128)
2353 head <<= 1;
2354 return &ctx->sq_sqes[head];
2355 }
2356
2357 /* drop invalid entries */
2358 ctx->cq_extra--;
2359 WRITE_ONCE(ctx->rings->sq_dropped,
2360 READ_ONCE(ctx->rings->sq_dropped) + 1);
2361 return NULL;
2362}
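
/*
 * Illustrative sketch of the "stable read" rule described in the comment
 * above io_get_sqe(). The names (field "len", limit MAX_LEN, helper
 * consume()) are hypothetical; the point is the pattern:
 *
 *	 Racy: the application can change sqe->len between the check and
 *	 the use, so the validated value is not the value acted on.
 *	if (sqe->len <= MAX_LEN)
 *		consume(sqe->len);
 *
 *	 Stable: load once into a private copy, then validate and use
 *	 that same copy.
 *	u32 len = READ_ONCE(sqe->len);
 *	if (len <= MAX_LEN)
 *		consume(len);
 */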
2363
2364int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
2365 __must_hold(&ctx->uring_lock)
2366{
2367 unsigned int entries = io_sqring_entries(ctx);
2368 unsigned int left;
2369 int ret;
2370
2371 if (unlikely(!entries))
2372 return 0;
2373 /* make sure SQ entry isn't read before tail */
2374 ret = left = min3(nr, ctx->sq_entries, entries);
2375 io_get_task_refs(left);
2376 io_submit_state_start(&ctx->submit_state, left);
2377
2378 do {
2379 const struct io_uring_sqe *sqe;
2380 struct io_kiocb *req;
2381
2382 if (unlikely(!io_alloc_req_refill(ctx)))
2383 break;
2384 req = io_alloc_req(ctx);
2385 sqe = io_get_sqe(ctx);
2386 if (unlikely(!sqe)) {
2387 io_req_add_to_cache(req, ctx);
2388 break;
2389 }
2390
2391 /*
2392 * Continue submitting even for sqe failure if the
2393 * ring was setup with IORING_SETUP_SUBMIT_ALL
2394 */
2395 if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
2396 !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
2397 left--;
2398 break;
2399 }
2400 } while (--left);
2401
2402 if (unlikely(left)) {
2403 ret -= left;
2404 /* try again if it submitted nothing and can't allocate a req */
2405 if (!ret && io_req_cache_empty(ctx))
2406 ret = -EAGAIN;
2407 current->io_uring->cached_refs += left;
2408 }
2409
2410 io_submit_state_end(ctx);
2411 /* Commit SQ ring head once we've consumed and submitted all SQEs */
2412 io_commit_sqring(ctx);
2413 return ret;
2414}
2415
2416struct io_wait_queue {
2417 struct wait_queue_entry wq;
2418 struct io_ring_ctx *ctx;
2419 unsigned cq_tail;
2420 unsigned nr_timeouts;
2421};
2422
2423static inline bool io_has_work(struct io_ring_ctx *ctx)
2424{
2425 return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
2426 ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
2427 !llist_empty(&ctx->work_llist));
2428}
2429
2430static inline bool io_should_wake(struct io_wait_queue *iowq)
2431{
2432 struct io_ring_ctx *ctx = iowq->ctx;
2433 int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
2434
2435 /*
2436 * Wake up if we have enough events, or if a timeout occurred since we
2437 * started waiting. For timeouts, we always want to return to userspace,
2438 * regardless of event count.
2439 */
2440 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
2441}
2442
2443static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2444 int wake_flags, void *key)
2445{
2446 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
2447 wq);
2448 struct io_ring_ctx *ctx = iowq->ctx;
2449
2450 /*
2451 * Cannot safely flush overflowed CQEs from here, ensure we wake up
2452 * the task, and the next invocation will do it.
2453 */
2454 if (io_should_wake(iowq) || io_has_work(ctx))
2455 return autoremove_wake_function(curr, mode, wake_flags, key);
2456 return -1;
2457}
2458
2459int io_run_task_work_sig(struct io_ring_ctx *ctx)
2460{
2461 if (io_run_task_work_ctx(ctx) > 0)
2462 return 1;
2463 if (task_sigpending(current))
2464 return -EINTR;
2465 return 0;
2466}
2467
2468/* when this returns >0, the caller should retry */
2469static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
2470 struct io_wait_queue *iowq,
2471 ktime_t *timeout)
2472{
2473 int ret;
2474 unsigned long check_cq;
2475
2476 /* make sure we run task_work before checking for signals */
2477 ret = io_run_task_work_sig(ctx);
2478 if (ret || io_should_wake(iowq))
2479 return ret;
2480
2481 check_cq = READ_ONCE(ctx->check_cq);
2482 if (unlikely(check_cq)) {
2483 /* let the caller flush overflows, retry */
2484 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
2485 return 1;
2486 if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
2487 return -EBADR;
2488 }
2489 if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
2490 return -ETIME;
2491
2492 /*
2493 * Run task_work after scheduling. If we got woken because of
2494 * task_work being processed, run it now rather than let the caller
2495 * do another wait loop.
2496 */
2497 ret = io_run_task_work_sig(ctx);
2498 return ret < 0 ? ret : 1;
2499}
2500
2501/*
2502 * Wait until events become available, if we don't already have some. The
2503 * application must reap them itself, as they reside on the shared cq ring.
2504 */
2505static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2506 const sigset_t __user *sig, size_t sigsz,
2507 struct __kernel_timespec __user *uts)
2508{
2509 struct io_wait_queue iowq;
2510 struct io_rings *rings = ctx->rings;
2511 ktime_t timeout = KTIME_MAX;
2512 int ret;
2513
2514 if (!io_allowed_run_tw(ctx))
2515 return -EEXIST;
2516
2517 do {
2518 /* always run at least 1 task work to process local work */
2519 ret = io_run_task_work_ctx(ctx);
2520 if (ret < 0)
2521 return ret;
2522 io_cqring_overflow_flush(ctx);
2523
2524 /* if user messes with these they will just get an early return */
2525 if (__io_cqring_events_user(ctx) >= min_events)
2526 return 0;
2527 } while (ret > 0);
2528
2529 if (sig) {
2530#ifdef CONFIG_COMPAT
2531 if (in_compat_syscall())
2532 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
2533 sigsz);
2534 else
2535#endif
2536 ret = set_user_sigmask(sig, sigsz);
2537
2538 if (ret)
2539 return ret;
2540 }
2541
2542 if (uts) {
2543 struct timespec64 ts;
2544
2545 if (get_timespec64(&ts, uts))
2546 return -EFAULT;
2547 timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
2548 }
2549
2550 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
2551 iowq.wq.private = current;
2552 INIT_LIST_HEAD(&iowq.wq.entry);
2553 iowq.ctx = ctx;
2554 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
2555 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
2556
2557 trace_io_uring_cqring_wait(ctx, min_events);
2558 do {
2559 if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
2560 finish_wait(&ctx->cq_wait, &iowq.wq);
2561 io_cqring_do_overflow_flush(ctx);
2562 }
2563 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
2564 TASK_INTERRUPTIBLE);
2565 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
2566 if (__io_cqring_events_user(ctx) >= min_events)
2567 break;
2568 cond_resched();
2569 } while (ret > 0);
2570
2571 finish_wait(&ctx->cq_wait, &iowq.wq);
2572 restore_saved_sigmask_unless(ret == -EINTR);
2573
2574 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2575}
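
/*
 * Illustrative sketch (userspace, not kernel code) of the "reap it
 * yourself" contract stated above, using liburing's documented helpers
 * rather than touching the raw CQ ring. "ring" is a struct io_uring set up
 * with io_uring_queue_init(), handle_completion() is a placeholder:
 *
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_submit(&ring);
 *	 Blocks in io_cqring_wait() (via io_uring_enter()) until a CQE is
 *	 available, but does not consume it.
 *	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
 *		handle_completion(cqe->user_data, cqe->res);
 *		 Advances the CQ head; only now may the slot be reused.
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 */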
2576
2577static void io_mem_free(void *ptr)
2578{
2579 struct page *page;
2580
2581 if (!ptr)
2582 return;
2583
2584 page = virt_to_head_page(ptr);
2585 if (put_page_testzero(page))
2586 free_compound_page(page);
2587}
2588
2589static void *io_mem_alloc(size_t size)
2590{
2591 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
2592
2593 return (void *) __get_free_pages(gfp, get_order(size));
2594}
2595
2596static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
2597 unsigned int cq_entries, size_t *sq_offset)
2598{
2599 struct io_rings *rings;
2600 size_t off, sq_array_size;
2601
2602 off = struct_size(rings, cqes, cq_entries);
2603 if (off == SIZE_MAX)
2604 return SIZE_MAX;
2605 if (ctx->flags & IORING_SETUP_CQE32) {
2606 if (check_shl_overflow(off, 1, &off))
2607 return SIZE_MAX;
2608 }
2609
2610#ifdef CONFIG_SMP
2611 off = ALIGN(off, SMP_CACHE_BYTES);
2612 if (off == 0)
2613 return SIZE_MAX;
2614#endif
2615
2616 if (sq_offset)
2617 *sq_offset = off;
2618
2619 sq_array_size = array_size(sizeof(u32), sq_entries);
2620 if (sq_array_size == SIZE_MAX)
2621 return SIZE_MAX;
2622
2623 if (check_add_overflow(off, sq_array_size, &off))
2624 return SIZE_MAX;
2625
2626 return off;
2627}
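
/*
 * Worked example of the layout computed above, assuming a 16-byte
 * struct io_uring_cqe (no IORING_SETUP_CQE32) and SMP_CACHE_BYTES == 64.
 * For sq_entries = 8 and cq_entries = 16 the single allocation is:
 *
 *	[ struct io_rings header          ]
 *	[ 16 * 16B CQEs                   ]  <- struct_size(rings, cqes, 16)
 *	[ pad up to the next 64B boundary ]  <- ALIGN(off, SMP_CACHE_BYTES)
 *	[ 8 * 4B sq_array indices         ]  <- starts at *sq_offset
 *
 * With IORING_SETUP_CQE32 the struct_size() result is doubled
 * (check_shl_overflow(off, 1, &off)), making each CQE slot 32 bytes,
 * before the alignment and the sq_array are added on.
 */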
2628
2629static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
2630 unsigned int eventfd_async)
2631{
2632 struct io_ev_fd *ev_fd;
2633 __s32 __user *fds = arg;
2634 int fd;
2635
2636 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
2637 lockdep_is_held(&ctx->uring_lock));
2638 if (ev_fd)
2639 return -EBUSY;
2640
2641 if (copy_from_user(&fd, fds, sizeof(*fds)))
2642 return -EFAULT;
2643
2644 ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
2645 if (!ev_fd)
2646 return -ENOMEM;
2647
2648 ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
2649 if (IS_ERR(ev_fd->cq_ev_fd)) {
2650 int ret = PTR_ERR(ev_fd->cq_ev_fd);
2651 kfree(ev_fd);
2652 return ret;
2653 }
2654
2655 spin_lock(&ctx->completion_lock);
2656 ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
2657 spin_unlock(&ctx->completion_lock);
2658
2659 ev_fd->eventfd_async = eventfd_async;
2660 ctx->has_evfd = true;
2661 rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
2662 atomic_set(&ev_fd->refs, 1);
2663 atomic_set(&ev_fd->ops, 0);
2664 return 0;
2665}
2666
2667static int io_eventfd_unregister(struct io_ring_ctx *ctx)
2668{
2669 struct io_ev_fd *ev_fd;
2670
2671 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
2672 lockdep_is_held(&ctx->uring_lock));
2673 if (ev_fd) {
2674 ctx->has_evfd = false;
2675 rcu_assign_pointer(ctx->io_ev_fd, NULL);
2676 if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops))
2677 call_rcu(&ev_fd->rcu, io_eventfd_ops);
2678 return 0;
2679 }
2680
2681 return -ENXIO;
2682}
2683
2684static void io_req_caches_free(struct io_ring_ctx *ctx)
2685{
2686 int nr = 0;
2687
2688 mutex_lock(&ctx->uring_lock);
2689 io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
2690
2691 while (!io_req_cache_empty(ctx)) {
2692 struct io_kiocb *req = io_alloc_req(ctx);
2693
2694 kmem_cache_free(req_cachep, req);
2695 nr++;
2696 }
2697 if (nr)
2698 percpu_ref_put_many(&ctx->refs, nr);
2699 mutex_unlock(&ctx->uring_lock);
2700}
2701
2702static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
2703{
2704 io_sq_thread_finish(ctx);
2705 io_rsrc_refs_drop(ctx);
2706 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
2707 io_wait_rsrc_data(ctx->buf_data);
2708 io_wait_rsrc_data(ctx->file_data);
2709
2710 mutex_lock(&ctx->uring_lock);
2711 if (ctx->buf_data)
2712 __io_sqe_buffers_unregister(ctx);
2713 if (ctx->file_data)
2714 __io_sqe_files_unregister(ctx);
2715 io_cqring_overflow_kill(ctx);
2716 io_eventfd_unregister(ctx);
2717 io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
2718 io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
2719 mutex_unlock(&ctx->uring_lock);
2720 io_destroy_buffers(ctx);
2721 if (ctx->sq_creds)
2722 put_cred(ctx->sq_creds);
2723 if (ctx->submitter_task)
2724 put_task_struct(ctx->submitter_task);
2725
2726 /* there are no registered resources left, nobody uses it */
2727 if (ctx->rsrc_node)
2728 io_rsrc_node_destroy(ctx->rsrc_node);
2729 if (ctx->rsrc_backup_node)
2730 io_rsrc_node_destroy(ctx->rsrc_backup_node);
2731 flush_delayed_work(&ctx->rsrc_put_work);
2732 flush_delayed_work(&ctx->fallback_work);
2733
2734 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
2735 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
2736
2737#if defined(CONFIG_UNIX)
2738 if (ctx->ring_sock) {
2739 ctx->ring_sock->file = NULL; /* so that iput() is called */
2740 sock_release(ctx->ring_sock);
2741 }
2742#endif
2743 WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
2744
2745 if (ctx->mm_account) {
2746 mmdrop(ctx->mm_account);
2747 ctx->mm_account = NULL;
2748 }
2749 io_mem_free(ctx->rings);
2750 io_mem_free(ctx->sq_sqes);
2751
2752 percpu_ref_exit(&ctx->refs);
2753 free_uid(ctx->user);
2754 io_req_caches_free(ctx);
2755 if (ctx->hash_map)
2756 io_wq_put_hash(ctx->hash_map);
2757 kfree(ctx->cancel_table.hbs);
2758 kfree(ctx->cancel_table_locked.hbs);
2759 kfree(ctx->dummy_ubuf);
2760 kfree(ctx->io_bl);
2761 xa_destroy(&ctx->io_bl_xa);
2762 kfree(ctx);
2763}
2764
2765static __poll_t io_uring_poll(struct file *file, poll_table *wait)
2766{
2767 struct io_ring_ctx *ctx = file->private_data;
2768 __poll_t mask = 0;
2769
2770 poll_wait(file, &ctx->cq_wait, wait);
2771 /*
2772 * synchronizes with barrier from wq_has_sleeper call in
2773 * io_commit_cqring
2774 */
2775 smp_rmb();
2776 if (!io_sqring_full(ctx))
2777 mask |= EPOLLOUT | EPOLLWRNORM;
2778
2779 /*
2780 * Don't flush cqring overflow list here, just do a simple check.
2781	 * Otherwise there could possibly be an ABBA deadlock:
2782	 *      CPU0                    CPU1
2783	 *      ----                    ----
2784	 * lock(&ctx->uring_lock);
2785	 *                              lock(&ep->mtx);
2786	 *                              lock(&ctx->uring_lock);
2787	 * lock(&ep->mtx);
2788 *
2789	 * Users may get EPOLLIN while seeing nothing in the cqring; this
2790 * pushes them to do the flush.
2791 */
2792
2793 if (io_cqring_events(ctx) || io_has_work(ctx))
2794 mask |= EPOLLIN | EPOLLRDNORM;
2795
2796 return mask;
2797}
2798
2799static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
2800{
2801 const struct cred *creds;
2802
2803 creds = xa_erase(&ctx->personalities, id);
2804 if (creds) {
2805 put_cred(creds);
2806 return 0;
2807 }
2808
2809 return -EINVAL;
2810}
2811
2812struct io_tctx_exit {
2813 struct callback_head task_work;
2814 struct completion completion;
2815 struct io_ring_ctx *ctx;
2816};
2817
2818static __cold void io_tctx_exit_cb(struct callback_head *cb)
2819{
2820 struct io_uring_task *tctx = current->io_uring;
2821 struct io_tctx_exit *work;
2822
2823 work = container_of(cb, struct io_tctx_exit, task_work);
2824 /*
2825 * When @in_idle, we're in cancellation and it's racy to remove the
2826 * node. It'll be removed by the end of cancellation, just ignore it.
2827 * tctx can be NULL if the queueing of this task_work raced with
2828 * work cancelation off the exec path.
2829 */
2830 if (tctx && !atomic_read(&tctx->in_idle))
2831 io_uring_del_tctx_node((unsigned long)work->ctx);
2832 complete(&work->completion);
2833}
2834
2835static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
2836{
2837 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2838
2839 return req->ctx == data;
2840}
2841
2842static __cold void io_ring_exit_work(struct work_struct *work)
2843{
2844 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
2845 unsigned long timeout = jiffies + HZ * 60 * 5;
2846 unsigned long interval = HZ / 20;
2847 struct io_tctx_exit exit;
2848 struct io_tctx_node *node;
2849 int ret;
2850
2851 /*
2852 * If we're doing polled IO and end up having requests being
2853 * submitted async (out-of-line), then completions can come in while
2854 * we're waiting for refs to drop. We need to reap these manually,
2855 * as nobody else will be looking for them.
2856 */
2857 do {
2858 if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
2859 mutex_lock(&ctx->uring_lock);
2860 io_cqring_overflow_kill(ctx);
2861 mutex_unlock(&ctx->uring_lock);
2862 }
2863
2864 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2865 io_move_task_work_from_local(ctx);
2866
2867 while (io_uring_try_cancel_requests(ctx, NULL, true))
2868 cond_resched();
2869
2870 if (ctx->sq_data) {
2871 struct io_sq_data *sqd = ctx->sq_data;
2872 struct task_struct *tsk;
2873
2874 io_sq_thread_park(sqd);
2875 tsk = sqd->thread;
2876 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
2877 io_wq_cancel_cb(tsk->io_uring->io_wq,
2878 io_cancel_ctx_cb, ctx, true);
2879 io_sq_thread_unpark(sqd);
2880 }
2881
2882 io_req_caches_free(ctx);
2883
2884 if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
2885 /* there is little hope left, don't run it too often */
2886 interval = HZ * 60;
2887 }
2888 } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
2889
2890 init_completion(&exit.completion);
2891 init_task_work(&exit.task_work, io_tctx_exit_cb);
2892 exit.ctx = ctx;
2893 /*
2894 * Some may use context even when all refs and requests have been put,
2895 * and they are free to do so while still holding uring_lock or
2896	 * completion_lock, see io_req_task_submit(). Apart from other work,
2897	 * this lock/unlock section also waits for them to finish.
2898 */
2899 mutex_lock(&ctx->uring_lock);
2900 while (!list_empty(&ctx->tctx_list)) {
2901 WARN_ON_ONCE(time_after(jiffies, timeout));
2902
2903 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
2904 ctx_node);
2905 /* don't spin on a single task if cancellation failed */
2906 list_rotate_left(&ctx->tctx_list);
2907 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
2908 if (WARN_ON_ONCE(ret))
2909 continue;
2910
2911 mutex_unlock(&ctx->uring_lock);
2912 wait_for_completion(&exit.completion);
2913 mutex_lock(&ctx->uring_lock);
2914 }
2915 mutex_unlock(&ctx->uring_lock);
2916 spin_lock(&ctx->completion_lock);
2917 spin_unlock(&ctx->completion_lock);
2918
2919 io_ring_ctx_free(ctx);
2920}
2921
2922static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
2923{
2924 unsigned long index;
2925 struct creds *creds;
2926
2927 mutex_lock(&ctx->uring_lock);
2928 percpu_ref_kill(&ctx->refs);
2929 xa_for_each(&ctx->personalities, index, creds)
2930 io_unregister_personality(ctx, index);
2931 if (ctx->rings)
2932 io_poll_remove_all(ctx, NULL, true);
2933 mutex_unlock(&ctx->uring_lock);
2934
2935 /*
2936 * If we failed setting up the ctx, we might not have any rings
2937 * and therefore did not submit any requests
2938 */
2939 if (ctx->rings)
2940 io_kill_timeouts(ctx, NULL, true);
2941
2942 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
2943 /*
2944 * Use system_unbound_wq to avoid spawning tons of event kworkers
2945 * if we're exiting a ton of rings at the same time. It just adds
2946 * noise and overhead, there's no discernable change in runtime
2947 * over using system_wq.
2948 */
2949 queue_work(system_unbound_wq, &ctx->exit_work);
2950}
2951
2952static int io_uring_release(struct inode *inode, struct file *file)
2953{
2954 struct io_ring_ctx *ctx = file->private_data;
2955
2956 file->private_data = NULL;
2957 io_ring_ctx_wait_and_kill(ctx);
2958 return 0;
2959}
2960
2961struct io_task_cancel {
2962 struct task_struct *task;
2963 bool all;
2964};
2965
2966static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
2967{
2968 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2969 struct io_task_cancel *cancel = data;
2970
2971 return io_match_task_safe(req, cancel->task, cancel->all);
2972}
2973
2974static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
2975 struct task_struct *task,
2976 bool cancel_all)
2977{
2978 struct io_defer_entry *de;
2979 LIST_HEAD(list);
2980
2981 spin_lock(&ctx->completion_lock);
2982 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
2983 if (io_match_task_safe(de->req, task, cancel_all)) {
2984 list_cut_position(&list, &ctx->defer_list, &de->list);
2985 break;
2986 }
2987 }
2988 spin_unlock(&ctx->completion_lock);
2989 if (list_empty(&list))
2990 return false;
2991
2992 while (!list_empty(&list)) {
2993 de = list_first_entry(&list, struct io_defer_entry, list);
2994 list_del_init(&de->list);
2995 io_req_task_queue_fail(de->req, -ECANCELED);
2996 kfree(de);
2997 }
2998 return true;
2999}
3000
3001static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
3002{
3003 struct io_tctx_node *node;
3004 enum io_wq_cancel cret;
3005 bool ret = false;
3006
3007 mutex_lock(&ctx->uring_lock);
3008 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
3009 struct io_uring_task *tctx = node->task->io_uring;
3010
3011 /*
3012 * io_wq will stay alive while we hold uring_lock, because it's
3013 * killed after ctx nodes, which requires to take the lock.
3014 */
3015 if (!tctx || !tctx->io_wq)
3016 continue;
3017 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
3018 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
3019 }
3020 mutex_unlock(&ctx->uring_lock);
3021
3022 return ret;
3023}
3024
3025static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
3026 struct task_struct *task,
3027 bool cancel_all)
3028{
3029 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
3030 struct io_uring_task *tctx = task ? task->io_uring : NULL;
3031 enum io_wq_cancel cret;
3032 bool ret = false;
3033
3034 /* failed during ring init, it couldn't have issued any requests */
3035 if (!ctx->rings)
3036 return false;
3037
3038 if (!task) {
3039 ret |= io_uring_try_cancel_iowq(ctx);
3040 } else if (tctx && tctx->io_wq) {
3041 /*
3042 * Cancels requests of all rings, not only @ctx, but
3043 * it's fine as the task is in exit/exec.
3044 */
3045 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
3046 &cancel, true);
3047 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
3048 }
3049
3050 /* SQPOLL thread does its own polling */
3051 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
3052 (ctx->sq_data && ctx->sq_data->thread == current)) {
3053 while (!wq_list_empty(&ctx->iopoll_list)) {
3054 io_iopoll_try_reap_events(ctx);
3055 ret = true;
3056 }
3057 }
3058
3059 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
3060 ret |= io_run_local_work(ctx) > 0;
3061 ret |= io_cancel_defer_files(ctx, task, cancel_all);
3062 mutex_lock(&ctx->uring_lock);
3063 ret |= io_poll_remove_all(ctx, task, cancel_all);
3064 mutex_unlock(&ctx->uring_lock);
3065 ret |= io_kill_timeouts(ctx, task, cancel_all);
3066 if (task)
3067 ret |= io_run_task_work() > 0;
3068 return ret;
3069}
3070
3071static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
3072{
3073 if (tracked)
3074 return atomic_read(&tctx->inflight_tracked);
3075 return percpu_counter_sum(&tctx->inflight);
3076}
3077
3078/*
3079 * Find any io_uring ctx that this task has registered or done IO on, and cancel
3080 * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
3081 */
3082__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
3083{
3084 struct io_uring_task *tctx = current->io_uring;
3085 struct io_ring_ctx *ctx;
3086 s64 inflight;
3087 DEFINE_WAIT(wait);
3088
3089 WARN_ON_ONCE(sqd && sqd->thread != current);
3090
3091 if (!current->io_uring)
3092 return;
3093 if (tctx->io_wq)
3094 io_wq_exit_start(tctx->io_wq);
3095
3096 atomic_inc(&tctx->in_idle);
3097 do {
3098 bool loop = false;
3099
3100 io_uring_drop_tctx_refs(current);
3101 /* read completions before cancelations */
3102 inflight = tctx_inflight(tctx, !cancel_all);
3103 if (!inflight)
3104 break;
3105
3106 if (!sqd) {
3107 struct io_tctx_node *node;
3108 unsigned long index;
3109
3110 xa_for_each(&tctx->xa, index, node) {
3111 /* sqpoll task will cancel all its requests */
3112 if (node->ctx->sq_data)
3113 continue;
3114 loop |= io_uring_try_cancel_requests(node->ctx,
3115 current, cancel_all);
3116 }
3117 } else {
3118 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
3119 loop |= io_uring_try_cancel_requests(ctx,
3120 current,
3121 cancel_all);
3122 }
3123
3124 if (loop) {
3125 cond_resched();
3126 continue;
3127 }
3128
3129 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
3130 io_run_task_work();
3131 io_uring_drop_tctx_refs(current);
3132
3133 /*
3134 * If we've seen completions, retry without waiting. This
3135 * avoids a race where a completion comes in before we did
3136 * prepare_to_wait().
3137 */
3138 if (inflight == tctx_inflight(tctx, !cancel_all))
3139 schedule();
3140 finish_wait(&tctx->wait, &wait);
3141 } while (1);
3142
3143 io_uring_clean_tctx(tctx);
3144 if (cancel_all) {
3145 /*
3146 * We shouldn't run task_works after cancel, so just leave
3147 * ->in_idle set for normal exit.
3148 */
3149 atomic_dec(&tctx->in_idle);
3150 /* for exec all current's requests should be gone, kill tctx */
3151 __io_uring_free(current);
3152 }
3153}
3154
3155void __io_uring_cancel(bool cancel_all)
3156{
3157 io_uring_cancel_generic(cancel_all, NULL);
3158}
3159
3160static void *io_uring_validate_mmap_request(struct file *file,
3161 loff_t pgoff, size_t sz)
3162{
3163 struct io_ring_ctx *ctx = file->private_data;
3164 loff_t offset = pgoff << PAGE_SHIFT;
3165 struct page *page;
3166 void *ptr;
3167
3168 switch (offset) {
3169 case IORING_OFF_SQ_RING:
3170 case IORING_OFF_CQ_RING:
3171 ptr = ctx->rings;
3172 break;
3173 case IORING_OFF_SQES:
3174 ptr = ctx->sq_sqes;
3175 break;
3176 default:
3177 return ERR_PTR(-EINVAL);
3178 }
3179
3180 page = virt_to_head_page(ptr);
3181 if (sz > page_size(page))
3182 return ERR_PTR(-EINVAL);
3183
3184 return ptr;
3185}
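
/*
 * Illustrative sketch (userspace, not kernel code) of the offsets validated
 * above, following the documented io_uring_setup(2)/mmap pattern. "p" is
 * the struct io_uring_params filled in by io_uring_setup(), "ring_fd" the
 * returned descriptor:
 *
 *	size_t sq_sz  = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz  = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *	size_t sqe_sz = p.sq_entries * sizeof(struct io_uring_sqe);
 *
 *	void *sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
 *	void *sqes    = mmap(NULL, sqe_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQES);
 *
 * When IORING_FEAT_SINGLE_MMAP is advertised, a single mapping of the
 * larger of sq_sz and cq_sz at IORING_OFF_SQ_RING covers both rings and
 * the separate IORING_OFF_CQ_RING mmap can be skipped.
 */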
3186
3187#ifdef CONFIG_MMU
3188
3189static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3190{
3191 size_t sz = vma->vm_end - vma->vm_start;
3192 unsigned long pfn;
3193 void *ptr;
3194
3195 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
3196 if (IS_ERR(ptr))
3197 return PTR_ERR(ptr);
3198
3199 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
3200 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
3201}
3202
3203#else /* !CONFIG_MMU */
3204
3205static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3206{
3207 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
3208}
3209
3210static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
3211{
3212 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
3213}
3214
3215static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
3216 unsigned long addr, unsigned long len,
3217 unsigned long pgoff, unsigned long flags)
3218{
3219 void *ptr;
3220
3221 ptr = io_uring_validate_mmap_request(file, pgoff, len);
3222 if (IS_ERR(ptr))
3223 return PTR_ERR(ptr);
3224
3225 return (unsigned long) ptr;
3226}
3227
3228#endif /* !CONFIG_MMU */
3229
3230static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
3231{
3232 if (flags & IORING_ENTER_EXT_ARG) {
3233 struct io_uring_getevents_arg arg;
3234
3235 if (argsz != sizeof(arg))
3236 return -EINVAL;
3237 if (copy_from_user(&arg, argp, sizeof(arg)))
3238 return -EFAULT;
3239 }
3240 return 0;
3241}
3242
3243static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
3244 struct __kernel_timespec __user **ts,
3245 const sigset_t __user **sig)
3246{
3247 struct io_uring_getevents_arg arg;
3248
3249 /*
3250 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
3251 * is just a pointer to the sigset_t.
3252 */
3253 if (!(flags & IORING_ENTER_EXT_ARG)) {
3254 *sig = (const sigset_t __user *) argp;
3255 *ts = NULL;
3256 return 0;
3257 }
3258
3259 /*
3260 * EXT_ARG is set - ensure we agree on the size of it and copy in our
3261 * timespec and sigset_t pointers if good.
3262 */
3263 if (*argsz != sizeof(arg))
3264 return -EINVAL;
3265 if (copy_from_user(&arg, argp, sizeof(arg)))
3266 return -EFAULT;
3267 if (arg.pad)
3268 return -EINVAL;
3269 *sig = u64_to_user_ptr(arg.sigmask);
3270 *argsz = arg.sigmask_sz;
3271 *ts = u64_to_user_ptr(arg.ts);
3272 return 0;
3273}
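
/*
 * Illustrative sketch (userspace, not kernel code) of the two calling
 * conventions parsed above. Without IORING_ENTER_EXT_ARG, argp is the
 * sigset_t itself; with it, argp points at a struct io_uring_getevents_arg
 * carrying both the sigmask and a timeout. "mask" is a previously filled
 * sigset_t:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= (__u64)(unsigned long)&mask,
 *		.sigmask_sz	= _NSIG / 8,
 *		.ts		= (__u64)(unsigned long)&ts,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 *
 * Note that argsz carries sizeof(arg) here, while in the non-EXT_ARG case
 * it carries the sigmask size instead.
 */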
3274
3275SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3276 u32, min_complete, u32, flags, const void __user *, argp,
3277 size_t, argsz)
3278{
3279 struct io_ring_ctx *ctx;
3280 struct fd f;
3281 long ret;
3282
3283 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
3284 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
3285 IORING_ENTER_REGISTERED_RING)))
3286 return -EINVAL;
3287
3288 /*
3289 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
3290 * need only dereference our task private array to find it.
3291 */
3292 if (flags & IORING_ENTER_REGISTERED_RING) {
3293 struct io_uring_task *tctx = current->io_uring;
3294
3295 if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
3296 return -EINVAL;
3297 fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
3298 f.file = tctx->registered_rings[fd];
3299 f.flags = 0;
3300 if (unlikely(!f.file))
3301 return -EBADF;
3302 } else {
3303 f = fdget(fd);
3304 if (unlikely(!f.file))
3305 return -EBADF;
3306 ret = -EOPNOTSUPP;
3307 if (unlikely(!io_is_uring_fops(f.file)))
3308 goto out;
3309 }
3310
3311 ctx = f.file->private_data;
3312 ret = -EBADFD;
3313 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
3314 goto out;
3315
3316 /*
3317 * For SQ polling, the thread will do all submissions and completions.
3318 * Just return the requested submit count, and wake the thread if
3319 * we were asked to.
3320 */
3321 ret = 0;
3322 if (ctx->flags & IORING_SETUP_SQPOLL) {
3323 io_cqring_overflow_flush(ctx);
3324
3325 if (unlikely(ctx->sq_data->thread == NULL)) {
3326 ret = -EOWNERDEAD;
3327 goto out;
3328 }
3329 if (flags & IORING_ENTER_SQ_WAKEUP)
3330 wake_up(&ctx->sq_data->wait);
3331 if (flags & IORING_ENTER_SQ_WAIT) {
3332 ret = io_sqpoll_wait_sq(ctx);
3333 if (ret)
3334 goto out;
3335 }
3336 ret = to_submit;
3337 } else if (to_submit) {
3338 ret = io_uring_add_tctx_node(ctx);
3339 if (unlikely(ret))
3340 goto out;
3341
3342 mutex_lock(&ctx->uring_lock);
3343 ret = io_submit_sqes(ctx, to_submit);
3344 if (ret != to_submit) {
3345 mutex_unlock(&ctx->uring_lock);
3346 goto out;
3347 }
3348 if (flags & IORING_ENTER_GETEVENTS) {
3349 if (ctx->syscall_iopoll)
3350 goto iopoll_locked;
3351 /*
3352 * Ignore errors, we'll soon call io_cqring_wait() and
3353 * it should handle ownership problems if any.
3354 */
3355 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
3356 (void)io_run_local_work_locked(ctx);
3357 }
3358 mutex_unlock(&ctx->uring_lock);
3359 }
3360
3361 if (flags & IORING_ENTER_GETEVENTS) {
3362 int ret2;
3363
3364 if (ctx->syscall_iopoll) {
3365 /*
3366 * We disallow the app entering submit/complete with
3367 * polling, but we still need to lock the ring to
3368 * prevent racing with polled issue that got punted to
3369 * a workqueue.
3370 */
3371 mutex_lock(&ctx->uring_lock);
3372iopoll_locked:
3373 ret2 = io_validate_ext_arg(flags, argp, argsz);
3374 if (likely(!ret2)) {
3375 min_complete = min(min_complete,
3376 ctx->cq_entries);
3377 ret2 = io_iopoll_check(ctx, min_complete);
3378 }
3379 mutex_unlock(&ctx->uring_lock);
3380 } else {
3381 const sigset_t __user *sig;
3382 struct __kernel_timespec __user *ts;
3383
3384 ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
3385 if (likely(!ret2)) {
3386 min_complete = min(min_complete,
3387 ctx->cq_entries);
3388 ret2 = io_cqring_wait(ctx, min_complete, sig,
3389 argsz, ts);
3390 }
3391 }
3392
3393 if (!ret) {
3394 ret = ret2;
3395
3396 /*
3397 * EBADR indicates that one or more CQE were dropped.
3398 * Once the user has been informed we can clear the bit
3399 * as they are obviously ok with those drops.
3400 */
3401 if (unlikely(ret2 == -EBADR))
3402 clear_bit(IO_CHECK_CQ_DROPPED_BIT,
3403 &ctx->check_cq);
3404 }
3405 }
3406out:
3407 fdput(f);
3408 return ret;
3409}
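
/*
 * Illustrative sketch (userspace, not kernel code) of the SQPOLL branch
 * above: with IORING_SETUP_SQPOLL the application normally never enters the
 * kernel to submit, it only nudges the poller thread once that thread has
 * gone idle and set IORING_SQ_NEED_WAKEUP in the SQ ring flags. "sq_flags"
 * is the mmap'ed SQ flags word (at sq_off.flags); the required ordering
 * against the SQ tail update is omitted here:
 *
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 *
 * In this mode the syscall simply returns to_submit; the SQPOLL thread does
 * the actual submission asynchronously.
 */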
3410
3411static const struct file_operations io_uring_fops = {
3412 .release = io_uring_release,
3413 .mmap = io_uring_mmap,
3414#ifndef CONFIG_MMU
3415 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
3416 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
3417#endif
3418 .poll = io_uring_poll,
3419#ifdef CONFIG_PROC_FS
3420 .show_fdinfo = io_uring_show_fdinfo,
3421#endif
3422};
3423
3424bool io_is_uring_fops(struct file *file)
3425{
3426 return file->f_op == &io_uring_fops;
3427}
3428
3429static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
3430 struct io_uring_params *p)
3431{
3432 struct io_rings *rings;
3433 size_t size, sq_array_offset;
3434
3435 /* make sure these are sane, as we already accounted them */
3436 ctx->sq_entries = p->sq_entries;
3437 ctx->cq_entries = p->cq_entries;
3438
3439 size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
3440 if (size == SIZE_MAX)
3441 return -EOVERFLOW;
3442
3443 rings = io_mem_alloc(size);
3444 if (!rings)
3445 return -ENOMEM;
3446
3447 ctx->rings = rings;
3448 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
3449 rings->sq_ring_mask = p->sq_entries - 1;
3450 rings->cq_ring_mask = p->cq_entries - 1;
3451 rings->sq_ring_entries = p->sq_entries;
3452 rings->cq_ring_entries = p->cq_entries;
3453
3454 if (p->flags & IORING_SETUP_SQE128)
3455 size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
3456 else
3457 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
3458 if (size == SIZE_MAX) {
3459 io_mem_free(ctx->rings);
3460 ctx->rings = NULL;
3461 return -EOVERFLOW;
3462 }
3463
3464 ctx->sq_sqes = io_mem_alloc(size);
3465 if (!ctx->sq_sqes) {
3466 io_mem_free(ctx->rings);
3467 ctx->rings = NULL;
3468 return -ENOMEM;
3469 }
3470
3471 return 0;
3472}
3473
3474static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
3475{
3476 int ret, fd;
3477
3478 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
3479 if (fd < 0)
3480 return fd;
3481
3482 ret = __io_uring_add_tctx_node(ctx);
3483 if (ret) {
3484 put_unused_fd(fd);
3485 return ret;
3486 }
3487 fd_install(fd, file);
3488 return fd;
3489}
3490
3491/*
3492 * Allocate an anonymous fd, this is what constitutes the application
3493 * visible backing of an io_uring instance. The application mmaps this
3494 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
3495 * we have to tie this fd to a socket for file garbage collection purposes.
3496 */
3497static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
3498{
3499 struct file *file;
3500#if defined(CONFIG_UNIX)
3501 int ret;
3502
3503 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
3504 &ctx->ring_sock);
3505 if (ret)
3506 return ERR_PTR(ret);
3507#endif
3508
3509 file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
3510 O_RDWR | O_CLOEXEC, NULL);
3511#if defined(CONFIG_UNIX)
3512 if (IS_ERR(file)) {
3513 sock_release(ctx->ring_sock);
3514 ctx->ring_sock = NULL;
3515 } else {
3516 ctx->ring_sock->file = file;
3517 }
3518#endif
3519 return file;
3520}
3521
3522static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
3523 struct io_uring_params __user *params)
3524{
3525 struct io_ring_ctx *ctx;
3526 struct file *file;
3527 int ret;
3528
3529 if (!entries)
3530 return -EINVAL;
3531 if (entries > IORING_MAX_ENTRIES) {
3532 if (!(p->flags & IORING_SETUP_CLAMP))
3533 return -EINVAL;
3534 entries = IORING_MAX_ENTRIES;
3535 }
3536
3537 /*
3538 * Use twice as many entries for the CQ ring. It's possible for the
3539 * application to drive a higher depth than the size of the SQ ring,
3540 * since the sqes are only used at submission time. This allows for
3541 * some flexibility in overcommitting a bit. If the application has
3542 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
3543 * of CQ ring entries manually.
3544 */
3545 p->sq_entries = roundup_pow_of_two(entries);
3546 if (p->flags & IORING_SETUP_CQSIZE) {
3547 /*
3548 * If IORING_SETUP_CQSIZE is set, we do the same roundup
3549 * to a power-of-two, if it isn't already. We do NOT impose
3550 * any cq vs sq ring sizing.
3551 */
3552 if (!p->cq_entries)
3553 return -EINVAL;
3554 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
3555 if (!(p->flags & IORING_SETUP_CLAMP))
3556 return -EINVAL;
3557 p->cq_entries = IORING_MAX_CQ_ENTRIES;
3558 }
3559 p->cq_entries = roundup_pow_of_two(p->cq_entries);
3560 if (p->cq_entries < p->sq_entries)
3561 return -EINVAL;
3562 } else {
3563 p->cq_entries = 2 * p->sq_entries;
3564 }
3565
3566 ctx = io_ring_ctx_alloc(p);
3567 if (!ctx)
3568 return -ENOMEM;
3569
3570 if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
3571 !(ctx->flags & IORING_SETUP_IOPOLL) &&
3572 !(ctx->flags & IORING_SETUP_SQPOLL))
3573 ctx->task_complete = true;
3574
3575 /*
3576	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user space
3577	 * applications don't need to poll for completion events themselves;
3578	 * they can rely on io_sq_thread to do that polling work, which
3579	 * reduces cpu usage and uring_lock contention.
3580 */
3581 if (ctx->flags & IORING_SETUP_IOPOLL &&
3582 !(ctx->flags & IORING_SETUP_SQPOLL))
3583 ctx->syscall_iopoll = 1;
3584
3585 ctx->compat = in_compat_syscall();
3586 if (!capable(CAP_IPC_LOCK))
3587 ctx->user = get_uid(current_user());
3588
3589 /*
3590 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
3591 * COOP_TASKRUN is set, then IPIs are never needed by the app.
3592 */
3593 ret = -EINVAL;
3594 if (ctx->flags & IORING_SETUP_SQPOLL) {
3595 /* IPI related flags don't make sense with SQPOLL */
3596 if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
3597 IORING_SETUP_TASKRUN_FLAG |
3598 IORING_SETUP_DEFER_TASKRUN))
3599 goto err;
3600 ctx->notify_method = TWA_SIGNAL_NO_IPI;
3601 } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
3602 ctx->notify_method = TWA_SIGNAL_NO_IPI;
3603 } else {
3604 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
3605 !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
3606 goto err;
3607 ctx->notify_method = TWA_SIGNAL;
3608 }
3609
3610 /*
3611 * For DEFER_TASKRUN we require the completion task to be the same as the
3612 * submission task. This implies that there is only one submitter, so enforce
3613 * that.
3614 */
3615 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
3616 !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
3617 goto err;
3618 }
3619
3620 /*
3621 * This is just grabbed for accounting purposes. When a process exits,
3622 * the mm is exited and dropped before the files, hence we need to hang
3623 * on to this mm purely for the purposes of being able to unaccount
3624 * memory (locked/pinned vm). It's not used for anything else.
3625 */
3626 mmgrab(current->mm);
3627 ctx->mm_account = current->mm;
3628
3629 ret = io_allocate_scq_urings(ctx, p);
3630 if (ret)
3631 goto err;
3632
3633 ret = io_sq_offload_create(ctx, p);
3634 if (ret)
3635 goto err;
3636 /* always set a rsrc node */
3637 ret = io_rsrc_node_switch_start(ctx);
3638 if (ret)
3639 goto err;
3640 io_rsrc_node_switch(ctx, NULL);
3641
3642 memset(&p->sq_off, 0, sizeof(p->sq_off));
3643 p->sq_off.head = offsetof(struct io_rings, sq.head);
3644 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
3645 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
3646 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
3647 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
3648 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
3649 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
3650
3651 memset(&p->cq_off, 0, sizeof(p->cq_off));
3652 p->cq_off.head = offsetof(struct io_rings, cq.head);
3653 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
3654 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
3655 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
3656 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
3657 p->cq_off.cqes = offsetof(struct io_rings, cqes);
3658 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
3659
3660 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
3661 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
3662 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
3663 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
3664 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
3665 IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
3666 IORING_FEAT_LINKED_FILE;
3667
3668 if (copy_to_user(params, p, sizeof(*p))) {
3669 ret = -EFAULT;
3670 goto err;
3671 }
3672
3673 if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
3674 && !(ctx->flags & IORING_SETUP_R_DISABLED))
3675 WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
3676
3677 file = io_uring_get_file(ctx);
3678 if (IS_ERR(file)) {
3679 ret = PTR_ERR(file);
3680 goto err;
3681 }
3682
3683 /*
3684 * Install ring fd as the very last thing, so we don't risk someone
3685 * having closed it before we finish setup
3686 */
3687 ret = io_uring_install_fd(ctx, file);
3688 if (ret < 0) {
3689 /* fput will clean it up */
3690 fput(file);
3691 return ret;
3692 }
3693
3694 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
3695 return ret;
3696err:
3697 io_ring_ctx_wait_and_kill(ctx);
3698 return ret;
3699}
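
/*
 * Illustrative sketch (userspace, not kernel code) of the sizing rules
 * applied above: entries round up to a power of two, the CQ ring defaults
 * to twice the SQ ring, and IORING_SETUP_CQSIZE lets the application pick
 * a larger CQ ring explicitly:
 *
 *	struct io_uring_params p = {
 *		.flags		= IORING_SETUP_CQSIZE,
 *		.cq_entries	= 4096,
 *	};
 *	int ring_fd = syscall(__NR_io_uring_setup, 100, &p);
 *
 * On return p.sq_entries is 128 (100 rounded up) and p.cq_entries is 4096;
 * without IORING_SETUP_CQSIZE it would have been 256.
 */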
3700
3701/*
3702 * Sets up an io_uring context, and returns the fd. The application asks for a
3703 * ring size; we return the actual sq/cq ring sizes (among other things) in the
3704 * params structure passed in.
3705 */
3706static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
3707{
3708 struct io_uring_params p;
3709 int i;
3710
3711 if (copy_from_user(&p, params, sizeof(p)))
3712 return -EFAULT;
3713 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
3714 if (p.resv[i])
3715 return -EINVAL;
3716 }
3717
3718 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
3719 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
3720 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
3721 IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
3722 IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
3723 IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
3724 IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN))
3725 return -EINVAL;
3726
3727 return io_uring_create(entries, &p, params);
3728}
3729
3730SYSCALL_DEFINE2(io_uring_setup, u32, entries,
3731 struct io_uring_params __user *, params)
3732{
3733 return io_uring_setup(entries, params);
3734}
3735
3736static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
3737 unsigned nr_args)
3738{
3739 struct io_uring_probe *p;
3740 size_t size;
3741 int i, ret;
3742
3743 size = struct_size(p, ops, nr_args);
3744 if (size == SIZE_MAX)
3745 return -EOVERFLOW;
3746 p = kzalloc(size, GFP_KERNEL);
3747 if (!p)
3748 return -ENOMEM;
3749
3750 ret = -EFAULT;
3751 if (copy_from_user(p, arg, size))
3752 goto out;
3753 ret = -EINVAL;
3754 if (memchr_inv(p, 0, size))
3755 goto out;
3756
3757 p->last_op = IORING_OP_LAST - 1;
3758 if (nr_args > IORING_OP_LAST)
3759 nr_args = IORING_OP_LAST;
3760
3761 for (i = 0; i < nr_args; i++) {
3762 p->ops[i].op = i;
3763 if (!io_op_defs[i].not_supported)
3764 p->ops[i].flags = IO_URING_OP_SUPPORTED;
3765 }
3766 p->ops_len = i;
3767
3768 ret = 0;
3769 if (copy_to_user(arg, p, size))
3770 ret = -EFAULT;
3771out:
3772 kfree(p);
3773 return ret;
3774}
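
/*
 * Illustrative sketch (userspace, not kernel code) of consuming the probe
 * structure filled in above. Userspace does not know IORING_OP_LAST, so the
 * usual pattern (as liburing does) is to ask for a generously sized table,
 * e.g. 256 ops; allocation/syscall error handling is omitted:
 *
 *	unsigned nr = 256;
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) + nr * sizeof(probe->ops[0]));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, nr);
 *
 *	for (unsigned i = 0; i < probe->ops_len; i++)
 *		if (probe->ops[i].flags & IO_URING_OP_SUPPORTED)
 *			printf("opcode %u supported\n", probe->ops[i].op);
 */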
3775
3776static int io_register_personality(struct io_ring_ctx *ctx)
3777{
3778 const struct cred *creds;
3779 u32 id;
3780 int ret;
3781
3782 creds = get_current_cred();
3783
3784 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
3785 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
3786 if (ret < 0) {
3787 put_cred(creds);
3788 return ret;
3789 }
3790 return id;
3791}
3792
3793static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
3794 void __user *arg, unsigned int nr_args)
3795{
3796 struct io_uring_restriction *res;
3797 size_t size;
3798 int i, ret;
3799
3800 /* Restrictions allowed only if rings started disabled */
3801 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
3802 return -EBADFD;
3803
3804 /* We allow only a single restrictions registration */
3805 if (ctx->restrictions.registered)
3806 return -EBUSY;
3807
3808 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
3809 return -EINVAL;
3810
3811 size = array_size(nr_args, sizeof(*res));
3812 if (size == SIZE_MAX)
3813 return -EOVERFLOW;
3814
3815 res = memdup_user(arg, size);
3816 if (IS_ERR(res))
3817 return PTR_ERR(res);
3818
3819 ret = 0;
3820
3821 for (i = 0; i < nr_args; i++) {
3822 switch (res[i].opcode) {
3823 case IORING_RESTRICTION_REGISTER_OP:
3824 if (res[i].register_op >= IORING_REGISTER_LAST) {
3825 ret = -EINVAL;
3826 goto out;
3827 }
3828
3829 __set_bit(res[i].register_op,
3830 ctx->restrictions.register_op);
3831 break;
3832 case IORING_RESTRICTION_SQE_OP:
3833 if (res[i].sqe_op >= IORING_OP_LAST) {
3834 ret = -EINVAL;
3835 goto out;
3836 }
3837
3838 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
3839 break;
3840 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
3841 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
3842 break;
3843 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
3844 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
3845 break;
3846 default:
3847 ret = -EINVAL;
3848 goto out;
3849 }
3850 }
3851
3852out:
3853 /* Reset all restrictions if an error happened */
3854 if (ret != 0)
3855 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
3856 else
3857 ctx->restrictions.registered = true;
3858
3859 kfree(res);
3860 return ret;
3861}
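
/*
 * Illustrative sketch (userspace, not kernel code) of the restriction
 * registration handled above. The ring must have been created with
 * IORING_SETUP_R_DISABLED; once the restrictions are in place it is
 * enabled with IORING_REGISTER_ENABLE_RINGS:
 *
 *	struct io_uring_restriction res[] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_READV },
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_WRITEV },
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 *
 * After this, any SQE whose opcode is not READV/WRITEV fails submission
 * with -EACCES (see io_check_restriction() earlier in this file).
 */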
3862
static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task)
		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;
	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	return 0;
}

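/*
 * Set the CPU affinity mask for this task's io-wq workers from a
 * userspace bitmap (with compat handling).
 */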
static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
				       void __user *arg, unsigned len)
{
	struct io_uring_task *tctx = current->io_uring;
	cpumask_var_t new_mask;
	int ret;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	if (len > cpumask_size())
		len = cpumask_size();

	if (in_compat_syscall()) {
		ret = compat_get_bitmap(cpumask_bits(new_mask),
					(const compat_ulong_t __user *)arg,
					len * 8 /* CHAR_BIT */);
	} else {
		ret = copy_from_user(new_mask, arg, len);
	}

	if (ret) {
		free_cpumask_var(new_mask);
		return -EFAULT;
	}

	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
	free_cpumask_var(new_mask);
	return ret;
}

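/* Reset io-wq worker affinity back to the default (no restriction). */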
static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	return io_wq_cpu_affinity(tctx->io_wq, NULL);
}

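/*
 * Update the maximum number of bounded/unbounded io-wq workers. For SQPOLL
 * rings the limits apply to the SQPOLL task's io-wq (taking sqd->lock before
 * re-acquiring uring_lock); otherwise they apply to the current task and are
 * then propagated to every task registered with the ring. The previous
 * values are copied back to userspace.
 */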
static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
					       void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_tctx_node *node;
	struct io_uring_task *tctx = NULL;
	struct io_sq_data *sqd = NULL;
	__u32 new_count[2];
	int i, ret;

	if (copy_from_user(new_count, arg, sizeof(new_count)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i] > INT_MAX)
			return -EINVAL;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		sqd = ctx->sq_data;
		if (sqd) {
			/*
			 * Observe the correct sqd->lock -> ctx->uring_lock
			 * ordering. Fine to drop uring_lock here, we hold
			 * a ref to the ctx.
			 */
			refcount_inc(&sqd->refs);
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&sqd->lock);
			mutex_lock(&ctx->uring_lock);
			if (sqd->thread)
				tctx = sqd->thread->io_uring;
		}
	} else {
		tctx = current->io_uring;
	}

	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i])
			ctx->iowq_limits[i] = new_count[i];
	ctx->iowq_limits_set = true;

	if (tctx && tctx->io_wq) {
		ret = io_wq_max_workers(tctx->io_wq, new_count);
		if (ret)
			goto err;
	} else {
		memset(new_count, 0, sizeof(new_count));
	}

	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}

	if (copy_to_user(arg, new_count, sizeof(new_count)))
		return -EFAULT;

	/* that's it for SQPOLL, only the SQPOLL task creates requests */
	if (sqd)
		return 0;

	/* now propagate the restriction to all registered users */
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		if (WARN_ON_ONCE(!tctx->io_wq))
			continue;

		for (i = 0; i < ARRAY_SIZE(new_count); i++)
			new_count[i] = ctx->iowq_limits[i];
		/* ignore errors, it always returns zero anyway */
		(void)io_wq_max_workers(tctx->io_wq, new_count);
	}
	return 0;
err:
	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}
	return ret;
}

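/*
 * Handle a single io_uring_register(2) opcode. Called with uring_lock held;
 * enforces any registered restrictions before dispatching to the per-opcode
 * helper.
 */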
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We don't quiesce the refs for register anymore and so it can't be
	 * dying as we're holding a file ref here.
	 */
	if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
		return -ENXIO;

	if (ctx->submitter_task && ctx->submitter_task != current)
		return -EEXIST;

	if (ctx->restricted) {
		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
		if (!test_bit(opcode, ctx->restrictions.register_op))
			return -EACCES;
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_register_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 0);
		break;
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 1);
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_FILES2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_FILES_UPDATE2:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_BUFFERS2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_BUFFERS_UPDATE:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (!arg || !nr_args)
			break;
		ret = io_register_iowq_aff(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_unregister_iowq_aff(ctx);
		break;
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		ret = -EINVAL;
		if (!arg || nr_args != 2)
			break;
		ret = io_register_iowq_max_workers(ctx, arg);
		break;
	case IORING_REGISTER_RING_FDS:
		ret = io_ringfd_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_RING_FDS:
		ret = io_ringfd_unregister(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_pbuf_ring(ctx, arg);
		break;
	case IORING_UNREGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_unregister_pbuf_ring(ctx, arg);
		break;
	case IORING_REGISTER_SYNC_CANCEL:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_sync_cancel(ctx, arg);
		break;
	case IORING_REGISTER_FILE_ALLOC_RANGE:
		ret = -EINVAL;
		if (!arg || nr_args)
			break;
		ret = io_register_file_alloc_range(ctx, arg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

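/*
 * io_uring_register(2): resolve the ring fd and dispatch to
 * __io_uring_register() under uring_lock.
 */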
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	if (opcode >= IORING_REGISTER_LAST)
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (!io_is_uring_fops(f.file))
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
out_fput:
	fdput(f);
	return ret;
}

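/*
 * One-time init: compile-time layout checks for the shared SQE and ring
 * structures, opcode table setup, and creation of the io_kiocb slab cache.
 */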
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
#define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0, __u8, opcode);
	BUILD_BUG_SQE_ELEM(1, __u8, flags);
	BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
	BUILD_BUG_SQE_ELEM(4, __s32, fd);
	BUILD_BUG_SQE_ELEM(8, __u64, off);
	BUILD_BUG_SQE_ELEM(8, __u64, addr2);
	BUILD_BUG_SQE_ELEM(8, __u32, cmd_op);
	BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
	BUILD_BUG_SQE_ELEM(16, __u64, addr);
	BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32, len);
	BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, rename_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, unlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, hardlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, xattr_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, msg_ring_flags);
	BUILD_BUG_SQE_ELEM(32, __u64, user_data);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
	BUILD_BUG_SQE_ELEM(42, __u16, personality);
	BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
	BUILD_BUG_SQE_ELEM(44, __u32, file_index);
	BUILD_BUG_SQE_ELEM(44, __u16, addr_len);
	BUILD_BUG_SQE_ELEM(46, __u16, __pad3[0]);
	BUILD_BUG_SQE_ELEM(48, __u64, addr3);
	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
	BUILD_BUG_SQE_ELEM(56, __u64, __pad2);

	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
		     sizeof(struct io_uring_rsrc_update));
	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
		     sizeof(struct io_uring_rsrc_update2));

	/* ->buf_index is u16 */
	BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
	BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
		     offsetof(struct io_uring_buf_ring, tail));

	/* should fit into one byte */
	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);

	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));

	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));

	io_uring_optable_init();

	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT);
	return 0;
};
__initcall(io_uring_init);