Loading...
1/*
2 * An async IO implementation for Linux
3 * Written by Benjamin LaHaise <bcrl@kvack.org>
4 *
5 * Implements an efficient asynchronous io interface.
6 *
7 * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
8 * Copyright 2018 Christoph Hellwig.
9 *
10 * See ../COPYING for licensing terms.
11 */
12#define pr_fmt(fmt) "%s: " fmt, __func__
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <linux/time.h>
18#include <linux/aio_abi.h>
19#include <linux/export.h>
20#include <linux/syscalls.h>
21#include <linux/backing-dev.h>
22#include <linux/refcount.h>
23#include <linux/uio.h>
24
25#include <linux/sched/signal.h>
26#include <linux/fs.h>
27#include <linux/file.h>
28#include <linux/mm.h>
29#include <linux/mman.h>
30#include <linux/percpu.h>
31#include <linux/slab.h>
32#include <linux/timer.h>
33#include <linux/aio.h>
34#include <linux/highmem.h>
35#include <linux/workqueue.h>
36#include <linux/security.h>
37#include <linux/eventfd.h>
38#include <linux/blkdev.h>
39#include <linux/compat.h>
40#include <linux/migrate.h>
41#include <linux/ramfs.h>
42#include <linux/percpu-refcount.h>
43#include <linux/mount.h>
44#include <linux/pseudo_fs.h>
45
46#include <linux/uaccess.h>
47#include <linux/nospec.h>
48
49#include "internal.h"
50
51#define KIOCB_KEY 0
52
53#define AIO_RING_MAGIC 0xa10a10a1
54#define AIO_RING_COMPAT_FEATURES 1
55#define AIO_RING_INCOMPAT_FEATURES 0
56struct aio_ring {
57 unsigned id; /* kernel internal index number */
58 unsigned nr; /* number of io_events */
59 unsigned head; /* Written to by userland or under ring_lock
60 * mutex by aio_read_events_ring(). */
61 unsigned tail;
62
63 unsigned magic;
64 unsigned compat_features;
65 unsigned incompat_features;
66 unsigned header_length; /* size of aio_ring */
67
68
69 struct io_event io_events[];
70}; /* 128 bytes + ring size */
71
72/*
73 * Plugging is meant to work with larger batches of IOs. If we don't
74 * have more than the below, then don't bother setting up a plug.
75 */
76#define AIO_PLUG_THRESHOLD 2
77
78#define AIO_RING_PAGES 8
79
80struct kioctx_table {
81 struct rcu_head rcu;
82 unsigned nr;
83 struct kioctx __rcu *table[];
84};
85
86struct kioctx_cpu {
87 unsigned reqs_available;
88};
89
90struct ctx_rq_wait {
91 struct completion comp;
92 atomic_t count;
93};
94
95struct kioctx {
96 struct percpu_ref users;
97 atomic_t dead;
98
99 struct percpu_ref reqs;
100
101 unsigned long user_id;
102
103 struct __percpu kioctx_cpu *cpu;
104
105 /*
106 * For percpu reqs_available, number of slots we move to/from global
107 * counter at a time:
108 */
109 unsigned req_batch;
110 /*
111 * This is what userspace passed to io_setup(), it's not used for
112 * anything but counting against the global max_reqs quota.
113 *
114 * The real limit is nr_events - 1, which will be larger (see
115 * aio_setup_ring())
116 */
117 unsigned max_reqs;
118
119 /* Size of ringbuffer, in units of struct io_event */
120 unsigned nr_events;
121
122 unsigned long mmap_base;
123 unsigned long mmap_size;
124
125 struct page **ring_pages;
126 long nr_pages;
127
128 struct rcu_work free_rwork; /* see free_ioctx() */
129
130 /*
131 * signals when all in-flight requests are done
132 */
133 struct ctx_rq_wait *rq_wait;
134
135 struct {
136 /*
137 * This counts the number of available slots in the ringbuffer,
138 * so we avoid overflowing it: it's decremented (if positive)
139 * when allocating a kiocb and incremented when the resulting
140 * io_event is pulled off the ringbuffer.
141 *
142 * We batch accesses to it with a percpu version.
143 */
144 atomic_t reqs_available;
145 } ____cacheline_aligned_in_smp;
146
147 struct {
148 spinlock_t ctx_lock;
149 struct list_head active_reqs; /* used for cancellation */
150 } ____cacheline_aligned_in_smp;
151
152 struct {
153 struct mutex ring_lock;
154 wait_queue_head_t wait;
155 } ____cacheline_aligned_in_smp;
156
157 struct {
158 unsigned tail;
159 unsigned completed_events;
160 spinlock_t completion_lock;
161 } ____cacheline_aligned_in_smp;
162
163 struct page *internal_pages[AIO_RING_PAGES];
164 struct file *aio_ring_file;
165
166 unsigned id;
167};
168
169/*
170 * First field must be the file pointer in all the
171 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
172 */
173struct fsync_iocb {
174 struct file *file;
175 struct work_struct work;
176 bool datasync;
177 struct cred *creds;
178};
179
180struct poll_iocb {
181 struct file *file;
182 struct wait_queue_head *head;
183 __poll_t events;
184 bool cancelled;
185 bool work_scheduled;
186 bool work_need_resched;
187 struct wait_queue_entry wait;
188 struct work_struct work;
189};
190
191/*
192 * NOTE! Each of the iocb union members has the file pointer
193 * as the first entry in their struct definition. So you can
194 * access the file pointer through any of the sub-structs,
195 * or directly as just 'ki_filp' in this struct.
196 */
197struct aio_kiocb {
198 union {
199 struct file *ki_filp;
200 struct kiocb rw;
201 struct fsync_iocb fsync;
202 struct poll_iocb poll;
203 };
204
205 struct kioctx *ki_ctx;
206 kiocb_cancel_fn *ki_cancel;
207
208 struct io_event ki_res;
209
210 struct list_head ki_list; /* the aio core uses this
211 * for cancellation */
212 refcount_t ki_refcnt;
213
214 /*
215 * If the aio_resfd field of the userspace iocb is not zero,
216 * this is the underlying eventfd context to deliver events to.
217 */
218 struct eventfd_ctx *ki_eventfd;
219};
220
221/*------ sysctl variables----*/
222static DEFINE_SPINLOCK(aio_nr_lock);
223static unsigned long aio_nr; /* current system wide number of aio requests */
224static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
225/*----end sysctl variables---*/
226#ifdef CONFIG_SYSCTL
227static struct ctl_table aio_sysctls[] = {
228 {
229 .procname = "aio-nr",
230 .data = &aio_nr,
231 .maxlen = sizeof(aio_nr),
232 .mode = 0444,
233 .proc_handler = proc_doulongvec_minmax,
234 },
235 {
236 .procname = "aio-max-nr",
237 .data = &aio_max_nr,
238 .maxlen = sizeof(aio_max_nr),
239 .mode = 0644,
240 .proc_handler = proc_doulongvec_minmax,
241 },
242 {}
243};
244
245static void __init aio_sysctl_init(void)
246{
247 register_sysctl_init("fs", aio_sysctls);
248}
249#else
250#define aio_sysctl_init() do { } while (0)
251#endif
252
253static struct kmem_cache *kiocb_cachep;
254static struct kmem_cache *kioctx_cachep;
255
256static struct vfsmount *aio_mnt;
257
258static const struct file_operations aio_ring_fops;
259static const struct address_space_operations aio_ctx_aops;
260
261static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
262{
263 struct file *file;
264 struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
265 if (IS_ERR(inode))
266 return ERR_CAST(inode);
267
268 inode->i_mapping->a_ops = &aio_ctx_aops;
269 inode->i_mapping->private_data = ctx;
270 inode->i_size = PAGE_SIZE * nr_pages;
271
272 file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
273 O_RDWR, &aio_ring_fops);
274 if (IS_ERR(file))
275 iput(inode);
276 return file;
277}
278
279static int aio_init_fs_context(struct fs_context *fc)
280{
281 if (!init_pseudo(fc, AIO_RING_MAGIC))
282 return -ENOMEM;
283 fc->s_iflags |= SB_I_NOEXEC;
284 return 0;
285}
286
287/* aio_setup
288 * Creates the slab caches used by the aio routines, panic on
289 * failure as this is done early during the boot sequence.
290 */
291static int __init aio_setup(void)
292{
293 static struct file_system_type aio_fs = {
294 .name = "aio",
295 .init_fs_context = aio_init_fs_context,
296 .kill_sb = kill_anon_super,
297 };
298 aio_mnt = kern_mount(&aio_fs);
299 if (IS_ERR(aio_mnt))
300 panic("Failed to create aio fs mount.");
301
302 kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
303 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
304 aio_sysctl_init();
305 return 0;
306}
307__initcall(aio_setup);
308
309static void put_aio_ring_file(struct kioctx *ctx)
310{
311 struct file *aio_ring_file = ctx->aio_ring_file;
312 struct address_space *i_mapping;
313
314 if (aio_ring_file) {
315 truncate_setsize(file_inode(aio_ring_file), 0);
316
317 /* Prevent further access to the kioctx from migratepages */
318 i_mapping = aio_ring_file->f_mapping;
319 spin_lock(&i_mapping->private_lock);
320 i_mapping->private_data = NULL;
321 ctx->aio_ring_file = NULL;
322 spin_unlock(&i_mapping->private_lock);
323
324 fput(aio_ring_file);
325 }
326}
327
328static void aio_free_ring(struct kioctx *ctx)
329{
330 int i;
331
332 /* Disconnect the kiotx from the ring file. This prevents future
333 * accesses to the kioctx from page migration.
334 */
335 put_aio_ring_file(ctx);
336
337 for (i = 0; i < ctx->nr_pages; i++) {
338 struct page *page;
339 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
340 page_count(ctx->ring_pages[i]));
341 page = ctx->ring_pages[i];
342 if (!page)
343 continue;
344 ctx->ring_pages[i] = NULL;
345 put_page(page);
346 }
347
348 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
349 kfree(ctx->ring_pages);
350 ctx->ring_pages = NULL;
351 }
352}
353
354static int aio_ring_mremap(struct vm_area_struct *vma)
355{
356 struct file *file = vma->vm_file;
357 struct mm_struct *mm = vma->vm_mm;
358 struct kioctx_table *table;
359 int i, res = -EINVAL;
360
361 spin_lock(&mm->ioctx_lock);
362 rcu_read_lock();
363 table = rcu_dereference(mm->ioctx_table);
364 if (!table)
365 goto out_unlock;
366
367 for (i = 0; i < table->nr; i++) {
368 struct kioctx *ctx;
369
370 ctx = rcu_dereference(table->table[i]);
371 if (ctx && ctx->aio_ring_file == file) {
372 if (!atomic_read(&ctx->dead)) {
373 ctx->user_id = ctx->mmap_base = vma->vm_start;
374 res = 0;
375 }
376 break;
377 }
378 }
379
380out_unlock:
381 rcu_read_unlock();
382 spin_unlock(&mm->ioctx_lock);
383 return res;
384}
385
386static const struct vm_operations_struct aio_ring_vm_ops = {
387 .mremap = aio_ring_mremap,
388#if IS_ENABLED(CONFIG_MMU)
389 .fault = filemap_fault,
390 .map_pages = filemap_map_pages,
391 .page_mkwrite = filemap_page_mkwrite,
392#endif
393};
394
395static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
396{
397 vma->vm_flags |= VM_DONTEXPAND;
398 vma->vm_ops = &aio_ring_vm_ops;
399 return 0;
400}
401
402static const struct file_operations aio_ring_fops = {
403 .mmap = aio_ring_mmap,
404};
405
406#if IS_ENABLED(CONFIG_MIGRATION)
407static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
408 struct folio *src, enum migrate_mode mode)
409{
410 struct kioctx *ctx;
411 unsigned long flags;
412 pgoff_t idx;
413 int rc;
414
415 /*
416 * We cannot support the _NO_COPY case here, because copy needs to
417 * happen under the ctx->completion_lock. That does not work with the
418 * migration workflow of MIGRATE_SYNC_NO_COPY.
419 */
420 if (mode == MIGRATE_SYNC_NO_COPY)
421 return -EINVAL;
422
423 rc = 0;
424
425 /* mapping->private_lock here protects against the kioctx teardown. */
426 spin_lock(&mapping->private_lock);
427 ctx = mapping->private_data;
428 if (!ctx) {
429 rc = -EINVAL;
430 goto out;
431 }
432
433 /* The ring_lock mutex. The prevents aio_read_events() from writing
434 * to the ring's head, and prevents page migration from mucking in
435 * a partially initialized kiotx.
436 */
437 if (!mutex_trylock(&ctx->ring_lock)) {
438 rc = -EAGAIN;
439 goto out;
440 }
441
442 idx = src->index;
443 if (idx < (pgoff_t)ctx->nr_pages) {
444 /* Make sure the old folio hasn't already been changed */
445 if (ctx->ring_pages[idx] != &src->page)
446 rc = -EAGAIN;
447 } else
448 rc = -EINVAL;
449
450 if (rc != 0)
451 goto out_unlock;
452
453 /* Writeback must be complete */
454 BUG_ON(folio_test_writeback(src));
455 folio_get(dst);
456
457 rc = folio_migrate_mapping(mapping, dst, src, 1);
458 if (rc != MIGRATEPAGE_SUCCESS) {
459 folio_put(dst);
460 goto out_unlock;
461 }
462
463 /* Take completion_lock to prevent other writes to the ring buffer
464 * while the old folio is copied to the new. This prevents new
465 * events from being lost.
466 */
467 spin_lock_irqsave(&ctx->completion_lock, flags);
468 folio_migrate_copy(dst, src);
469 BUG_ON(ctx->ring_pages[idx] != &src->page);
470 ctx->ring_pages[idx] = &dst->page;
471 spin_unlock_irqrestore(&ctx->completion_lock, flags);
472
473 /* The old folio is no longer accessible. */
474 folio_put(src);
475
476out_unlock:
477 mutex_unlock(&ctx->ring_lock);
478out:
479 spin_unlock(&mapping->private_lock);
480 return rc;
481}
482#else
483#define aio_migrate_folio NULL
484#endif
485
486static const struct address_space_operations aio_ctx_aops = {
487 .dirty_folio = noop_dirty_folio,
488 .migrate_folio = aio_migrate_folio,
489};
490
491static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
492{
493 struct aio_ring *ring;
494 struct mm_struct *mm = current->mm;
495 unsigned long size, unused;
496 int nr_pages;
497 int i;
498 struct file *file;
499
500 /* Compensate for the ring buffer's head/tail overlap entry */
501 nr_events += 2; /* 1 is required, 2 for good luck */
502
503 size = sizeof(struct aio_ring);
504 size += sizeof(struct io_event) * nr_events;
505
506 nr_pages = PFN_UP(size);
507 if (nr_pages < 0)
508 return -EINVAL;
509
510 file = aio_private_file(ctx, nr_pages);
511 if (IS_ERR(file)) {
512 ctx->aio_ring_file = NULL;
513 return -ENOMEM;
514 }
515
516 ctx->aio_ring_file = file;
517 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
518 / sizeof(struct io_event);
519
520 ctx->ring_pages = ctx->internal_pages;
521 if (nr_pages > AIO_RING_PAGES) {
522 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
523 GFP_KERNEL);
524 if (!ctx->ring_pages) {
525 put_aio_ring_file(ctx);
526 return -ENOMEM;
527 }
528 }
529
530 for (i = 0; i < nr_pages; i++) {
531 struct page *page;
532 page = find_or_create_page(file->f_mapping,
533 i, GFP_HIGHUSER | __GFP_ZERO);
534 if (!page)
535 break;
536 pr_debug("pid(%d) page[%d]->count=%d\n",
537 current->pid, i, page_count(page));
538 SetPageUptodate(page);
539 unlock_page(page);
540
541 ctx->ring_pages[i] = page;
542 }
543 ctx->nr_pages = i;
544
545 if (unlikely(i != nr_pages)) {
546 aio_free_ring(ctx);
547 return -ENOMEM;
548 }
549
550 ctx->mmap_size = nr_pages * PAGE_SIZE;
551 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
552
553 if (mmap_write_lock_killable(mm)) {
554 ctx->mmap_size = 0;
555 aio_free_ring(ctx);
556 return -EINTR;
557 }
558
559 ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
560 PROT_READ | PROT_WRITE,
561 MAP_SHARED, 0, &unused, NULL);
562 mmap_write_unlock(mm);
563 if (IS_ERR((void *)ctx->mmap_base)) {
564 ctx->mmap_size = 0;
565 aio_free_ring(ctx);
566 return -ENOMEM;
567 }
568
569 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
570
571 ctx->user_id = ctx->mmap_base;
572 ctx->nr_events = nr_events; /* trusted copy */
573
574 ring = kmap_atomic(ctx->ring_pages[0]);
575 ring->nr = nr_events; /* user copy */
576 ring->id = ~0U;
577 ring->head = ring->tail = 0;
578 ring->magic = AIO_RING_MAGIC;
579 ring->compat_features = AIO_RING_COMPAT_FEATURES;
580 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
581 ring->header_length = sizeof(struct aio_ring);
582 kunmap_atomic(ring);
583 flush_dcache_page(ctx->ring_pages[0]);
584
585 return 0;
586}
587
588#define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
589#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
590#define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
591
592void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
593{
594 struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
595 struct kioctx *ctx = req->ki_ctx;
596 unsigned long flags;
597
598 if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
599 return;
600
601 spin_lock_irqsave(&ctx->ctx_lock, flags);
602 list_add_tail(&req->ki_list, &ctx->active_reqs);
603 req->ki_cancel = cancel;
604 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
605}
606EXPORT_SYMBOL(kiocb_set_cancel_fn);
607
608/*
609 * free_ioctx() should be RCU delayed to synchronize against the RCU
610 * protected lookup_ioctx() and also needs process context to call
611 * aio_free_ring(). Use rcu_work.
612 */
613static void free_ioctx(struct work_struct *work)
614{
615 struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
616 free_rwork);
617 pr_debug("freeing %p\n", ctx);
618
619 aio_free_ring(ctx);
620 free_percpu(ctx->cpu);
621 percpu_ref_exit(&ctx->reqs);
622 percpu_ref_exit(&ctx->users);
623 kmem_cache_free(kioctx_cachep, ctx);
624}
625
626static void free_ioctx_reqs(struct percpu_ref *ref)
627{
628 struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
629
630 /* At this point we know that there are no any in-flight requests */
631 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
632 complete(&ctx->rq_wait->comp);
633
634 /* Synchronize against RCU protected table->table[] dereferences */
635 INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
636 queue_rcu_work(system_wq, &ctx->free_rwork);
637}
638
639/*
640 * When this function runs, the kioctx has been removed from the "hash table"
641 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
642 * now it's safe to cancel any that need to be.
643 */
644static void free_ioctx_users(struct percpu_ref *ref)
645{
646 struct kioctx *ctx = container_of(ref, struct kioctx, users);
647 struct aio_kiocb *req;
648
649 spin_lock_irq(&ctx->ctx_lock);
650
651 while (!list_empty(&ctx->active_reqs)) {
652 req = list_first_entry(&ctx->active_reqs,
653 struct aio_kiocb, ki_list);
654 req->ki_cancel(&req->rw);
655 list_del_init(&req->ki_list);
656 }
657
658 spin_unlock_irq(&ctx->ctx_lock);
659
660 percpu_ref_kill(&ctx->reqs);
661 percpu_ref_put(&ctx->reqs);
662}
663
664static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
665{
666 unsigned i, new_nr;
667 struct kioctx_table *table, *old;
668 struct aio_ring *ring;
669
670 spin_lock(&mm->ioctx_lock);
671 table = rcu_dereference_raw(mm->ioctx_table);
672
673 while (1) {
674 if (table)
675 for (i = 0; i < table->nr; i++)
676 if (!rcu_access_pointer(table->table[i])) {
677 ctx->id = i;
678 rcu_assign_pointer(table->table[i], ctx);
679 spin_unlock(&mm->ioctx_lock);
680
681 /* While kioctx setup is in progress,
682 * we are protected from page migration
683 * changes ring_pages by ->ring_lock.
684 */
685 ring = kmap_atomic(ctx->ring_pages[0]);
686 ring->id = ctx->id;
687 kunmap_atomic(ring);
688 return 0;
689 }
690
691 new_nr = (table ? table->nr : 1) * 4;
692 spin_unlock(&mm->ioctx_lock);
693
694 table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL);
695 if (!table)
696 return -ENOMEM;
697
698 table->nr = new_nr;
699
700 spin_lock(&mm->ioctx_lock);
701 old = rcu_dereference_raw(mm->ioctx_table);
702
703 if (!old) {
704 rcu_assign_pointer(mm->ioctx_table, table);
705 } else if (table->nr > old->nr) {
706 memcpy(table->table, old->table,
707 old->nr * sizeof(struct kioctx *));
708
709 rcu_assign_pointer(mm->ioctx_table, table);
710 kfree_rcu(old, rcu);
711 } else {
712 kfree(table);
713 table = old;
714 }
715 }
716}
717
718static void aio_nr_sub(unsigned nr)
719{
720 spin_lock(&aio_nr_lock);
721 if (WARN_ON(aio_nr - nr > aio_nr))
722 aio_nr = 0;
723 else
724 aio_nr -= nr;
725 spin_unlock(&aio_nr_lock);
726}
727
728/* ioctx_alloc
729 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
730 */
731static struct kioctx *ioctx_alloc(unsigned nr_events)
732{
733 struct mm_struct *mm = current->mm;
734 struct kioctx *ctx;
735 int err = -ENOMEM;
736
737 /*
738 * Store the original nr_events -- what userspace passed to io_setup(),
739 * for counting against the global limit -- before it changes.
740 */
741 unsigned int max_reqs = nr_events;
742
743 /*
744 * We keep track of the number of available ringbuffer slots, to prevent
745 * overflow (reqs_available), and we also use percpu counters for this.
746 *
747 * So since up to half the slots might be on other cpu's percpu counters
748 * and unavailable, double nr_events so userspace sees what they
749 * expected: additionally, we move req_batch slots to/from percpu
750 * counters at a time, so make sure that isn't 0:
751 */
752 nr_events = max(nr_events, num_possible_cpus() * 4);
753 nr_events *= 2;
754
755 /* Prevent overflows */
756 if (nr_events > (0x10000000U / sizeof(struct io_event))) {
757 pr_debug("ENOMEM: nr_events too high\n");
758 return ERR_PTR(-EINVAL);
759 }
760
761 if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
762 return ERR_PTR(-EAGAIN);
763
764 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
765 if (!ctx)
766 return ERR_PTR(-ENOMEM);
767
768 ctx->max_reqs = max_reqs;
769
770 spin_lock_init(&ctx->ctx_lock);
771 spin_lock_init(&ctx->completion_lock);
772 mutex_init(&ctx->ring_lock);
773 /* Protect against page migration throughout kiotx setup by keeping
774 * the ring_lock mutex held until setup is complete. */
775 mutex_lock(&ctx->ring_lock);
776 init_waitqueue_head(&ctx->wait);
777
778 INIT_LIST_HEAD(&ctx->active_reqs);
779
780 if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
781 goto err;
782
783 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
784 goto err;
785
786 ctx->cpu = alloc_percpu(struct kioctx_cpu);
787 if (!ctx->cpu)
788 goto err;
789
790 err = aio_setup_ring(ctx, nr_events);
791 if (err < 0)
792 goto err;
793
794 atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
795 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
796 if (ctx->req_batch < 1)
797 ctx->req_batch = 1;
798
799 /* limit the number of system wide aios */
800 spin_lock(&aio_nr_lock);
801 if (aio_nr + ctx->max_reqs > aio_max_nr ||
802 aio_nr + ctx->max_reqs < aio_nr) {
803 spin_unlock(&aio_nr_lock);
804 err = -EAGAIN;
805 goto err_ctx;
806 }
807 aio_nr += ctx->max_reqs;
808 spin_unlock(&aio_nr_lock);
809
810 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
811 percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */
812
813 err = ioctx_add_table(ctx, mm);
814 if (err)
815 goto err_cleanup;
816
817 /* Release the ring_lock mutex now that all setup is complete. */
818 mutex_unlock(&ctx->ring_lock);
819
820 pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
821 ctx, ctx->user_id, mm, ctx->nr_events);
822 return ctx;
823
824err_cleanup:
825 aio_nr_sub(ctx->max_reqs);
826err_ctx:
827 atomic_set(&ctx->dead, 1);
828 if (ctx->mmap_size)
829 vm_munmap(ctx->mmap_base, ctx->mmap_size);
830 aio_free_ring(ctx);
831err:
832 mutex_unlock(&ctx->ring_lock);
833 free_percpu(ctx->cpu);
834 percpu_ref_exit(&ctx->reqs);
835 percpu_ref_exit(&ctx->users);
836 kmem_cache_free(kioctx_cachep, ctx);
837 pr_debug("error allocating ioctx %d\n", err);
838 return ERR_PTR(err);
839}
840
841/* kill_ioctx
842 * Cancels all outstanding aio requests on an aio context. Used
843 * when the processes owning a context have all exited to encourage
844 * the rapid destruction of the kioctx.
845 */
846static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
847 struct ctx_rq_wait *wait)
848{
849 struct kioctx_table *table;
850
851 spin_lock(&mm->ioctx_lock);
852 if (atomic_xchg(&ctx->dead, 1)) {
853 spin_unlock(&mm->ioctx_lock);
854 return -EINVAL;
855 }
856
857 table = rcu_dereference_raw(mm->ioctx_table);
858 WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
859 RCU_INIT_POINTER(table->table[ctx->id], NULL);
860 spin_unlock(&mm->ioctx_lock);
861
862 /* free_ioctx_reqs() will do the necessary RCU synchronization */
863 wake_up_all(&ctx->wait);
864
865 /*
866 * It'd be more correct to do this in free_ioctx(), after all
867 * the outstanding kiocbs have finished - but by then io_destroy
868 * has already returned, so io_setup() could potentially return
869 * -EAGAIN with no ioctxs actually in use (as far as userspace
870 * could tell).
871 */
872 aio_nr_sub(ctx->max_reqs);
873
874 if (ctx->mmap_size)
875 vm_munmap(ctx->mmap_base, ctx->mmap_size);
876
877 ctx->rq_wait = wait;
878 percpu_ref_kill(&ctx->users);
879 return 0;
880}
881
882/*
883 * exit_aio: called when the last user of mm goes away. At this point, there is
884 * no way for any new requests to be submited or any of the io_* syscalls to be
885 * called on the context.
886 *
887 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
888 * them.
889 */
890void exit_aio(struct mm_struct *mm)
891{
892 struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
893 struct ctx_rq_wait wait;
894 int i, skipped;
895
896 if (!table)
897 return;
898
899 atomic_set(&wait.count, table->nr);
900 init_completion(&wait.comp);
901
902 skipped = 0;
903 for (i = 0; i < table->nr; ++i) {
904 struct kioctx *ctx =
905 rcu_dereference_protected(table->table[i], true);
906
907 if (!ctx) {
908 skipped++;
909 continue;
910 }
911
912 /*
913 * We don't need to bother with munmap() here - exit_mmap(mm)
914 * is coming and it'll unmap everything. And we simply can't,
915 * this is not necessarily our ->mm.
916 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
917 * that it needs to unmap the area, just set it to 0.
918 */
919 ctx->mmap_size = 0;
920 kill_ioctx(mm, ctx, &wait);
921 }
922
923 if (!atomic_sub_and_test(skipped, &wait.count)) {
924 /* Wait until all IO for the context are done. */
925 wait_for_completion(&wait.comp);
926 }
927
928 RCU_INIT_POINTER(mm->ioctx_table, NULL);
929 kfree(table);
930}
931
932static void put_reqs_available(struct kioctx *ctx, unsigned nr)
933{
934 struct kioctx_cpu *kcpu;
935 unsigned long flags;
936
937 local_irq_save(flags);
938 kcpu = this_cpu_ptr(ctx->cpu);
939 kcpu->reqs_available += nr;
940
941 while (kcpu->reqs_available >= ctx->req_batch * 2) {
942 kcpu->reqs_available -= ctx->req_batch;
943 atomic_add(ctx->req_batch, &ctx->reqs_available);
944 }
945
946 local_irq_restore(flags);
947}
948
949static bool __get_reqs_available(struct kioctx *ctx)
950{
951 struct kioctx_cpu *kcpu;
952 bool ret = false;
953 unsigned long flags;
954
955 local_irq_save(flags);
956 kcpu = this_cpu_ptr(ctx->cpu);
957 if (!kcpu->reqs_available) {
958 int avail = atomic_read(&ctx->reqs_available);
959
960 do {
961 if (avail < ctx->req_batch)
962 goto out;
963 } while (!atomic_try_cmpxchg(&ctx->reqs_available,
964 &avail, avail - ctx->req_batch));
965
966 kcpu->reqs_available += ctx->req_batch;
967 }
968
969 ret = true;
970 kcpu->reqs_available--;
971out:
972 local_irq_restore(flags);
973 return ret;
974}
975
976/* refill_reqs_available
977 * Updates the reqs_available reference counts used for tracking the
978 * number of free slots in the completion ring. This can be called
979 * from aio_complete() (to optimistically update reqs_available) or
980 * from aio_get_req() (the we're out of events case). It must be
981 * called holding ctx->completion_lock.
982 */
983static void refill_reqs_available(struct kioctx *ctx, unsigned head,
984 unsigned tail)
985{
986 unsigned events_in_ring, completed;
987
988 /* Clamp head since userland can write to it. */
989 head %= ctx->nr_events;
990 if (head <= tail)
991 events_in_ring = tail - head;
992 else
993 events_in_ring = ctx->nr_events - (head - tail);
994
995 completed = ctx->completed_events;
996 if (events_in_ring < completed)
997 completed -= events_in_ring;
998 else
999 completed = 0;
1000
1001 if (!completed)
1002 return;
1003
1004 ctx->completed_events -= completed;
1005 put_reqs_available(ctx, completed);
1006}
1007
1008/* user_refill_reqs_available
1009 * Called to refill reqs_available when aio_get_req() encounters an
1010 * out of space in the completion ring.
1011 */
1012static void user_refill_reqs_available(struct kioctx *ctx)
1013{
1014 spin_lock_irq(&ctx->completion_lock);
1015 if (ctx->completed_events) {
1016 struct aio_ring *ring;
1017 unsigned head;
1018
1019 /* Access of ring->head may race with aio_read_events_ring()
1020 * here, but that's okay since whether we read the old version
1021 * or the new version, and either will be valid. The important
1022 * part is that head cannot pass tail since we prevent
1023 * aio_complete() from updating tail by holding
1024 * ctx->completion_lock. Even if head is invalid, the check
1025 * against ctx->completed_events below will make sure we do the
1026 * safe/right thing.
1027 */
1028 ring = kmap_atomic(ctx->ring_pages[0]);
1029 head = ring->head;
1030 kunmap_atomic(ring);
1031
1032 refill_reqs_available(ctx, head, ctx->tail);
1033 }
1034
1035 spin_unlock_irq(&ctx->completion_lock);
1036}
1037
1038static bool get_reqs_available(struct kioctx *ctx)
1039{
1040 if (__get_reqs_available(ctx))
1041 return true;
1042 user_refill_reqs_available(ctx);
1043 return __get_reqs_available(ctx);
1044}
1045
1046/* aio_get_req
1047 * Allocate a slot for an aio request.
1048 * Returns NULL if no requests are free.
1049 *
1050 * The refcount is initialized to 2 - one for the async op completion,
1051 * one for the synchronous code that does this.
1052 */
1053static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1054{
1055 struct aio_kiocb *req;
1056
1057 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
1058 if (unlikely(!req))
1059 return NULL;
1060
1061 if (unlikely(!get_reqs_available(ctx))) {
1062 kmem_cache_free(kiocb_cachep, req);
1063 return NULL;
1064 }
1065
1066 percpu_ref_get(&ctx->reqs);
1067 req->ki_ctx = ctx;
1068 INIT_LIST_HEAD(&req->ki_list);
1069 refcount_set(&req->ki_refcnt, 2);
1070 req->ki_eventfd = NULL;
1071 return req;
1072}
1073
1074static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1075{
1076 struct aio_ring __user *ring = (void __user *)ctx_id;
1077 struct mm_struct *mm = current->mm;
1078 struct kioctx *ctx, *ret = NULL;
1079 struct kioctx_table *table;
1080 unsigned id;
1081
1082 if (get_user(id, &ring->id))
1083 return NULL;
1084
1085 rcu_read_lock();
1086 table = rcu_dereference(mm->ioctx_table);
1087
1088 if (!table || id >= table->nr)
1089 goto out;
1090
1091 id = array_index_nospec(id, table->nr);
1092 ctx = rcu_dereference(table->table[id]);
1093 if (ctx && ctx->user_id == ctx_id) {
1094 if (percpu_ref_tryget_live(&ctx->users))
1095 ret = ctx;
1096 }
1097out:
1098 rcu_read_unlock();
1099 return ret;
1100}
1101
1102static inline void iocb_destroy(struct aio_kiocb *iocb)
1103{
1104 if (iocb->ki_eventfd)
1105 eventfd_ctx_put(iocb->ki_eventfd);
1106 if (iocb->ki_filp)
1107 fput(iocb->ki_filp);
1108 percpu_ref_put(&iocb->ki_ctx->reqs);
1109 kmem_cache_free(kiocb_cachep, iocb);
1110}
1111
1112/* aio_complete
1113 * Called when the io request on the given iocb is complete.
1114 */
1115static void aio_complete(struct aio_kiocb *iocb)
1116{
1117 struct kioctx *ctx = iocb->ki_ctx;
1118 struct aio_ring *ring;
1119 struct io_event *ev_page, *event;
1120 unsigned tail, pos, head;
1121 unsigned long flags;
1122
1123 /*
1124 * Add a completion event to the ring buffer. Must be done holding
1125 * ctx->completion_lock to prevent other code from messing with the tail
1126 * pointer since we might be called from irq context.
1127 */
1128 spin_lock_irqsave(&ctx->completion_lock, flags);
1129
1130 tail = ctx->tail;
1131 pos = tail + AIO_EVENTS_OFFSET;
1132
1133 if (++tail >= ctx->nr_events)
1134 tail = 0;
1135
1136 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1137 event = ev_page + pos % AIO_EVENTS_PER_PAGE;
1138
1139 *event = iocb->ki_res;
1140
1141 kunmap_atomic(ev_page);
1142 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1143
1144 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
1145 (void __user *)(unsigned long)iocb->ki_res.obj,
1146 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
1147
1148 /* after flagging the request as done, we
1149 * must never even look at it again
1150 */
1151 smp_wmb(); /* make event visible before updating tail */
1152
1153 ctx->tail = tail;
1154
1155 ring = kmap_atomic(ctx->ring_pages[0]);
1156 head = ring->head;
1157 ring->tail = tail;
1158 kunmap_atomic(ring);
1159 flush_dcache_page(ctx->ring_pages[0]);
1160
1161 ctx->completed_events++;
1162 if (ctx->completed_events > 1)
1163 refill_reqs_available(ctx, head, tail);
1164 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1165
1166 pr_debug("added to ring %p at [%u]\n", iocb, tail);
1167
1168 /*
1169 * Check if the user asked us to deliver the result through an
1170 * eventfd. The eventfd_signal() function is safe to be called
1171 * from IRQ context.
1172 */
1173 if (iocb->ki_eventfd)
1174 eventfd_signal(iocb->ki_eventfd, 1);
1175
1176 /*
1177 * We have to order our ring_info tail store above and test
1178 * of the wait list below outside the wait lock. This is
1179 * like in wake_up_bit() where clearing a bit has to be
1180 * ordered with the unlocked test.
1181 */
1182 smp_mb();
1183
1184 if (waitqueue_active(&ctx->wait))
1185 wake_up(&ctx->wait);
1186}
1187
1188static inline void iocb_put(struct aio_kiocb *iocb)
1189{
1190 if (refcount_dec_and_test(&iocb->ki_refcnt)) {
1191 aio_complete(iocb);
1192 iocb_destroy(iocb);
1193 }
1194}
1195
1196/* aio_read_events_ring
1197 * Pull an event off of the ioctx's event ring. Returns the number of
1198 * events fetched
1199 */
1200static long aio_read_events_ring(struct kioctx *ctx,
1201 struct io_event __user *event, long nr)
1202{
1203 struct aio_ring *ring;
1204 unsigned head, tail, pos;
1205 long ret = 0;
1206 int copy_ret;
1207
1208 /*
1209 * The mutex can block and wake us up and that will cause
1210 * wait_event_interruptible_hrtimeout() to schedule without sleeping
1211 * and repeat. This should be rare enough that it doesn't cause
1212 * peformance issues. See the comment in read_events() for more detail.
1213 */
1214 sched_annotate_sleep();
1215 mutex_lock(&ctx->ring_lock);
1216
1217 /* Access to ->ring_pages here is protected by ctx->ring_lock. */
1218 ring = kmap_atomic(ctx->ring_pages[0]);
1219 head = ring->head;
1220 tail = ring->tail;
1221 kunmap_atomic(ring);
1222
1223 /*
1224 * Ensure that once we've read the current tail pointer, that
1225 * we also see the events that were stored up to the tail.
1226 */
1227 smp_rmb();
1228
1229 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
1230
1231 if (head == tail)
1232 goto out;
1233
1234 head %= ctx->nr_events;
1235 tail %= ctx->nr_events;
1236
1237 while (ret < nr) {
1238 long avail;
1239 struct io_event *ev;
1240 struct page *page;
1241
1242 avail = (head <= tail ? tail : ctx->nr_events) - head;
1243 if (head == tail)
1244 break;
1245
1246 pos = head + AIO_EVENTS_OFFSET;
1247 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
1248 pos %= AIO_EVENTS_PER_PAGE;
1249
1250 avail = min(avail, nr - ret);
1251 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
1252
1253 ev = kmap(page);
1254 copy_ret = copy_to_user(event + ret, ev + pos,
1255 sizeof(*ev) * avail);
1256 kunmap(page);
1257
1258 if (unlikely(copy_ret)) {
1259 ret = -EFAULT;
1260 goto out;
1261 }
1262
1263 ret += avail;
1264 head += avail;
1265 head %= ctx->nr_events;
1266 }
1267
1268 ring = kmap_atomic(ctx->ring_pages[0]);
1269 ring->head = head;
1270 kunmap_atomic(ring);
1271 flush_dcache_page(ctx->ring_pages[0]);
1272
1273 pr_debug("%li h%u t%u\n", ret, head, tail);
1274out:
1275 mutex_unlock(&ctx->ring_lock);
1276
1277 return ret;
1278}
1279
1280static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
1281 struct io_event __user *event, long *i)
1282{
1283 long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
1284
1285 if (ret > 0)
1286 *i += ret;
1287
1288 if (unlikely(atomic_read(&ctx->dead)))
1289 ret = -EINVAL;
1290
1291 if (!*i)
1292 *i = ret;
1293
1294 return ret < 0 || *i >= min_nr;
1295}
1296
1297static long read_events(struct kioctx *ctx, long min_nr, long nr,
1298 struct io_event __user *event,
1299 ktime_t until)
1300{
1301 long ret = 0;
1302
1303 /*
1304 * Note that aio_read_events() is being called as the conditional - i.e.
1305 * we're calling it after prepare_to_wait() has set task state to
1306 * TASK_INTERRUPTIBLE.
1307 *
1308 * But aio_read_events() can block, and if it blocks it's going to flip
1309 * the task state back to TASK_RUNNING.
1310 *
1311 * This should be ok, provided it doesn't flip the state back to
1312 * TASK_RUNNING and return 0 too much - that causes us to spin. That
1313 * will only happen if the mutex_lock() call blocks, and we then find
1314 * the ringbuffer empty. So in practice we should be ok, but it's
1315 * something to be aware of when touching this code.
1316 */
1317 if (until == 0)
1318 aio_read_events(ctx, min_nr, nr, event, &ret);
1319 else
1320 wait_event_interruptible_hrtimeout(ctx->wait,
1321 aio_read_events(ctx, min_nr, nr, event, &ret),
1322 until);
1323 return ret;
1324}
1325
1326/* sys_io_setup:
1327 * Create an aio_context capable of receiving at least nr_events.
1328 * ctxp must not point to an aio_context that already exists, and
1329 * must be initialized to 0 prior to the call. On successful
1330 * creation of the aio_context, *ctxp is filled in with the resulting
1331 * handle. May fail with -EINVAL if *ctxp is not initialized,
1332 * if the specified nr_events exceeds internal limits. May fail
1333 * with -EAGAIN if the specified nr_events exceeds the user's limit
1334 * of available events. May fail with -ENOMEM if insufficient kernel
1335 * resources are available. May fail with -EFAULT if an invalid
1336 * pointer is passed for ctxp. Will fail with -ENOSYS if not
1337 * implemented.
1338 */
1339SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1340{
1341 struct kioctx *ioctx = NULL;
1342 unsigned long ctx;
1343 long ret;
1344
1345 ret = get_user(ctx, ctxp);
1346 if (unlikely(ret))
1347 goto out;
1348
1349 ret = -EINVAL;
1350 if (unlikely(ctx || nr_events == 0)) {
1351 pr_debug("EINVAL: ctx %lu nr_events %u\n",
1352 ctx, nr_events);
1353 goto out;
1354 }
1355
1356 ioctx = ioctx_alloc(nr_events);
1357 ret = PTR_ERR(ioctx);
1358 if (!IS_ERR(ioctx)) {
1359 ret = put_user(ioctx->user_id, ctxp);
1360 if (ret)
1361 kill_ioctx(current->mm, ioctx, NULL);
1362 percpu_ref_put(&ioctx->users);
1363 }
1364
1365out:
1366 return ret;
1367}
1368
1369#ifdef CONFIG_COMPAT
1370COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
1371{
1372 struct kioctx *ioctx = NULL;
1373 unsigned long ctx;
1374 long ret;
1375
1376 ret = get_user(ctx, ctx32p);
1377 if (unlikely(ret))
1378 goto out;
1379
1380 ret = -EINVAL;
1381 if (unlikely(ctx || nr_events == 0)) {
1382 pr_debug("EINVAL: ctx %lu nr_events %u\n",
1383 ctx, nr_events);
1384 goto out;
1385 }
1386
1387 ioctx = ioctx_alloc(nr_events);
1388 ret = PTR_ERR(ioctx);
1389 if (!IS_ERR(ioctx)) {
1390 /* truncating is ok because it's a user address */
1391 ret = put_user((u32)ioctx->user_id, ctx32p);
1392 if (ret)
1393 kill_ioctx(current->mm, ioctx, NULL);
1394 percpu_ref_put(&ioctx->users);
1395 }
1396
1397out:
1398 return ret;
1399}
1400#endif
1401
1402/* sys_io_destroy:
1403 * Destroy the aio_context specified. May cancel any outstanding
1404 * AIOs and block on completion. Will fail with -ENOSYS if not
1405 * implemented. May fail with -EINVAL if the context pointed to
1406 * is invalid.
1407 */
1408SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1409{
1410 struct kioctx *ioctx = lookup_ioctx(ctx);
1411 if (likely(NULL != ioctx)) {
1412 struct ctx_rq_wait wait;
1413 int ret;
1414
1415 init_completion(&wait.comp);
1416 atomic_set(&wait.count, 1);
1417
1418 /* Pass requests_done to kill_ioctx() where it can be set
1419 * in a thread-safe way. If we try to set it here then we have
1420 * a race condition if two io_destroy() called simultaneously.
1421 */
1422 ret = kill_ioctx(current->mm, ioctx, &wait);
1423 percpu_ref_put(&ioctx->users);
1424
1425 /* Wait until all IO for the context are done. Otherwise kernel
1426 * keep using user-space buffers even if user thinks the context
1427 * is destroyed.
1428 */
1429 if (!ret)
1430 wait_for_completion(&wait.comp);
1431
1432 return ret;
1433 }
1434 pr_debug("EINVAL: invalid context id\n");
1435 return -EINVAL;
1436}
1437
1438static void aio_remove_iocb(struct aio_kiocb *iocb)
1439{
1440 struct kioctx *ctx = iocb->ki_ctx;
1441 unsigned long flags;
1442
1443 spin_lock_irqsave(&ctx->ctx_lock, flags);
1444 list_del(&iocb->ki_list);
1445 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1446}
1447
1448static void aio_complete_rw(struct kiocb *kiocb, long res)
1449{
1450 struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
1451
1452 if (!list_empty_careful(&iocb->ki_list))
1453 aio_remove_iocb(iocb);
1454
1455 if (kiocb->ki_flags & IOCB_WRITE) {
1456 struct inode *inode = file_inode(kiocb->ki_filp);
1457
1458 /*
1459 * Tell lockdep we inherited freeze protection from submission
1460 * thread.
1461 */
1462 if (S_ISREG(inode->i_mode))
1463 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
1464 file_end_write(kiocb->ki_filp);
1465 }
1466
1467 iocb->ki_res.res = res;
1468 iocb->ki_res.res2 = 0;
1469 iocb_put(iocb);
1470}
1471
1472static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
1473{
1474 int ret;
1475
1476 req->ki_complete = aio_complete_rw;
1477 req->private = NULL;
1478 req->ki_pos = iocb->aio_offset;
1479 req->ki_flags = req->ki_filp->f_iocb_flags;
1480 if (iocb->aio_flags & IOCB_FLAG_RESFD)
1481 req->ki_flags |= IOCB_EVENTFD;
1482 if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
1483 /*
1484 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
1485 * aio_reqprio is interpreted as an I/O scheduling
1486 * class and priority.
1487 */
1488 ret = ioprio_check_cap(iocb->aio_reqprio);
1489 if (ret) {
1490 pr_debug("aio ioprio check cap error: %d\n", ret);
1491 return ret;
1492 }
1493
1494 req->ki_ioprio = iocb->aio_reqprio;
1495 } else
1496 req->ki_ioprio = get_current_ioprio();
1497
1498 ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
1499 if (unlikely(ret))
1500 return ret;
1501
1502 req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
1503 return 0;
1504}
1505
1506static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
1507 struct iovec **iovec, bool vectored, bool compat,
1508 struct iov_iter *iter)
1509{
1510 void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
1511 size_t len = iocb->aio_nbytes;
1512
1513 if (!vectored) {
1514 ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
1515 *iovec = NULL;
1516 return ret;
1517 }
1518
1519 return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
1520}
1521
1522static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
1523{
1524 switch (ret) {
1525 case -EIOCBQUEUED:
1526 break;
1527 case -ERESTARTSYS:
1528 case -ERESTARTNOINTR:
1529 case -ERESTARTNOHAND:
1530 case -ERESTART_RESTARTBLOCK:
1531 /*
1532 * There's no easy way to restart the syscall since other AIO's
1533 * may be already running. Just fail this IO with EINTR.
1534 */
1535 ret = -EINTR;
1536 fallthrough;
1537 default:
1538 req->ki_complete(req, ret);
1539 }
1540}
1541
1542static int aio_read(struct kiocb *req, const struct iocb *iocb,
1543 bool vectored, bool compat)
1544{
1545 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1546 struct iov_iter iter;
1547 struct file *file;
1548 int ret;
1549
1550 ret = aio_prep_rw(req, iocb);
1551 if (ret)
1552 return ret;
1553 file = req->ki_filp;
1554 if (unlikely(!(file->f_mode & FMODE_READ)))
1555 return -EBADF;
1556 if (unlikely(!file->f_op->read_iter))
1557 return -EINVAL;
1558
1559 ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
1560 if (ret < 0)
1561 return ret;
1562 ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1563 if (!ret)
1564 aio_rw_done(req, call_read_iter(file, req, &iter));
1565 kfree(iovec);
1566 return ret;
1567}
1568
1569static int aio_write(struct kiocb *req, const struct iocb *iocb,
1570 bool vectored, bool compat)
1571{
1572 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1573 struct iov_iter iter;
1574 struct file *file;
1575 int ret;
1576
1577 ret = aio_prep_rw(req, iocb);
1578 if (ret)
1579 return ret;
1580 file = req->ki_filp;
1581
1582 if (unlikely(!(file->f_mode & FMODE_WRITE)))
1583 return -EBADF;
1584 if (unlikely(!file->f_op->write_iter))
1585 return -EINVAL;
1586
1587 ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
1588 if (ret < 0)
1589 return ret;
1590 ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1591 if (!ret) {
1592 /*
1593 * Open-code file_start_write here to grab freeze protection,
1594 * which will be released by another thread in
1595 * aio_complete_rw(). Fool lockdep by telling it the lock got
1596 * released so that it doesn't complain about the held lock when
1597 * we return to userspace.
1598 */
1599 if (S_ISREG(file_inode(file)->i_mode)) {
1600 sb_start_write(file_inode(file)->i_sb);
1601 __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
1602 }
1603 req->ki_flags |= IOCB_WRITE;
1604 aio_rw_done(req, call_write_iter(file, req, &iter));
1605 }
1606 kfree(iovec);
1607 return ret;
1608}
1609
1610static void aio_fsync_work(struct work_struct *work)
1611{
1612 struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1613 const struct cred *old_cred = override_creds(iocb->fsync.creds);
1614
1615 iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1616 revert_creds(old_cred);
1617 put_cred(iocb->fsync.creds);
1618 iocb_put(iocb);
1619}
1620
1621static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1622 bool datasync)
1623{
1624 if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
1625 iocb->aio_rw_flags))
1626 return -EINVAL;
1627
1628 if (unlikely(!req->file->f_op->fsync))
1629 return -EINVAL;
1630
1631 req->creds = prepare_creds();
1632 if (!req->creds)
1633 return -ENOMEM;
1634
1635 req->datasync = datasync;
1636 INIT_WORK(&req->work, aio_fsync_work);
1637 schedule_work(&req->work);
1638 return 0;
1639}
1640
1641static void aio_poll_put_work(struct work_struct *work)
1642{
1643 struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1644 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1645
1646 iocb_put(iocb);
1647}
1648
1649/*
1650 * Safely lock the waitqueue which the request is on, synchronizing with the
1651 * case where the ->poll() provider decides to free its waitqueue early.
1652 *
1653 * Returns true on success, meaning that req->head->lock was locked, req->wait
1654 * is on req->head, and an RCU read lock was taken. Returns false if the
1655 * request was already removed from its waitqueue (which might no longer exist).
1656 */
1657static bool poll_iocb_lock_wq(struct poll_iocb *req)
1658{
1659 wait_queue_head_t *head;
1660
1661 /*
1662 * While we hold the waitqueue lock and the waitqueue is nonempty,
1663 * wake_up_pollfree() will wait for us. However, taking the waitqueue
1664 * lock in the first place can race with the waitqueue being freed.
1665 *
1666 * We solve this as eventpoll does: by taking advantage of the fact that
1667 * all users of wake_up_pollfree() will RCU-delay the actual free. If
1668 * we enter rcu_read_lock() and see that the pointer to the queue is
1669 * non-NULL, we can then lock it without the memory being freed out from
1670 * under us, then check whether the request is still on the queue.
1671 *
1672 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
1673 * case the caller deletes the entry from the queue, leaving it empty.
1674 * In that case, only RCU prevents the queue memory from being freed.
1675 */
1676 rcu_read_lock();
1677 head = smp_load_acquire(&req->head);
1678 if (head) {
1679 spin_lock(&head->lock);
1680 if (!list_empty(&req->wait.entry))
1681 return true;
1682 spin_unlock(&head->lock);
1683 }
1684 rcu_read_unlock();
1685 return false;
1686}
1687
1688static void poll_iocb_unlock_wq(struct poll_iocb *req)
1689{
1690 spin_unlock(&req->head->lock);
1691 rcu_read_unlock();
1692}
1693
1694static void aio_poll_complete_work(struct work_struct *work)
1695{
1696 struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1697 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1698 struct poll_table_struct pt = { ._key = req->events };
1699 struct kioctx *ctx = iocb->ki_ctx;
1700 __poll_t mask = 0;
1701
1702 if (!READ_ONCE(req->cancelled))
1703 mask = vfs_poll(req->file, &pt) & req->events;
1704
1705 /*
1706 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1707 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
1708 * synchronize with them. In the cancellation case the list_del_init
1709 * itself is not actually needed, but harmless so we keep it in to
1710 * avoid further branches in the fast path.
1711 */
1712 spin_lock_irq(&ctx->ctx_lock);
1713 if (poll_iocb_lock_wq(req)) {
1714 if (!mask && !READ_ONCE(req->cancelled)) {
1715 /*
1716 * The request isn't actually ready to be completed yet.
1717 * Reschedule completion if another wakeup came in.
1718 */
1719 if (req->work_need_resched) {
1720 schedule_work(&req->work);
1721 req->work_need_resched = false;
1722 } else {
1723 req->work_scheduled = false;
1724 }
1725 poll_iocb_unlock_wq(req);
1726 spin_unlock_irq(&ctx->ctx_lock);
1727 return;
1728 }
1729 list_del_init(&req->wait.entry);
1730 poll_iocb_unlock_wq(req);
1731 } /* else, POLLFREE has freed the waitqueue, so we must complete */
1732 list_del_init(&iocb->ki_list);
1733 iocb->ki_res.res = mangle_poll(mask);
1734 spin_unlock_irq(&ctx->ctx_lock);
1735
1736 iocb_put(iocb);
1737}
1738
1739/* assumes we are called with irqs disabled */
1740static int aio_poll_cancel(struct kiocb *iocb)
1741{
1742 struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
1743 struct poll_iocb *req = &aiocb->poll;
1744
1745 if (poll_iocb_lock_wq(req)) {
1746 WRITE_ONCE(req->cancelled, true);
1747 if (!req->work_scheduled) {
1748 schedule_work(&aiocb->poll.work);
1749 req->work_scheduled = true;
1750 }
1751 poll_iocb_unlock_wq(req);
1752 } /* else, the request was force-cancelled by POLLFREE already */
1753
1754 return 0;
1755}
1756
1757static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1758 void *key)
1759{
1760 struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
1761 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1762 __poll_t mask = key_to_poll(key);
1763 unsigned long flags;
1764
1765 /* for instances that support it check for an event match first: */
1766 if (mask && !(mask & req->events))
1767 return 0;
1768
1769 /*
1770 * Complete the request inline if possible. This requires that three
1771 * conditions be met:
1772 * 1. An event mask must have been passed. If a plain wakeup was done
1773 * instead, then mask == 0 and we have to call vfs_poll() to get
1774 * the events, so inline completion isn't possible.
1775 * 2. The completion work must not have already been scheduled.
1776 * 3. ctx_lock must not be busy. We have to use trylock because we
1777 * already hold the waitqueue lock, so this inverts the normal
1778 * locking order. Use irqsave/irqrestore because not all
1779 * filesystems (e.g. fuse) call this function with IRQs disabled,
1780 * yet IRQs have to be disabled before ctx_lock is obtained.
1781 */
1782 if (mask && !req->work_scheduled &&
1783 spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1784 struct kioctx *ctx = iocb->ki_ctx;
1785
1786 list_del_init(&req->wait.entry);
1787 list_del(&iocb->ki_list);
1788 iocb->ki_res.res = mangle_poll(mask);
1789 if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
1790 iocb = NULL;
1791 INIT_WORK(&req->work, aio_poll_put_work);
1792 schedule_work(&req->work);
1793 }
1794 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1795 if (iocb)
1796 iocb_put(iocb);
1797 } else {
1798 /*
1799 * Schedule the completion work if needed. If it was already
1800 * scheduled, record that another wakeup came in.
1801 *
1802 * Don't remove the request from the waitqueue here, as it might
1803 * not actually be complete yet (we won't know until vfs_poll()
1804 * is called), and we must not miss any wakeups. POLLFREE is an
1805 * exception to this; see below.
1806 */
1807 if (req->work_scheduled) {
1808 req->work_need_resched = true;
1809 } else {
1810 schedule_work(&req->work);
1811 req->work_scheduled = true;
1812 }
1813
1814 /*
1815 * If the waitqueue is being freed early but we can't complete
1816 * the request inline, we have to tear down the request as best
1817 * we can. That means immediately removing the request from its
1818 * waitqueue and preventing all further accesses to the
1819 * waitqueue via the request. We also need to schedule the
1820 * completion work (done above). Also mark the request as
1821 * cancelled, to potentially skip an unneeded call to ->poll().
1822 */
1823 if (mask & POLLFREE) {
1824 WRITE_ONCE(req->cancelled, true);
1825 list_del_init(&req->wait.entry);
1826
1827 /*
1828 * Careful: this *must* be the last step, since as soon
1829 * as req->head is NULL'ed out, the request can be
1830 * completed and freed, since aio_poll_complete_work()
1831 * will no longer need to take the waitqueue lock.
1832 */
1833 smp_store_release(&req->head, NULL);
1834 }
1835 }
1836 return 1;
1837}
1838
1839struct aio_poll_table {
1840 struct poll_table_struct pt;
1841 struct aio_kiocb *iocb;
1842 bool queued;
1843 int error;
1844};
1845
1846static void
1847aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1848 struct poll_table_struct *p)
1849{
1850 struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
1851
1852 /* multiple wait queues per file are not supported */
1853 if (unlikely(pt->queued)) {
1854 pt->error = -EINVAL;
1855 return;
1856 }
1857
1858 pt->queued = true;
1859 pt->error = 0;
1860 pt->iocb->poll.head = head;
1861 add_wait_queue(head, &pt->iocb->poll.wait);
1862}
1863
1864static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1865{
1866 struct kioctx *ctx = aiocb->ki_ctx;
1867 struct poll_iocb *req = &aiocb->poll;
1868 struct aio_poll_table apt;
1869 bool cancel = false;
1870 __poll_t mask;
1871
1872 /* reject any unknown events outside the normal event mask. */
1873 if ((u16)iocb->aio_buf != iocb->aio_buf)
1874 return -EINVAL;
1875 /* reject fields that are not defined for poll */
1876 if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
1877 return -EINVAL;
1878
1879 INIT_WORK(&req->work, aio_poll_complete_work);
1880 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1881
1882 req->head = NULL;
1883 req->cancelled = false;
1884 req->work_scheduled = false;
1885 req->work_need_resched = false;
1886
1887 apt.pt._qproc = aio_poll_queue_proc;
1888 apt.pt._key = req->events;
1889 apt.iocb = aiocb;
1890 apt.queued = false;
1891 apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1892
1893 /* initialized the list so that we can do list_empty checks */
1894 INIT_LIST_HEAD(&req->wait.entry);
1895 init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1896
1897 mask = vfs_poll(req->file, &apt.pt) & req->events;
1898 spin_lock_irq(&ctx->ctx_lock);
1899 if (likely(apt.queued)) {
1900 bool on_queue = poll_iocb_lock_wq(req);
1901
1902 if (!on_queue || req->work_scheduled) {
1903 /*
1904 * aio_poll_wake() already either scheduled the async
1905 * completion work, or completed the request inline.
1906 */
1907 if (apt.error) /* unsupported case: multiple queues */
1908 cancel = true;
1909 apt.error = 0;
1910 mask = 0;
1911 }
1912 if (mask || apt.error) {
1913 /* Steal to complete synchronously. */
1914 list_del_init(&req->wait.entry);
1915 } else if (cancel) {
1916 /* Cancel if possible (may be too late though). */
1917 WRITE_ONCE(req->cancelled, true);
1918 } else if (on_queue) {
1919 /*
1920 * Actually waiting for an event, so add the request to
1921 * active_reqs so that it can be cancelled if needed.
1922 */
1923 list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1924 aiocb->ki_cancel = aio_poll_cancel;
1925 }
1926 if (on_queue)
1927 poll_iocb_unlock_wq(req);
1928 }
1929 if (mask) { /* no async, we'd stolen it */
1930 aiocb->ki_res.res = mangle_poll(mask);
1931 apt.error = 0;
1932 }
1933 spin_unlock_irq(&ctx->ctx_lock);
1934 if (mask)
1935 iocb_put(aiocb);
1936 return apt.error;
1937}
1938
1939static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1940 struct iocb __user *user_iocb, struct aio_kiocb *req,
1941 bool compat)
1942{
1943 req->ki_filp = fget(iocb->aio_fildes);
1944 if (unlikely(!req->ki_filp))
1945 return -EBADF;
1946
1947 if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1948 struct eventfd_ctx *eventfd;
1949 /*
1950 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1951 * instance of the file* now. The file descriptor must be
1952 * an eventfd() fd, and will be signaled for each completed
1953 * event using the eventfd_signal() function.
1954 */
1955 eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
1956 if (IS_ERR(eventfd))
1957 return PTR_ERR(eventfd);
1958
1959 req->ki_eventfd = eventfd;
1960 }
1961
1962 if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
1963 pr_debug("EFAULT: aio_key\n");
1964 return -EFAULT;
1965 }
1966
1967 req->ki_res.obj = (u64)(unsigned long)user_iocb;
1968 req->ki_res.data = iocb->aio_data;
1969 req->ki_res.res = 0;
1970 req->ki_res.res2 = 0;
1971
1972 switch (iocb->aio_lio_opcode) {
1973 case IOCB_CMD_PREAD:
1974 return aio_read(&req->rw, iocb, false, compat);
1975 case IOCB_CMD_PWRITE:
1976 return aio_write(&req->rw, iocb, false, compat);
1977 case IOCB_CMD_PREADV:
1978 return aio_read(&req->rw, iocb, true, compat);
1979 case IOCB_CMD_PWRITEV:
1980 return aio_write(&req->rw, iocb, true, compat);
1981 case IOCB_CMD_FSYNC:
1982 return aio_fsync(&req->fsync, iocb, false);
1983 case IOCB_CMD_FDSYNC:
1984 return aio_fsync(&req->fsync, iocb, true);
1985 case IOCB_CMD_POLL:
1986 return aio_poll(req, iocb);
1987 default:
1988 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
1989 return -EINVAL;
1990 }
1991}
1992
1993static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1994 bool compat)
1995{
1996 struct aio_kiocb *req;
1997 struct iocb iocb;
1998 int err;
1999
2000 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
2001 return -EFAULT;
2002
2003 /* enforce forwards compatibility on users */
2004 if (unlikely(iocb.aio_reserved2)) {
2005 pr_debug("EINVAL: reserve field set\n");
2006 return -EINVAL;
2007 }
2008
2009 /* prevent overflows */
2010 if (unlikely(
2011 (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
2012 (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
2013 ((ssize_t)iocb.aio_nbytes < 0)
2014 )) {
2015 pr_debug("EINVAL: overflow check\n");
2016 return -EINVAL;
2017 }
2018
2019 req = aio_get_req(ctx);
2020 if (unlikely(!req))
2021 return -EAGAIN;
2022
2023 err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
2024
2025 /* Done with the synchronous reference */
2026 iocb_put(req);
2027
2028 /*
2029 * If err is 0, we'd either done aio_complete() ourselves or have
2030 * arranged for that to be done asynchronously. Anything non-zero
2031 * means that we need to destroy req ourselves.
2032 */
2033 if (unlikely(err)) {
2034 iocb_destroy(req);
2035 put_reqs_available(ctx, 1);
2036 }
2037 return err;
2038}
2039
2040/* sys_io_submit:
2041 * Queue the nr iocbs pointed to by iocbpp for processing. Returns
2042 * the number of iocbs queued. May return -EINVAL if the aio_context
2043 * specified by ctx_id is invalid, if nr is < 0, if the iocb at
2044 * *iocbpp[0] is not properly initialized, if the operation specified
2045 * is invalid for the file descriptor in the iocb. May fail with
2046 * -EFAULT if any of the data structures point to invalid data. May
2047 * fail with -EBADF if the file descriptor specified in the first
2048 * iocb is invalid. May fail with -EAGAIN if insufficient resources
2049 * are available to queue any iocbs. Will return 0 if nr is 0. Will
2050 * fail with -ENOSYS if not implemented.
2051 */
2052SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
2053 struct iocb __user * __user *, iocbpp)
2054{
2055 struct kioctx *ctx;
2056 long ret = 0;
2057 int i = 0;
2058 struct blk_plug plug;
2059
2060 if (unlikely(nr < 0))
2061 return -EINVAL;
2062
2063 ctx = lookup_ioctx(ctx_id);
2064 if (unlikely(!ctx)) {
2065 pr_debug("EINVAL: invalid context id\n");
2066 return -EINVAL;
2067 }
2068
2069 if (nr > ctx->nr_events)
2070 nr = ctx->nr_events;
2071
2072 if (nr > AIO_PLUG_THRESHOLD)
2073 blk_start_plug(&plug);
2074 for (i = 0; i < nr; i++) {
2075 struct iocb __user *user_iocb;
2076
2077 if (unlikely(get_user(user_iocb, iocbpp + i))) {
2078 ret = -EFAULT;
2079 break;
2080 }
2081
2082 ret = io_submit_one(ctx, user_iocb, false);
2083 if (ret)
2084 break;
2085 }
2086 if (nr > AIO_PLUG_THRESHOLD)
2087 blk_finish_plug(&plug);
2088
2089 percpu_ref_put(&ctx->users);
2090 return i ? i : ret;
2091}
2092
2093#ifdef CONFIG_COMPAT
2094COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
2095 int, nr, compat_uptr_t __user *, iocbpp)
2096{
2097 struct kioctx *ctx;
2098 long ret = 0;
2099 int i = 0;
2100 struct blk_plug plug;
2101
2102 if (unlikely(nr < 0))
2103 return -EINVAL;
2104
2105 ctx = lookup_ioctx(ctx_id);
2106 if (unlikely(!ctx)) {
2107 pr_debug("EINVAL: invalid context id\n");
2108 return -EINVAL;
2109 }
2110
2111 if (nr > ctx->nr_events)
2112 nr = ctx->nr_events;
2113
2114 if (nr > AIO_PLUG_THRESHOLD)
2115 blk_start_plug(&plug);
2116 for (i = 0; i < nr; i++) {
2117 compat_uptr_t user_iocb;
2118
2119 if (unlikely(get_user(user_iocb, iocbpp + i))) {
2120 ret = -EFAULT;
2121 break;
2122 }
2123
2124 ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
2125 if (ret)
2126 break;
2127 }
2128 if (nr > AIO_PLUG_THRESHOLD)
2129 blk_finish_plug(&plug);
2130
2131 percpu_ref_put(&ctx->users);
2132 return i ? i : ret;
2133}
2134#endif
2135
2136/* sys_io_cancel:
2137 * Attempts to cancel an iocb previously passed to io_submit. If
2138 * the operation is successfully cancelled, the resulting event is
2139 * copied into the memory pointed to by result without being placed
2140 * into the completion queue and 0 is returned. May fail with
2141 * -EFAULT if any of the data structures pointed to are invalid.
2142 * May fail with -EINVAL if aio_context specified by ctx_id is
2143 * invalid. May fail with -EAGAIN if the iocb specified was not
2144 * cancelled. Will fail with -ENOSYS if not implemented.
2145 */
2146SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2147 struct io_event __user *, result)
2148{
2149 struct kioctx *ctx;
2150 struct aio_kiocb *kiocb;
2151 int ret = -EINVAL;
2152 u32 key;
2153 u64 obj = (u64)(unsigned long)iocb;
2154
2155 if (unlikely(get_user(key, &iocb->aio_key)))
2156 return -EFAULT;
2157 if (unlikely(key != KIOCB_KEY))
2158 return -EINVAL;
2159
2160 ctx = lookup_ioctx(ctx_id);
2161 if (unlikely(!ctx))
2162 return -EINVAL;
2163
2164 spin_lock_irq(&ctx->ctx_lock);
2165 /* TODO: use a hash or array, this sucks. */
2166 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
2167 if (kiocb->ki_res.obj == obj) {
2168 ret = kiocb->ki_cancel(&kiocb->rw);
2169 list_del_init(&kiocb->ki_list);
2170 break;
2171 }
2172 }
2173 spin_unlock_irq(&ctx->ctx_lock);
2174
2175 if (!ret) {
2176 /*
2177 * The result argument is no longer used - the io_event is
2178 * always delivered via the ring buffer. -EINPROGRESS indicates
2179 * cancellation is progress:
2180 */
2181 ret = -EINPROGRESS;
2182 }
2183
2184 percpu_ref_put(&ctx->users);
2185
2186 return ret;
2187}
2188
2189static long do_io_getevents(aio_context_t ctx_id,
2190 long min_nr,
2191 long nr,
2192 struct io_event __user *events,
2193 struct timespec64 *ts)
2194{
2195 ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
2196 struct kioctx *ioctx = lookup_ioctx(ctx_id);
2197 long ret = -EINVAL;
2198
2199 if (likely(ioctx)) {
2200 if (likely(min_nr <= nr && min_nr >= 0))
2201 ret = read_events(ioctx, min_nr, nr, events, until);
2202 percpu_ref_put(&ioctx->users);
2203 }
2204
2205 return ret;
2206}
2207
2208/* io_getevents:
2209 * Attempts to read at least min_nr events and up to nr events from
2210 * the completion queue for the aio_context specified by ctx_id. If
2211 * it succeeds, the number of read events is returned. May fail with
2212 * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
2213 * out of range, if timeout is out of range. May fail with -EFAULT
2214 * if any of the memory specified is invalid. May return 0 or
2215 * < min_nr if the timeout specified by timeout has elapsed
2216 * before sufficient events are available, where timeout == NULL
2217 * specifies an infinite timeout. Note that the timeout pointed to by
2218 * timeout is relative. Will fail with -ENOSYS if not implemented.
2219 */
2220#ifdef CONFIG_64BIT
2221
2222SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
2223 long, min_nr,
2224 long, nr,
2225 struct io_event __user *, events,
2226 struct __kernel_timespec __user *, timeout)
2227{
2228 struct timespec64 ts;
2229 int ret;
2230
2231 if (timeout && unlikely(get_timespec64(&ts, timeout)))
2232 return -EFAULT;
2233
2234 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2235 if (!ret && signal_pending(current))
2236 ret = -EINTR;
2237 return ret;
2238}
2239
2240#endif
2241
2242struct __aio_sigset {
2243 const sigset_t __user *sigmask;
2244 size_t sigsetsize;
2245};
2246
2247SYSCALL_DEFINE6(io_pgetevents,
2248 aio_context_t, ctx_id,
2249 long, min_nr,
2250 long, nr,
2251 struct io_event __user *, events,
2252 struct __kernel_timespec __user *, timeout,
2253 const struct __aio_sigset __user *, usig)
2254{
2255 struct __aio_sigset ksig = { NULL, };
2256 struct timespec64 ts;
2257 bool interrupted;
2258 int ret;
2259
2260 if (timeout && unlikely(get_timespec64(&ts, timeout)))
2261 return -EFAULT;
2262
2263 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2264 return -EFAULT;
2265
2266 ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2267 if (ret)
2268 return ret;
2269
2270 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2271
2272 interrupted = signal_pending(current);
2273 restore_saved_sigmask_unless(interrupted);
2274 if (interrupted && !ret)
2275 ret = -ERESTARTNOHAND;
2276
2277 return ret;
2278}
2279
2280#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
2281
2282SYSCALL_DEFINE6(io_pgetevents_time32,
2283 aio_context_t, ctx_id,
2284 long, min_nr,
2285 long, nr,
2286 struct io_event __user *, events,
2287 struct old_timespec32 __user *, timeout,
2288 const struct __aio_sigset __user *, usig)
2289{
2290 struct __aio_sigset ksig = { NULL, };
2291 struct timespec64 ts;
2292 bool interrupted;
2293 int ret;
2294
2295 if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
2296 return -EFAULT;
2297
2298 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2299 return -EFAULT;
2300
2301
2302 ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2303 if (ret)
2304 return ret;
2305
2306 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2307
2308 interrupted = signal_pending(current);
2309 restore_saved_sigmask_unless(interrupted);
2310 if (interrupted && !ret)
2311 ret = -ERESTARTNOHAND;
2312
2313 return ret;
2314}
2315
2316#endif
2317
2318#if defined(CONFIG_COMPAT_32BIT_TIME)
2319
2320SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
2321 __s32, min_nr,
2322 __s32, nr,
2323 struct io_event __user *, events,
2324 struct old_timespec32 __user *, timeout)
2325{
2326 struct timespec64 t;
2327 int ret;
2328
2329 if (timeout && get_old_timespec32(&t, timeout))
2330 return -EFAULT;
2331
2332 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2333 if (!ret && signal_pending(current))
2334 ret = -EINTR;
2335 return ret;
2336}
2337
2338#endif
2339
2340#ifdef CONFIG_COMPAT
2341
2342struct __compat_aio_sigset {
2343 compat_uptr_t sigmask;
2344 compat_size_t sigsetsize;
2345};
2346
2347#if defined(CONFIG_COMPAT_32BIT_TIME)
2348
2349COMPAT_SYSCALL_DEFINE6(io_pgetevents,
2350 compat_aio_context_t, ctx_id,
2351 compat_long_t, min_nr,
2352 compat_long_t, nr,
2353 struct io_event __user *, events,
2354 struct old_timespec32 __user *, timeout,
2355 const struct __compat_aio_sigset __user *, usig)
2356{
2357 struct __compat_aio_sigset ksig = { 0, };
2358 struct timespec64 t;
2359 bool interrupted;
2360 int ret;
2361
2362 if (timeout && get_old_timespec32(&t, timeout))
2363 return -EFAULT;
2364
2365 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2366 return -EFAULT;
2367
2368 ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2369 if (ret)
2370 return ret;
2371
2372 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2373
2374 interrupted = signal_pending(current);
2375 restore_saved_sigmask_unless(interrupted);
2376 if (interrupted && !ret)
2377 ret = -ERESTARTNOHAND;
2378
2379 return ret;
2380}
2381
2382#endif
2383
2384COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
2385 compat_aio_context_t, ctx_id,
2386 compat_long_t, min_nr,
2387 compat_long_t, nr,
2388 struct io_event __user *, events,
2389 struct __kernel_timespec __user *, timeout,
2390 const struct __compat_aio_sigset __user *, usig)
2391{
2392 struct __compat_aio_sigset ksig = { 0, };
2393 struct timespec64 t;
2394 bool interrupted;
2395 int ret;
2396
2397 if (timeout && get_timespec64(&t, timeout))
2398 return -EFAULT;
2399
2400 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2401 return -EFAULT;
2402
2403 ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2404 if (ret)
2405 return ret;
2406
2407 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2408
2409 interrupted = signal_pending(current);
2410 restore_saved_sigmask_unless(interrupted);
2411 if (interrupted && !ret)
2412 ret = -ERESTARTNOHAND;
2413
2414 return ret;
2415}
2416#endif
1/*
2 * An async IO implementation for Linux
3 * Written by Benjamin LaHaise <bcrl@kvack.org>
4 *
5 * Implements an efficient asynchronous io interface.
6 *
7 * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
8 * Copyright 2018 Christoph Hellwig.
9 *
10 * See ../COPYING for licensing terms.
11 */
12#define pr_fmt(fmt) "%s: " fmt, __func__
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <linux/time.h>
18#include <linux/aio_abi.h>
19#include <linux/export.h>
20#include <linux/syscalls.h>
21#include <linux/backing-dev.h>
22#include <linux/refcount.h>
23#include <linux/uio.h>
24
25#include <linux/sched/signal.h>
26#include <linux/fs.h>
27#include <linux/file.h>
28#include <linux/mm.h>
29#include <linux/mman.h>
30#include <linux/percpu.h>
31#include <linux/slab.h>
32#include <linux/timer.h>
33#include <linux/aio.h>
34#include <linux/highmem.h>
35#include <linux/workqueue.h>
36#include <linux/security.h>
37#include <linux/eventfd.h>
38#include <linux/blkdev.h>
39#include <linux/compat.h>
40#include <linux/migrate.h>
41#include <linux/ramfs.h>
42#include <linux/percpu-refcount.h>
43#include <linux/mount.h>
44#include <linux/pseudo_fs.h>
45
46#include <linux/uaccess.h>
47#include <linux/nospec.h>
48
49#include "internal.h"
50
51#define KIOCB_KEY 0
52
53#define AIO_RING_MAGIC 0xa10a10a1
54#define AIO_RING_COMPAT_FEATURES 1
55#define AIO_RING_INCOMPAT_FEATURES 0
56struct aio_ring {
57 unsigned id; /* kernel internal index number */
58 unsigned nr; /* number of io_events */
59 unsigned head; /* Written to by userland or under ring_lock
60 * mutex by aio_read_events_ring(). */
61 unsigned tail;
62
63 unsigned magic;
64 unsigned compat_features;
65 unsigned incompat_features;
66 unsigned header_length; /* size of aio_ring */
67
68
69 struct io_event io_events[];
70}; /* 128 bytes + ring size */
71
72/*
73 * Plugging is meant to work with larger batches of IOs. If we don't
74 * have more than the below, then don't bother setting up a plug.
75 */
76#define AIO_PLUG_THRESHOLD 2
77
78#define AIO_RING_PAGES 8
79
80struct kioctx_table {
81 struct rcu_head rcu;
82 unsigned nr;
83 struct kioctx __rcu *table[] __counted_by(nr);
84};
85
86struct kioctx_cpu {
87 unsigned reqs_available;
88};
89
90struct ctx_rq_wait {
91 struct completion comp;
92 atomic_t count;
93};
94
95struct kioctx {
96 struct percpu_ref users;
97 atomic_t dead;
98
99 struct percpu_ref reqs;
100
101 unsigned long user_id;
102
103 struct __percpu kioctx_cpu *cpu;
104
105 /*
106 * For percpu reqs_available, number of slots we move to/from global
107 * counter at a time:
108 */
109 unsigned req_batch;
110 /*
111 * This is what userspace passed to io_setup(), it's not used for
112 * anything but counting against the global max_reqs quota.
113 *
114 * The real limit is nr_events - 1, which will be larger (see
115 * aio_setup_ring())
116 */
117 unsigned max_reqs;
118
119 /* Size of ringbuffer, in units of struct io_event */
120 unsigned nr_events;
121
122 unsigned long mmap_base;
123 unsigned long mmap_size;
124
125 struct page **ring_pages;
126 long nr_pages;
127
128 struct rcu_work free_rwork; /* see free_ioctx() */
129
130 /*
131 * signals when all in-flight requests are done
132 */
133 struct ctx_rq_wait *rq_wait;
134
135 struct {
136 /*
137 * This counts the number of available slots in the ringbuffer,
138 * so we avoid overflowing it: it's decremented (if positive)
139 * when allocating a kiocb and incremented when the resulting
140 * io_event is pulled off the ringbuffer.
141 *
142 * We batch accesses to it with a percpu version.
143 */
144 atomic_t reqs_available;
145 } ____cacheline_aligned_in_smp;
146
147 struct {
148 spinlock_t ctx_lock;
149 struct list_head active_reqs; /* used for cancellation */
150 } ____cacheline_aligned_in_smp;
151
152 struct {
153 struct mutex ring_lock;
154 wait_queue_head_t wait;
155 } ____cacheline_aligned_in_smp;
156
157 struct {
158 unsigned tail;
159 unsigned completed_events;
160 spinlock_t completion_lock;
161 } ____cacheline_aligned_in_smp;
162
163 struct page *internal_pages[AIO_RING_PAGES];
164 struct file *aio_ring_file;
165
166 unsigned id;
167};
168
169/*
170 * First field must be the file pointer in all the
171 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
172 */
173struct fsync_iocb {
174 struct file *file;
175 struct work_struct work;
176 bool datasync;
177 struct cred *creds;
178};
179
180struct poll_iocb {
181 struct file *file;
182 struct wait_queue_head *head;
183 __poll_t events;
184 bool cancelled;
185 bool work_scheduled;
186 bool work_need_resched;
187 struct wait_queue_entry wait;
188 struct work_struct work;
189};
190
191/*
192 * NOTE! Each of the iocb union members has the file pointer
193 * as the first entry in their struct definition. So you can
194 * access the file pointer through any of the sub-structs,
195 * or directly as just 'ki_filp' in this struct.
196 */
197struct aio_kiocb {
198 union {
199 struct file *ki_filp;
200 struct kiocb rw;
201 struct fsync_iocb fsync;
202 struct poll_iocb poll;
203 };
204
205 struct kioctx *ki_ctx;
206 kiocb_cancel_fn *ki_cancel;
207
208 struct io_event ki_res;
209
210 struct list_head ki_list; /* the aio core uses this
211 * for cancellation */
212 refcount_t ki_refcnt;
213
214 /*
215 * If the aio_resfd field of the userspace iocb is not zero,
216 * this is the underlying eventfd context to deliver events to.
217 */
218 struct eventfd_ctx *ki_eventfd;
219};
220
221/*------ sysctl variables----*/
222static DEFINE_SPINLOCK(aio_nr_lock);
223static unsigned long aio_nr; /* current system wide number of aio requests */
224static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
225/*----end sysctl variables---*/
226#ifdef CONFIG_SYSCTL
227static struct ctl_table aio_sysctls[] = {
228 {
229 .procname = "aio-nr",
230 .data = &aio_nr,
231 .maxlen = sizeof(aio_nr),
232 .mode = 0444,
233 .proc_handler = proc_doulongvec_minmax,
234 },
235 {
236 .procname = "aio-max-nr",
237 .data = &aio_max_nr,
238 .maxlen = sizeof(aio_max_nr),
239 .mode = 0644,
240 .proc_handler = proc_doulongvec_minmax,
241 },
242};
243
244static void __init aio_sysctl_init(void)
245{
246 register_sysctl_init("fs", aio_sysctls);
247}
248#else
249#define aio_sysctl_init() do { } while (0)
250#endif
251
252static struct kmem_cache *kiocb_cachep;
253static struct kmem_cache *kioctx_cachep;
254
255static struct vfsmount *aio_mnt;
256
257static const struct file_operations aio_ring_fops;
258static const struct address_space_operations aio_ctx_aops;
259
260static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
261{
262 struct file *file;
263 struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
264 if (IS_ERR(inode))
265 return ERR_CAST(inode);
266
267 inode->i_mapping->a_ops = &aio_ctx_aops;
268 inode->i_mapping->i_private_data = ctx;
269 inode->i_size = PAGE_SIZE * nr_pages;
270
271 file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
272 O_RDWR, &aio_ring_fops);
273 if (IS_ERR(file))
274 iput(inode);
275 return file;
276}
277
278static int aio_init_fs_context(struct fs_context *fc)
279{
280 if (!init_pseudo(fc, AIO_RING_MAGIC))
281 return -ENOMEM;
282 fc->s_iflags |= SB_I_NOEXEC;
283 return 0;
284}
285
286/* aio_setup
287 * Creates the slab caches used by the aio routines, panic on
288 * failure as this is done early during the boot sequence.
289 */
290static int __init aio_setup(void)
291{
292 static struct file_system_type aio_fs = {
293 .name = "aio",
294 .init_fs_context = aio_init_fs_context,
295 .kill_sb = kill_anon_super,
296 };
297 aio_mnt = kern_mount(&aio_fs);
298 if (IS_ERR(aio_mnt))
299 panic("Failed to create aio fs mount.");
300
301 kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
302 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
303 aio_sysctl_init();
304 return 0;
305}
306__initcall(aio_setup);
307
308static void put_aio_ring_file(struct kioctx *ctx)
309{
310 struct file *aio_ring_file = ctx->aio_ring_file;
311 struct address_space *i_mapping;
312
313 if (aio_ring_file) {
314 truncate_setsize(file_inode(aio_ring_file), 0);
315
316 /* Prevent further access to the kioctx from migratepages */
317 i_mapping = aio_ring_file->f_mapping;
318 spin_lock(&i_mapping->i_private_lock);
319 i_mapping->i_private_data = NULL;
320 ctx->aio_ring_file = NULL;
321 spin_unlock(&i_mapping->i_private_lock);
322
323 fput(aio_ring_file);
324 }
325}
326
327static void aio_free_ring(struct kioctx *ctx)
328{
329 int i;
330
331 /* Disconnect the kiotx from the ring file. This prevents future
332 * accesses to the kioctx from page migration.
333 */
334 put_aio_ring_file(ctx);
335
336 for (i = 0; i < ctx->nr_pages; i++) {
337 struct page *page;
338 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
339 page_count(ctx->ring_pages[i]));
340 page = ctx->ring_pages[i];
341 if (!page)
342 continue;
343 ctx->ring_pages[i] = NULL;
344 put_page(page);
345 }
346
347 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
348 kfree(ctx->ring_pages);
349 ctx->ring_pages = NULL;
350 }
351}
352
353static int aio_ring_mremap(struct vm_area_struct *vma)
354{
355 struct file *file = vma->vm_file;
356 struct mm_struct *mm = vma->vm_mm;
357 struct kioctx_table *table;
358 int i, res = -EINVAL;
359
360 spin_lock(&mm->ioctx_lock);
361 rcu_read_lock();
362 table = rcu_dereference(mm->ioctx_table);
363 if (!table)
364 goto out_unlock;
365
366 for (i = 0; i < table->nr; i++) {
367 struct kioctx *ctx;
368
369 ctx = rcu_dereference(table->table[i]);
370 if (ctx && ctx->aio_ring_file == file) {
371 if (!atomic_read(&ctx->dead)) {
372 ctx->user_id = ctx->mmap_base = vma->vm_start;
373 res = 0;
374 }
375 break;
376 }
377 }
378
379out_unlock:
380 rcu_read_unlock();
381 spin_unlock(&mm->ioctx_lock);
382 return res;
383}
384
385static const struct vm_operations_struct aio_ring_vm_ops = {
386 .mremap = aio_ring_mremap,
387#if IS_ENABLED(CONFIG_MMU)
388 .fault = filemap_fault,
389 .map_pages = filemap_map_pages,
390 .page_mkwrite = filemap_page_mkwrite,
391#endif
392};
393
394static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
395{
396 vm_flags_set(vma, VM_DONTEXPAND);
397 vma->vm_ops = &aio_ring_vm_ops;
398 return 0;
399}
400
401static const struct file_operations aio_ring_fops = {
402 .mmap = aio_ring_mmap,
403};
404
405#if IS_ENABLED(CONFIG_MIGRATION)
406static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
407 struct folio *src, enum migrate_mode mode)
408{
409 struct kioctx *ctx;
410 unsigned long flags;
411 pgoff_t idx;
412 int rc;
413
414 /*
415 * We cannot support the _NO_COPY case here, because copy needs to
416 * happen under the ctx->completion_lock. That does not work with the
417 * migration workflow of MIGRATE_SYNC_NO_COPY.
418 */
419 if (mode == MIGRATE_SYNC_NO_COPY)
420 return -EINVAL;
421
422 rc = 0;
423
424 /* mapping->i_private_lock here protects against the kioctx teardown. */
425 spin_lock(&mapping->i_private_lock);
426 ctx = mapping->i_private_data;
427 if (!ctx) {
428 rc = -EINVAL;
429 goto out;
430 }
431
432 /* The ring_lock mutex. The prevents aio_read_events() from writing
433 * to the ring's head, and prevents page migration from mucking in
434 * a partially initialized kiotx.
435 */
436 if (!mutex_trylock(&ctx->ring_lock)) {
437 rc = -EAGAIN;
438 goto out;
439 }
440
441 idx = src->index;
442 if (idx < (pgoff_t)ctx->nr_pages) {
443 /* Make sure the old folio hasn't already been changed */
444 if (ctx->ring_pages[idx] != &src->page)
445 rc = -EAGAIN;
446 } else
447 rc = -EINVAL;
448
449 if (rc != 0)
450 goto out_unlock;
451
452 /* Writeback must be complete */
453 BUG_ON(folio_test_writeback(src));
454 folio_get(dst);
455
456 rc = folio_migrate_mapping(mapping, dst, src, 1);
457 if (rc != MIGRATEPAGE_SUCCESS) {
458 folio_put(dst);
459 goto out_unlock;
460 }
461
462 /* Take completion_lock to prevent other writes to the ring buffer
463 * while the old folio is copied to the new. This prevents new
464 * events from being lost.
465 */
466 spin_lock_irqsave(&ctx->completion_lock, flags);
467 folio_migrate_copy(dst, src);
468 BUG_ON(ctx->ring_pages[idx] != &src->page);
469 ctx->ring_pages[idx] = &dst->page;
470 spin_unlock_irqrestore(&ctx->completion_lock, flags);
471
472 /* The old folio is no longer accessible. */
473 folio_put(src);
474
475out_unlock:
476 mutex_unlock(&ctx->ring_lock);
477out:
478 spin_unlock(&mapping->i_private_lock);
479 return rc;
480}
481#else
482#define aio_migrate_folio NULL
483#endif
484
485static const struct address_space_operations aio_ctx_aops = {
486 .dirty_folio = noop_dirty_folio,
487 .migrate_folio = aio_migrate_folio,
488};
489
490static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
491{
492 struct aio_ring *ring;
493 struct mm_struct *mm = current->mm;
494 unsigned long size, unused;
495 int nr_pages;
496 int i;
497 struct file *file;
498
499 /* Compensate for the ring buffer's head/tail overlap entry */
500 nr_events += 2; /* 1 is required, 2 for good luck */
501
502 size = sizeof(struct aio_ring);
503 size += sizeof(struct io_event) * nr_events;
504
505 nr_pages = PFN_UP(size);
506 if (nr_pages < 0)
507 return -EINVAL;
508
509 file = aio_private_file(ctx, nr_pages);
510 if (IS_ERR(file)) {
511 ctx->aio_ring_file = NULL;
512 return -ENOMEM;
513 }
514
515 ctx->aio_ring_file = file;
516 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
517 / sizeof(struct io_event);
518
519 ctx->ring_pages = ctx->internal_pages;
520 if (nr_pages > AIO_RING_PAGES) {
521 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
522 GFP_KERNEL);
523 if (!ctx->ring_pages) {
524 put_aio_ring_file(ctx);
525 return -ENOMEM;
526 }
527 }
528
529 for (i = 0; i < nr_pages; i++) {
530 struct page *page;
531 page = find_or_create_page(file->f_mapping,
532 i, GFP_USER | __GFP_ZERO);
533 if (!page)
534 break;
535 pr_debug("pid(%d) page[%d]->count=%d\n",
536 current->pid, i, page_count(page));
537 SetPageUptodate(page);
538 unlock_page(page);
539
540 ctx->ring_pages[i] = page;
541 }
542 ctx->nr_pages = i;
543
544 if (unlikely(i != nr_pages)) {
545 aio_free_ring(ctx);
546 return -ENOMEM;
547 }
548
549 ctx->mmap_size = nr_pages * PAGE_SIZE;
550 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
551
552 if (mmap_write_lock_killable(mm)) {
553 ctx->mmap_size = 0;
554 aio_free_ring(ctx);
555 return -EINTR;
556 }
557
558 ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
559 PROT_READ | PROT_WRITE,
560 MAP_SHARED, 0, 0, &unused, NULL);
561 mmap_write_unlock(mm);
562 if (IS_ERR((void *)ctx->mmap_base)) {
563 ctx->mmap_size = 0;
564 aio_free_ring(ctx);
565 return -ENOMEM;
566 }
567
568 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
569
570 ctx->user_id = ctx->mmap_base;
571 ctx->nr_events = nr_events; /* trusted copy */
572
573 ring = page_address(ctx->ring_pages[0]);
574 ring->nr = nr_events; /* user copy */
575 ring->id = ~0U;
576 ring->head = ring->tail = 0;
577 ring->magic = AIO_RING_MAGIC;
578 ring->compat_features = AIO_RING_COMPAT_FEATURES;
579 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
580 ring->header_length = sizeof(struct aio_ring);
581 flush_dcache_page(ctx->ring_pages[0]);
582
583 return 0;
584}
585
586#define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
587#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
588#define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
589
590void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
591{
592 struct aio_kiocb *req;
593 struct kioctx *ctx;
594 unsigned long flags;
595
596 /*
597 * kiocb didn't come from aio or is neither a read nor a write, hence
598 * ignore it.
599 */
600 if (!(iocb->ki_flags & IOCB_AIO_RW))
601 return;
602
603 req = container_of(iocb, struct aio_kiocb, rw);
604
605 if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
606 return;
607
608 ctx = req->ki_ctx;
609
610 spin_lock_irqsave(&ctx->ctx_lock, flags);
611 list_add_tail(&req->ki_list, &ctx->active_reqs);
612 req->ki_cancel = cancel;
613 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
614}
615EXPORT_SYMBOL(kiocb_set_cancel_fn);
616
617/*
618 * free_ioctx() should be RCU delayed to synchronize against the RCU
619 * protected lookup_ioctx() and also needs process context to call
620 * aio_free_ring(). Use rcu_work.
621 */
622static void free_ioctx(struct work_struct *work)
623{
624 struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
625 free_rwork);
626 pr_debug("freeing %p\n", ctx);
627
628 aio_free_ring(ctx);
629 free_percpu(ctx->cpu);
630 percpu_ref_exit(&ctx->reqs);
631 percpu_ref_exit(&ctx->users);
632 kmem_cache_free(kioctx_cachep, ctx);
633}
634
635static void free_ioctx_reqs(struct percpu_ref *ref)
636{
637 struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
638
639 /* At this point we know that there are no any in-flight requests */
640 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
641 complete(&ctx->rq_wait->comp);
642
643 /* Synchronize against RCU protected table->table[] dereferences */
644 INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
645 queue_rcu_work(system_wq, &ctx->free_rwork);
646}
647
648/*
649 * When this function runs, the kioctx has been removed from the "hash table"
650 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
651 * now it's safe to cancel any that need to be.
652 */
653static void free_ioctx_users(struct percpu_ref *ref)
654{
655 struct kioctx *ctx = container_of(ref, struct kioctx, users);
656 struct aio_kiocb *req;
657
658 spin_lock_irq(&ctx->ctx_lock);
659
660 while (!list_empty(&ctx->active_reqs)) {
661 req = list_first_entry(&ctx->active_reqs,
662 struct aio_kiocb, ki_list);
663 req->ki_cancel(&req->rw);
664 list_del_init(&req->ki_list);
665 }
666
667 spin_unlock_irq(&ctx->ctx_lock);
668
669 percpu_ref_kill(&ctx->reqs);
670 percpu_ref_put(&ctx->reqs);
671}
672
673static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
674{
675 unsigned i, new_nr;
676 struct kioctx_table *table, *old;
677 struct aio_ring *ring;
678
679 spin_lock(&mm->ioctx_lock);
680 table = rcu_dereference_raw(mm->ioctx_table);
681
682 while (1) {
683 if (table)
684 for (i = 0; i < table->nr; i++)
685 if (!rcu_access_pointer(table->table[i])) {
686 ctx->id = i;
687 rcu_assign_pointer(table->table[i], ctx);
688 spin_unlock(&mm->ioctx_lock);
689
690 /* While kioctx setup is in progress,
691 * we are protected from page migration
692 * changes ring_pages by ->ring_lock.
693 */
694 ring = page_address(ctx->ring_pages[0]);
695 ring->id = ctx->id;
696 return 0;
697 }
698
699 new_nr = (table ? table->nr : 1) * 4;
700 spin_unlock(&mm->ioctx_lock);
701
702 table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL);
703 if (!table)
704 return -ENOMEM;
705
706 table->nr = new_nr;
707
708 spin_lock(&mm->ioctx_lock);
709 old = rcu_dereference_raw(mm->ioctx_table);
710
711 if (!old) {
712 rcu_assign_pointer(mm->ioctx_table, table);
713 } else if (table->nr > old->nr) {
714 memcpy(table->table, old->table,
715 old->nr * sizeof(struct kioctx *));
716
717 rcu_assign_pointer(mm->ioctx_table, table);
718 kfree_rcu(old, rcu);
719 } else {
720 kfree(table);
721 table = old;
722 }
723 }
724}
725
726static void aio_nr_sub(unsigned nr)
727{
728 spin_lock(&aio_nr_lock);
729 if (WARN_ON(aio_nr - nr > aio_nr))
730 aio_nr = 0;
731 else
732 aio_nr -= nr;
733 spin_unlock(&aio_nr_lock);
734}
735
736/* ioctx_alloc
737 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
738 */
739static struct kioctx *ioctx_alloc(unsigned nr_events)
740{
741 struct mm_struct *mm = current->mm;
742 struct kioctx *ctx;
743 int err = -ENOMEM;
744
745 /*
746 * Store the original nr_events -- what userspace passed to io_setup(),
747 * for counting against the global limit -- before it changes.
748 */
749 unsigned int max_reqs = nr_events;
750
751 /*
752 * We keep track of the number of available ringbuffer slots, to prevent
753 * overflow (reqs_available), and we also use percpu counters for this.
754 *
755 * So since up to half the slots might be on other cpu's percpu counters
756 * and unavailable, double nr_events so userspace sees what they
757 * expected: additionally, we move req_batch slots to/from percpu
758 * counters at a time, so make sure that isn't 0:
759 */
760 nr_events = max(nr_events, num_possible_cpus() * 4);
761 nr_events *= 2;
762
763 /* Prevent overflows */
764 if (nr_events > (0x10000000U / sizeof(struct io_event))) {
765 pr_debug("ENOMEM: nr_events too high\n");
766 return ERR_PTR(-EINVAL);
767 }
768
769 if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
770 return ERR_PTR(-EAGAIN);
771
772 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
773 if (!ctx)
774 return ERR_PTR(-ENOMEM);
775
776 ctx->max_reqs = max_reqs;
777
778 spin_lock_init(&ctx->ctx_lock);
779 spin_lock_init(&ctx->completion_lock);
780 mutex_init(&ctx->ring_lock);
781 /* Protect against page migration throughout kiotx setup by keeping
782 * the ring_lock mutex held until setup is complete. */
783 mutex_lock(&ctx->ring_lock);
784 init_waitqueue_head(&ctx->wait);
785
786 INIT_LIST_HEAD(&ctx->active_reqs);
787
788 if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
789 goto err;
790
791 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
792 goto err;
793
794 ctx->cpu = alloc_percpu(struct kioctx_cpu);
795 if (!ctx->cpu)
796 goto err;
797
798 err = aio_setup_ring(ctx, nr_events);
799 if (err < 0)
800 goto err;
801
802 atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
803 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
804 if (ctx->req_batch < 1)
805 ctx->req_batch = 1;
806
807 /* limit the number of system wide aios */
808 spin_lock(&aio_nr_lock);
809 if (aio_nr + ctx->max_reqs > aio_max_nr ||
810 aio_nr + ctx->max_reqs < aio_nr) {
811 spin_unlock(&aio_nr_lock);
812 err = -EAGAIN;
813 goto err_ctx;
814 }
815 aio_nr += ctx->max_reqs;
816 spin_unlock(&aio_nr_lock);
817
818 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
819 percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */
820
821 err = ioctx_add_table(ctx, mm);
822 if (err)
823 goto err_cleanup;
824
825 /* Release the ring_lock mutex now that all setup is complete. */
826 mutex_unlock(&ctx->ring_lock);
827
828 pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
829 ctx, ctx->user_id, mm, ctx->nr_events);
830 return ctx;
831
832err_cleanup:
833 aio_nr_sub(ctx->max_reqs);
834err_ctx:
835 atomic_set(&ctx->dead, 1);
836 if (ctx->mmap_size)
837 vm_munmap(ctx->mmap_base, ctx->mmap_size);
838 aio_free_ring(ctx);
839err:
840 mutex_unlock(&ctx->ring_lock);
841 free_percpu(ctx->cpu);
842 percpu_ref_exit(&ctx->reqs);
843 percpu_ref_exit(&ctx->users);
844 kmem_cache_free(kioctx_cachep, ctx);
845 pr_debug("error allocating ioctx %d\n", err);
846 return ERR_PTR(err);
847}
848
849/* kill_ioctx
850 * Cancels all outstanding aio requests on an aio context. Used
851 * when the processes owning a context have all exited to encourage
852 * the rapid destruction of the kioctx.
853 */
854static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
855 struct ctx_rq_wait *wait)
856{
857 struct kioctx_table *table;
858
859 spin_lock(&mm->ioctx_lock);
860 if (atomic_xchg(&ctx->dead, 1)) {
861 spin_unlock(&mm->ioctx_lock);
862 return -EINVAL;
863 }
864
865 table = rcu_dereference_raw(mm->ioctx_table);
866 WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
867 RCU_INIT_POINTER(table->table[ctx->id], NULL);
868 spin_unlock(&mm->ioctx_lock);
869
870 /* free_ioctx_reqs() will do the necessary RCU synchronization */
871 wake_up_all(&ctx->wait);
872
873 /*
874 * It'd be more correct to do this in free_ioctx(), after all
875 * the outstanding kiocbs have finished - but by then io_destroy
876 * has already returned, so io_setup() could potentially return
877 * -EAGAIN with no ioctxs actually in use (as far as userspace
878 * could tell).
879 */
880 aio_nr_sub(ctx->max_reqs);
881
882 if (ctx->mmap_size)
883 vm_munmap(ctx->mmap_base, ctx->mmap_size);
884
885 ctx->rq_wait = wait;
886 percpu_ref_kill(&ctx->users);
887 return 0;
888}
889
890/*
891 * exit_aio: called when the last user of mm goes away. At this point, there is
892 * no way for any new requests to be submited or any of the io_* syscalls to be
893 * called on the context.
894 *
895 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
896 * them.
897 */
898void exit_aio(struct mm_struct *mm)
899{
900 struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
901 struct ctx_rq_wait wait;
902 int i, skipped;
903
904 if (!table)
905 return;
906
907 atomic_set(&wait.count, table->nr);
908 init_completion(&wait.comp);
909
910 skipped = 0;
911 for (i = 0; i < table->nr; ++i) {
912 struct kioctx *ctx =
913 rcu_dereference_protected(table->table[i], true);
914
915 if (!ctx) {
916 skipped++;
917 continue;
918 }
919
920 /*
921 * We don't need to bother with munmap() here - exit_mmap(mm)
922 * is coming and it'll unmap everything. And we simply can't,
923 * this is not necessarily our ->mm.
924 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
925 * that it needs to unmap the area, just set it to 0.
926 */
927 ctx->mmap_size = 0;
928 kill_ioctx(mm, ctx, &wait);
929 }
930
931 if (!atomic_sub_and_test(skipped, &wait.count)) {
932 /* Wait until all IO for the context are done. */
933 wait_for_completion(&wait.comp);
934 }
935
936 RCU_INIT_POINTER(mm->ioctx_table, NULL);
937 kfree(table);
938}
939
940static void put_reqs_available(struct kioctx *ctx, unsigned nr)
941{
942 struct kioctx_cpu *kcpu;
943 unsigned long flags;
944
945 local_irq_save(flags);
946 kcpu = this_cpu_ptr(ctx->cpu);
947 kcpu->reqs_available += nr;
948
949 while (kcpu->reqs_available >= ctx->req_batch * 2) {
950 kcpu->reqs_available -= ctx->req_batch;
951 atomic_add(ctx->req_batch, &ctx->reqs_available);
952 }
953
954 local_irq_restore(flags);
955}
956
957static bool __get_reqs_available(struct kioctx *ctx)
958{
959 struct kioctx_cpu *kcpu;
960 bool ret = false;
961 unsigned long flags;
962
963 local_irq_save(flags);
964 kcpu = this_cpu_ptr(ctx->cpu);
965 if (!kcpu->reqs_available) {
966 int avail = atomic_read(&ctx->reqs_available);
967
968 do {
969 if (avail < ctx->req_batch)
970 goto out;
971 } while (!atomic_try_cmpxchg(&ctx->reqs_available,
972 &avail, avail - ctx->req_batch));
973
974 kcpu->reqs_available += ctx->req_batch;
975 }
976
977 ret = true;
978 kcpu->reqs_available--;
979out:
980 local_irq_restore(flags);
981 return ret;
982}
983
984/* refill_reqs_available
985 * Updates the reqs_available reference counts used for tracking the
986 * number of free slots in the completion ring. This can be called
987 * from aio_complete() (to optimistically update reqs_available) or
988 * from aio_get_req() (the we're out of events case). It must be
989 * called holding ctx->completion_lock.
990 */
991static void refill_reqs_available(struct kioctx *ctx, unsigned head,
992 unsigned tail)
993{
994 unsigned events_in_ring, completed;
995
996 /* Clamp head since userland can write to it. */
997 head %= ctx->nr_events;
998 if (head <= tail)
999 events_in_ring = tail - head;
1000 else
1001 events_in_ring = ctx->nr_events - (head - tail);
1002
1003 completed = ctx->completed_events;
1004 if (events_in_ring < completed)
1005 completed -= events_in_ring;
1006 else
1007 completed = 0;
1008
1009 if (!completed)
1010 return;
1011
1012 ctx->completed_events -= completed;
1013 put_reqs_available(ctx, completed);
1014}
1015
1016/* user_refill_reqs_available
1017 * Called to refill reqs_available when aio_get_req() encounters an
1018 * out of space in the completion ring.
1019 */
1020static void user_refill_reqs_available(struct kioctx *ctx)
1021{
1022 spin_lock_irq(&ctx->completion_lock);
1023 if (ctx->completed_events) {
1024 struct aio_ring *ring;
1025 unsigned head;
1026
1027 /* Access of ring->head may race with aio_read_events_ring()
1028 * here, but that's okay since whether we read the old version
1029 * or the new version, and either will be valid. The important
1030 * part is that head cannot pass tail since we prevent
1031 * aio_complete() from updating tail by holding
1032 * ctx->completion_lock. Even if head is invalid, the check
1033 * against ctx->completed_events below will make sure we do the
1034 * safe/right thing.
1035 */
1036 ring = page_address(ctx->ring_pages[0]);
1037 head = ring->head;
1038
1039 refill_reqs_available(ctx, head, ctx->tail);
1040 }
1041
1042 spin_unlock_irq(&ctx->completion_lock);
1043}
1044
1045static bool get_reqs_available(struct kioctx *ctx)
1046{
1047 if (__get_reqs_available(ctx))
1048 return true;
1049 user_refill_reqs_available(ctx);
1050 return __get_reqs_available(ctx);
1051}
1052
1053/* aio_get_req
1054 * Allocate a slot for an aio request.
1055 * Returns NULL if no requests are free.
1056 *
1057 * The refcount is initialized to 2 - one for the async op completion,
1058 * one for the synchronous code that does this.
1059 */
1060static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1061{
1062 struct aio_kiocb *req;
1063
1064 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
1065 if (unlikely(!req))
1066 return NULL;
1067
1068 if (unlikely(!get_reqs_available(ctx))) {
1069 kmem_cache_free(kiocb_cachep, req);
1070 return NULL;
1071 }
1072
1073 percpu_ref_get(&ctx->reqs);
1074 req->ki_ctx = ctx;
1075 INIT_LIST_HEAD(&req->ki_list);
1076 refcount_set(&req->ki_refcnt, 2);
1077 req->ki_eventfd = NULL;
1078 return req;
1079}
1080
1081static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1082{
1083 struct aio_ring __user *ring = (void __user *)ctx_id;
1084 struct mm_struct *mm = current->mm;
1085 struct kioctx *ctx, *ret = NULL;
1086 struct kioctx_table *table;
1087 unsigned id;
1088
1089 if (get_user(id, &ring->id))
1090 return NULL;
1091
1092 rcu_read_lock();
1093 table = rcu_dereference(mm->ioctx_table);
1094
1095 if (!table || id >= table->nr)
1096 goto out;
1097
1098 id = array_index_nospec(id, table->nr);
1099 ctx = rcu_dereference(table->table[id]);
1100 if (ctx && ctx->user_id == ctx_id) {
1101 if (percpu_ref_tryget_live(&ctx->users))
1102 ret = ctx;
1103 }
1104out:
1105 rcu_read_unlock();
1106 return ret;
1107}
1108
1109static inline void iocb_destroy(struct aio_kiocb *iocb)
1110{
1111 if (iocb->ki_eventfd)
1112 eventfd_ctx_put(iocb->ki_eventfd);
1113 if (iocb->ki_filp)
1114 fput(iocb->ki_filp);
1115 percpu_ref_put(&iocb->ki_ctx->reqs);
1116 kmem_cache_free(kiocb_cachep, iocb);
1117}
1118
1119struct aio_waiter {
1120 struct wait_queue_entry w;
1121 size_t min_nr;
1122};
1123
1124/* aio_complete
1125 * Called when the io request on the given iocb is complete.
1126 */
1127static void aio_complete(struct aio_kiocb *iocb)
1128{
1129 struct kioctx *ctx = iocb->ki_ctx;
1130 struct aio_ring *ring;
1131 struct io_event *ev_page, *event;
1132 unsigned tail, pos, head, avail;
1133 unsigned long flags;
1134
1135 /*
1136 * Add a completion event to the ring buffer. Must be done holding
1137 * ctx->completion_lock to prevent other code from messing with the tail
1138 * pointer since we might be called from irq context.
1139 */
1140 spin_lock_irqsave(&ctx->completion_lock, flags);
1141
1142 tail = ctx->tail;
1143 pos = tail + AIO_EVENTS_OFFSET;
1144
1145 if (++tail >= ctx->nr_events)
1146 tail = 0;
1147
1148 ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1149 event = ev_page + pos % AIO_EVENTS_PER_PAGE;
1150
1151 *event = iocb->ki_res;
1152
1153 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1154
1155 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
1156 (void __user *)(unsigned long)iocb->ki_res.obj,
1157 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
1158
1159 /* after flagging the request as done, we
1160 * must never even look at it again
1161 */
1162 smp_wmb(); /* make event visible before updating tail */
1163
1164 ctx->tail = tail;
1165
1166 ring = page_address(ctx->ring_pages[0]);
1167 head = ring->head;
1168 ring->tail = tail;
1169 flush_dcache_page(ctx->ring_pages[0]);
1170
1171 ctx->completed_events++;
1172 if (ctx->completed_events > 1)
1173 refill_reqs_available(ctx, head, tail);
1174
1175 avail = tail > head
1176 ? tail - head
1177 : tail + ctx->nr_events - head;
1178 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1179
1180 pr_debug("added to ring %p at [%u]\n", iocb, tail);
1181
1182 /*
1183 * Check if the user asked us to deliver the result through an
1184 * eventfd. The eventfd_signal() function is safe to be called
1185 * from IRQ context.
1186 */
1187 if (iocb->ki_eventfd)
1188 eventfd_signal(iocb->ki_eventfd);
1189
1190 /*
1191 * We have to order our ring_info tail store above and test
1192 * of the wait list below outside the wait lock. This is
1193 * like in wake_up_bit() where clearing a bit has to be
1194 * ordered with the unlocked test.
1195 */
1196 smp_mb();
1197
1198 if (waitqueue_active(&ctx->wait)) {
1199 struct aio_waiter *curr, *next;
1200 unsigned long flags;
1201
1202 spin_lock_irqsave(&ctx->wait.lock, flags);
1203 list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
1204 if (avail >= curr->min_nr) {
1205 list_del_init_careful(&curr->w.entry);
1206 wake_up_process(curr->w.private);
1207 }
1208 spin_unlock_irqrestore(&ctx->wait.lock, flags);
1209 }
1210}
1211
1212static inline void iocb_put(struct aio_kiocb *iocb)
1213{
1214 if (refcount_dec_and_test(&iocb->ki_refcnt)) {
1215 aio_complete(iocb);
1216 iocb_destroy(iocb);
1217 }
1218}
1219
1220/* aio_read_events_ring
1221 * Pull an event off of the ioctx's event ring. Returns the number of
1222 * events fetched
1223 */
1224static long aio_read_events_ring(struct kioctx *ctx,
1225 struct io_event __user *event, long nr)
1226{
1227 struct aio_ring *ring;
1228 unsigned head, tail, pos;
1229 long ret = 0;
1230 int copy_ret;
1231
1232 /*
1233 * The mutex can block and wake us up and that will cause
1234 * wait_event_interruptible_hrtimeout() to schedule without sleeping
1235 * and repeat. This should be rare enough that it doesn't cause
1236 * peformance issues. See the comment in read_events() for more detail.
1237 */
1238 sched_annotate_sleep();
1239 mutex_lock(&ctx->ring_lock);
1240
1241 /* Access to ->ring_pages here is protected by ctx->ring_lock. */
1242 ring = page_address(ctx->ring_pages[0]);
1243 head = ring->head;
1244 tail = ring->tail;
1245
1246 /*
1247 * Ensure that once we've read the current tail pointer, that
1248 * we also see the events that were stored up to the tail.
1249 */
1250 smp_rmb();
1251
1252 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
1253
1254 if (head == tail)
1255 goto out;
1256
1257 head %= ctx->nr_events;
1258 tail %= ctx->nr_events;
1259
1260 while (ret < nr) {
1261 long avail;
1262 struct io_event *ev;
1263 struct page *page;
1264
1265 avail = (head <= tail ? tail : ctx->nr_events) - head;
1266 if (head == tail)
1267 break;
1268
1269 pos = head + AIO_EVENTS_OFFSET;
1270 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
1271 pos %= AIO_EVENTS_PER_PAGE;
1272
1273 avail = min(avail, nr - ret);
1274 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
1275
1276 ev = page_address(page);
1277 copy_ret = copy_to_user(event + ret, ev + pos,
1278 sizeof(*ev) * avail);
1279
1280 if (unlikely(copy_ret)) {
1281 ret = -EFAULT;
1282 goto out;
1283 }
1284
1285 ret += avail;
1286 head += avail;
1287 head %= ctx->nr_events;
1288 }
1289
1290 ring = page_address(ctx->ring_pages[0]);
1291 ring->head = head;
1292 flush_dcache_page(ctx->ring_pages[0]);
1293
1294 pr_debug("%li h%u t%u\n", ret, head, tail);
1295out:
1296 mutex_unlock(&ctx->ring_lock);
1297
1298 return ret;
1299}
1300
1301static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
1302 struct io_event __user *event, long *i)
1303{
1304 long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
1305
1306 if (ret > 0)
1307 *i += ret;
1308
1309 if (unlikely(atomic_read(&ctx->dead)))
1310 ret = -EINVAL;
1311
1312 if (!*i)
1313 *i = ret;
1314
1315 return ret < 0 || *i >= min_nr;
1316}
1317
1318static long read_events(struct kioctx *ctx, long min_nr, long nr,
1319 struct io_event __user *event,
1320 ktime_t until)
1321{
1322 struct hrtimer_sleeper t;
1323 struct aio_waiter w;
1324 long ret = 0, ret2 = 0;
1325
1326 /*
1327 * Note that aio_read_events() is being called as the conditional - i.e.
1328 * we're calling it after prepare_to_wait() has set task state to
1329 * TASK_INTERRUPTIBLE.
1330 *
1331 * But aio_read_events() can block, and if it blocks it's going to flip
1332 * the task state back to TASK_RUNNING.
1333 *
1334 * This should be ok, provided it doesn't flip the state back to
1335 * TASK_RUNNING and return 0 too much - that causes us to spin. That
1336 * will only happen if the mutex_lock() call blocks, and we then find
1337 * the ringbuffer empty. So in practice we should be ok, but it's
1338 * something to be aware of when touching this code.
1339 */
1340 aio_read_events(ctx, min_nr, nr, event, &ret);
1341 if (until == 0 || ret < 0 || ret >= min_nr)
1342 return ret;
1343
1344 hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1345 if (until != KTIME_MAX) {
1346 hrtimer_set_expires_range_ns(&t.timer, until, current->timer_slack_ns);
1347 hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
1348 }
1349
1350 init_wait(&w.w);
1351
1352 while (1) {
1353 unsigned long nr_got = ret;
1354
1355 w.min_nr = min_nr - ret;
1356
1357 ret2 = prepare_to_wait_event(&ctx->wait, &w.w, TASK_INTERRUPTIBLE);
1358 if (!ret2 && !t.task)
1359 ret2 = -ETIME;
1360
1361 if (aio_read_events(ctx, min_nr, nr, event, &ret) || ret2)
1362 break;
1363
1364 if (nr_got == ret)
1365 schedule();
1366 }
1367
1368 finish_wait(&ctx->wait, &w.w);
1369 hrtimer_cancel(&t.timer);
1370 destroy_hrtimer_on_stack(&t.timer);
1371
1372 return ret;
1373}
1374
1375/* sys_io_setup:
1376 * Create an aio_context capable of receiving at least nr_events.
1377 * ctxp must not point to an aio_context that already exists, and
1378 * must be initialized to 0 prior to the call. On successful
1379 * creation of the aio_context, *ctxp is filled in with the resulting
1380 * handle. May fail with -EINVAL if *ctxp is not initialized,
1381 * if the specified nr_events exceeds internal limits. May fail
1382 * with -EAGAIN if the specified nr_events exceeds the user's limit
1383 * of available events. May fail with -ENOMEM if insufficient kernel
1384 * resources are available. May fail with -EFAULT if an invalid
1385 * pointer is passed for ctxp. Will fail with -ENOSYS if not
1386 * implemented.
1387 */
1388SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1389{
1390 struct kioctx *ioctx = NULL;
1391 unsigned long ctx;
1392 long ret;
1393
1394 ret = get_user(ctx, ctxp);
1395 if (unlikely(ret))
1396 goto out;
1397
1398 ret = -EINVAL;
1399 if (unlikely(ctx || nr_events == 0)) {
1400 pr_debug("EINVAL: ctx %lu nr_events %u\n",
1401 ctx, nr_events);
1402 goto out;
1403 }
1404
1405 ioctx = ioctx_alloc(nr_events);
1406 ret = PTR_ERR(ioctx);
1407 if (!IS_ERR(ioctx)) {
1408 ret = put_user(ioctx->user_id, ctxp);
1409 if (ret)
1410 kill_ioctx(current->mm, ioctx, NULL);
1411 percpu_ref_put(&ioctx->users);
1412 }
1413
1414out:
1415 return ret;
1416}
1417
1418#ifdef CONFIG_COMPAT
1419COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
1420{
1421 struct kioctx *ioctx = NULL;
1422 unsigned long ctx;
1423 long ret;
1424
1425 ret = get_user(ctx, ctx32p);
1426 if (unlikely(ret))
1427 goto out;
1428
1429 ret = -EINVAL;
1430 if (unlikely(ctx || nr_events == 0)) {
1431 pr_debug("EINVAL: ctx %lu nr_events %u\n",
1432 ctx, nr_events);
1433 goto out;
1434 }
1435
1436 ioctx = ioctx_alloc(nr_events);
1437 ret = PTR_ERR(ioctx);
1438 if (!IS_ERR(ioctx)) {
1439 /* truncating is ok because it's a user address */
1440 ret = put_user((u32)ioctx->user_id, ctx32p);
1441 if (ret)
1442 kill_ioctx(current->mm, ioctx, NULL);
1443 percpu_ref_put(&ioctx->users);
1444 }
1445
1446out:
1447 return ret;
1448}
1449#endif
1450
1451/* sys_io_destroy:
1452 * Destroy the aio_context specified. May cancel any outstanding
1453 * AIOs and block on completion. Will fail with -ENOSYS if not
1454 * implemented. May fail with -EINVAL if the context pointed to
1455 * is invalid.
1456 */
1457SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1458{
1459 struct kioctx *ioctx = lookup_ioctx(ctx);
1460 if (likely(NULL != ioctx)) {
1461 struct ctx_rq_wait wait;
1462 int ret;
1463
1464 init_completion(&wait.comp);
1465 atomic_set(&wait.count, 1);
1466
1467 /* Pass requests_done to kill_ioctx() where it can be set
1468 * in a thread-safe way. If we try to set it here then we have
1469 * a race condition if two io_destroy() called simultaneously.
1470 */
1471 ret = kill_ioctx(current->mm, ioctx, &wait);
1472 percpu_ref_put(&ioctx->users);
1473
1474 /* Wait until all IO for the context are done. Otherwise kernel
1475 * keep using user-space buffers even if user thinks the context
1476 * is destroyed.
1477 */
1478 if (!ret)
1479 wait_for_completion(&wait.comp);
1480
1481 return ret;
1482 }
1483 pr_debug("EINVAL: invalid context id\n");
1484 return -EINVAL;
1485}
1486
1487static void aio_remove_iocb(struct aio_kiocb *iocb)
1488{
1489 struct kioctx *ctx = iocb->ki_ctx;
1490 unsigned long flags;
1491
1492 spin_lock_irqsave(&ctx->ctx_lock, flags);
1493 list_del(&iocb->ki_list);
1494 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1495}
1496
1497static void aio_complete_rw(struct kiocb *kiocb, long res)
1498{
1499 struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
1500
1501 if (!list_empty_careful(&iocb->ki_list))
1502 aio_remove_iocb(iocb);
1503
1504 if (kiocb->ki_flags & IOCB_WRITE) {
1505 struct inode *inode = file_inode(kiocb->ki_filp);
1506
1507 if (S_ISREG(inode->i_mode))
1508 kiocb_end_write(kiocb);
1509 }
1510
1511 iocb->ki_res.res = res;
1512 iocb->ki_res.res2 = 0;
1513 iocb_put(iocb);
1514}
1515
1516static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
1517{
1518 int ret;
1519
1520 req->ki_complete = aio_complete_rw;
1521 req->private = NULL;
1522 req->ki_pos = iocb->aio_offset;
1523 req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
1524 if (iocb->aio_flags & IOCB_FLAG_RESFD)
1525 req->ki_flags |= IOCB_EVENTFD;
1526 if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
1527 /*
1528 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
1529 * aio_reqprio is interpreted as an I/O scheduling
1530 * class and priority.
1531 */
1532 ret = ioprio_check_cap(iocb->aio_reqprio);
1533 if (ret) {
1534 pr_debug("aio ioprio check cap error: %d\n", ret);
1535 return ret;
1536 }
1537
1538 req->ki_ioprio = iocb->aio_reqprio;
1539 } else
1540 req->ki_ioprio = get_current_ioprio();
1541
1542 ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
1543 if (unlikely(ret))
1544 return ret;
1545
1546 req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
1547 return 0;
1548}
1549
1550static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
1551 struct iovec **iovec, bool vectored, bool compat,
1552 struct iov_iter *iter)
1553{
1554 void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
1555 size_t len = iocb->aio_nbytes;
1556
1557 if (!vectored) {
1558 ssize_t ret = import_ubuf(rw, buf, len, iter);
1559 *iovec = NULL;
1560 return ret;
1561 }
1562
1563 return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
1564}
1565
1566static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
1567{
1568 switch (ret) {
1569 case -EIOCBQUEUED:
1570 break;
1571 case -ERESTARTSYS:
1572 case -ERESTARTNOINTR:
1573 case -ERESTARTNOHAND:
1574 case -ERESTART_RESTARTBLOCK:
1575 /*
1576 * There's no easy way to restart the syscall since other AIO's
1577 * may be already running. Just fail this IO with EINTR.
1578 */
1579 ret = -EINTR;
1580 fallthrough;
1581 default:
1582 req->ki_complete(req, ret);
1583 }
1584}
1585
1586static int aio_read(struct kiocb *req, const struct iocb *iocb,
1587 bool vectored, bool compat)
1588{
1589 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1590 struct iov_iter iter;
1591 struct file *file;
1592 int ret;
1593
1594 ret = aio_prep_rw(req, iocb);
1595 if (ret)
1596 return ret;
1597 file = req->ki_filp;
1598 if (unlikely(!(file->f_mode & FMODE_READ)))
1599 return -EBADF;
1600 if (unlikely(!file->f_op->read_iter))
1601 return -EINVAL;
1602
1603 ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
1604 if (ret < 0)
1605 return ret;
1606 ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1607 if (!ret)
1608 aio_rw_done(req, call_read_iter(file, req, &iter));
1609 kfree(iovec);
1610 return ret;
1611}
1612
1613static int aio_write(struct kiocb *req, const struct iocb *iocb,
1614 bool vectored, bool compat)
1615{
1616 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1617 struct iov_iter iter;
1618 struct file *file;
1619 int ret;
1620
1621 ret = aio_prep_rw(req, iocb);
1622 if (ret)
1623 return ret;
1624 file = req->ki_filp;
1625
1626 if (unlikely(!(file->f_mode & FMODE_WRITE)))
1627 return -EBADF;
1628 if (unlikely(!file->f_op->write_iter))
1629 return -EINVAL;
1630
1631 ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
1632 if (ret < 0)
1633 return ret;
1634 ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1635 if (!ret) {
1636 if (S_ISREG(file_inode(file)->i_mode))
1637 kiocb_start_write(req);
1638 req->ki_flags |= IOCB_WRITE;
1639 aio_rw_done(req, call_write_iter(file, req, &iter));
1640 }
1641 kfree(iovec);
1642 return ret;
1643}
1644
1645static void aio_fsync_work(struct work_struct *work)
1646{
1647 struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1648 const struct cred *old_cred = override_creds(iocb->fsync.creds);
1649
1650 iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1651 revert_creds(old_cred);
1652 put_cred(iocb->fsync.creds);
1653 iocb_put(iocb);
1654}
1655
1656static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1657 bool datasync)
1658{
1659 if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
1660 iocb->aio_rw_flags))
1661 return -EINVAL;
1662
1663 if (unlikely(!req->file->f_op->fsync))
1664 return -EINVAL;
1665
1666 req->creds = prepare_creds();
1667 if (!req->creds)
1668 return -ENOMEM;
1669
1670 req->datasync = datasync;
1671 INIT_WORK(&req->work, aio_fsync_work);
1672 schedule_work(&req->work);
1673 return 0;
1674}
1675
1676static void aio_poll_put_work(struct work_struct *work)
1677{
1678 struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1679 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1680
1681 iocb_put(iocb);
1682}
1683
1684/*
1685 * Safely lock the waitqueue which the request is on, synchronizing with the
1686 * case where the ->poll() provider decides to free its waitqueue early.
1687 *
1688 * Returns true on success, meaning that req->head->lock was locked, req->wait
1689 * is on req->head, and an RCU read lock was taken. Returns false if the
1690 * request was already removed from its waitqueue (which might no longer exist).
1691 */
1692static bool poll_iocb_lock_wq(struct poll_iocb *req)
1693{
1694 wait_queue_head_t *head;
1695
1696 /*
1697 * While we hold the waitqueue lock and the waitqueue is nonempty,
1698 * wake_up_pollfree() will wait for us. However, taking the waitqueue
1699 * lock in the first place can race with the waitqueue being freed.
1700 *
1701 * We solve this as eventpoll does: by taking advantage of the fact that
1702 * all users of wake_up_pollfree() will RCU-delay the actual free. If
1703 * we enter rcu_read_lock() and see that the pointer to the queue is
1704 * non-NULL, we can then lock it without the memory being freed out from
1705 * under us, then check whether the request is still on the queue.
1706 *
1707 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
1708 * case the caller deletes the entry from the queue, leaving it empty.
1709 * In that case, only RCU prevents the queue memory from being freed.
1710 */
1711 rcu_read_lock();
1712 head = smp_load_acquire(&req->head);
1713 if (head) {
1714 spin_lock(&head->lock);
1715 if (!list_empty(&req->wait.entry))
1716 return true;
1717 spin_unlock(&head->lock);
1718 }
1719 rcu_read_unlock();
1720 return false;
1721}
1722
1723static void poll_iocb_unlock_wq(struct poll_iocb *req)
1724{
1725 spin_unlock(&req->head->lock);
1726 rcu_read_unlock();
1727}
1728
1729static void aio_poll_complete_work(struct work_struct *work)
1730{
1731 struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1732 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1733 struct poll_table_struct pt = { ._key = req->events };
1734 struct kioctx *ctx = iocb->ki_ctx;
1735 __poll_t mask = 0;
1736
1737 if (!READ_ONCE(req->cancelled))
1738 mask = vfs_poll(req->file, &pt) & req->events;
1739
1740 /*
1741 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1742 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
1743 * synchronize with them. In the cancellation case the list_del_init
1744 * itself is not actually needed, but harmless so we keep it in to
1745 * avoid further branches in the fast path.
1746 */
1747 spin_lock_irq(&ctx->ctx_lock);
1748 if (poll_iocb_lock_wq(req)) {
1749 if (!mask && !READ_ONCE(req->cancelled)) {
1750 /*
1751 * The request isn't actually ready to be completed yet.
1752 * Reschedule completion if another wakeup came in.
1753 */
1754 if (req->work_need_resched) {
1755 schedule_work(&req->work);
1756 req->work_need_resched = false;
1757 } else {
1758 req->work_scheduled = false;
1759 }
1760 poll_iocb_unlock_wq(req);
1761 spin_unlock_irq(&ctx->ctx_lock);
1762 return;
1763 }
1764 list_del_init(&req->wait.entry);
1765 poll_iocb_unlock_wq(req);
1766 } /* else, POLLFREE has freed the waitqueue, so we must complete */
1767 list_del_init(&iocb->ki_list);
1768 iocb->ki_res.res = mangle_poll(mask);
1769 spin_unlock_irq(&ctx->ctx_lock);
1770
1771 iocb_put(iocb);
1772}
1773
1774/* assumes we are called with irqs disabled */
1775static int aio_poll_cancel(struct kiocb *iocb)
1776{
1777 struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
1778 struct poll_iocb *req = &aiocb->poll;
1779
1780 if (poll_iocb_lock_wq(req)) {
1781 WRITE_ONCE(req->cancelled, true);
1782 if (!req->work_scheduled) {
1783 schedule_work(&aiocb->poll.work);
1784 req->work_scheduled = true;
1785 }
1786 poll_iocb_unlock_wq(req);
1787 } /* else, the request was force-cancelled by POLLFREE already */
1788
1789 return 0;
1790}
1791
1792static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1793 void *key)
1794{
1795 struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
1796 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1797 __poll_t mask = key_to_poll(key);
1798 unsigned long flags;
1799
1800 /* for instances that support it check for an event match first: */
1801 if (mask && !(mask & req->events))
1802 return 0;
1803
1804 /*
1805 * Complete the request inline if possible. This requires that three
1806 * conditions be met:
1807 * 1. An event mask must have been passed. If a plain wakeup was done
1808 * instead, then mask == 0 and we have to call vfs_poll() to get
1809 * the events, so inline completion isn't possible.
1810 * 2. The completion work must not have already been scheduled.
1811 * 3. ctx_lock must not be busy. We have to use trylock because we
1812 * already hold the waitqueue lock, so this inverts the normal
1813 * locking order. Use irqsave/irqrestore because not all
1814 * filesystems (e.g. fuse) call this function with IRQs disabled,
1815 * yet IRQs have to be disabled before ctx_lock is obtained.
1816 */
1817 if (mask && !req->work_scheduled &&
1818 spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1819 struct kioctx *ctx = iocb->ki_ctx;
1820
1821 list_del_init(&req->wait.entry);
1822 list_del(&iocb->ki_list);
1823 iocb->ki_res.res = mangle_poll(mask);
1824 if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
1825 iocb = NULL;
1826 INIT_WORK(&req->work, aio_poll_put_work);
1827 schedule_work(&req->work);
1828 }
1829 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1830 if (iocb)
1831 iocb_put(iocb);
1832 } else {
1833 /*
1834 * Schedule the completion work if needed. If it was already
1835 * scheduled, record that another wakeup came in.
1836 *
1837 * Don't remove the request from the waitqueue here, as it might
1838 * not actually be complete yet (we won't know until vfs_poll()
1839 * is called), and we must not miss any wakeups. POLLFREE is an
1840 * exception to this; see below.
1841 */
1842 if (req->work_scheduled) {
1843 req->work_need_resched = true;
1844 } else {
1845 schedule_work(&req->work);
1846 req->work_scheduled = true;
1847 }
1848
1849 /*
1850 * If the waitqueue is being freed early but we can't complete
1851 * the request inline, we have to tear down the request as best
1852 * we can. That means immediately removing the request from its
1853 * waitqueue and preventing all further accesses to the
1854 * waitqueue via the request. We also need to schedule the
1855 * completion work (done above). Also mark the request as
1856 * cancelled, to potentially skip an unneeded call to ->poll().
1857 */
1858 if (mask & POLLFREE) {
1859 WRITE_ONCE(req->cancelled, true);
1860 list_del_init(&req->wait.entry);
1861
1862 /*
1863 * Careful: this *must* be the last step, since as soon
1864 * as req->head is NULL'ed out, the request can be
1865 * completed and freed, since aio_poll_complete_work()
1866 * will no longer need to take the waitqueue lock.
1867 */
1868 smp_store_release(&req->head, NULL);
1869 }
1870 }
1871 return 1;
1872}
1873
1874struct aio_poll_table {
1875 struct poll_table_struct pt;
1876 struct aio_kiocb *iocb;
1877 bool queued;
1878 int error;
1879};
1880
1881static void
1882aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1883 struct poll_table_struct *p)
1884{
1885 struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
1886
1887 /* multiple wait queues per file are not supported */
1888 if (unlikely(pt->queued)) {
1889 pt->error = -EINVAL;
1890 return;
1891 }
1892
1893 pt->queued = true;
1894 pt->error = 0;
1895 pt->iocb->poll.head = head;
1896 add_wait_queue(head, &pt->iocb->poll.wait);
1897}
1898
1899static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1900{
1901 struct kioctx *ctx = aiocb->ki_ctx;
1902 struct poll_iocb *req = &aiocb->poll;
1903 struct aio_poll_table apt;
1904 bool cancel = false;
1905 __poll_t mask;
1906
1907 /* reject any unknown events outside the normal event mask. */
1908 if ((u16)iocb->aio_buf != iocb->aio_buf)
1909 return -EINVAL;
1910 /* reject fields that are not defined for poll */
1911 if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
1912 return -EINVAL;
1913
1914 INIT_WORK(&req->work, aio_poll_complete_work);
1915 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1916
1917 req->head = NULL;
1918 req->cancelled = false;
1919 req->work_scheduled = false;
1920 req->work_need_resched = false;
1921
1922 apt.pt._qproc = aio_poll_queue_proc;
1923 apt.pt._key = req->events;
1924 apt.iocb = aiocb;
1925 apt.queued = false;
1926 apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1927
1928 /* initialized the list so that we can do list_empty checks */
1929 INIT_LIST_HEAD(&req->wait.entry);
1930 init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1931
1932 mask = vfs_poll(req->file, &apt.pt) & req->events;
1933 spin_lock_irq(&ctx->ctx_lock);
1934 if (likely(apt.queued)) {
1935 bool on_queue = poll_iocb_lock_wq(req);
1936
1937 if (!on_queue || req->work_scheduled) {
1938 /*
1939 * aio_poll_wake() already either scheduled the async
1940 * completion work, or completed the request inline.
1941 */
1942 if (apt.error) /* unsupported case: multiple queues */
1943 cancel = true;
1944 apt.error = 0;
1945 mask = 0;
1946 }
1947 if (mask || apt.error) {
1948 /* Steal to complete synchronously. */
1949 list_del_init(&req->wait.entry);
1950 } else if (cancel) {
1951 /* Cancel if possible (may be too late though). */
1952 WRITE_ONCE(req->cancelled, true);
1953 } else if (on_queue) {
1954 /*
1955 * Actually waiting for an event, so add the request to
1956 * active_reqs so that it can be cancelled if needed.
1957 */
1958 list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1959 aiocb->ki_cancel = aio_poll_cancel;
1960 }
1961 if (on_queue)
1962 poll_iocb_unlock_wq(req);
1963 }
1964 if (mask) { /* no async, we'd stolen it */
1965 aiocb->ki_res.res = mangle_poll(mask);
1966 apt.error = 0;
1967 }
1968 spin_unlock_irq(&ctx->ctx_lock);
1969 if (mask)
1970 iocb_put(aiocb);
1971 return apt.error;
1972}
1973
1974static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1975 struct iocb __user *user_iocb, struct aio_kiocb *req,
1976 bool compat)
1977{
1978 req->ki_filp = fget(iocb->aio_fildes);
1979 if (unlikely(!req->ki_filp))
1980 return -EBADF;
1981
1982 if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1983 struct eventfd_ctx *eventfd;
1984 /*
1985 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1986 * instance of the file* now. The file descriptor must be
1987 * an eventfd() fd, and will be signaled for each completed
1988 * event using the eventfd_signal() function.
1989 */
1990 eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
1991 if (IS_ERR(eventfd))
1992 return PTR_ERR(eventfd);
1993
1994 req->ki_eventfd = eventfd;
1995 }
1996
1997 if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
1998 pr_debug("EFAULT: aio_key\n");
1999 return -EFAULT;
2000 }
2001
2002 req->ki_res.obj = (u64)(unsigned long)user_iocb;
2003 req->ki_res.data = iocb->aio_data;
2004 req->ki_res.res = 0;
2005 req->ki_res.res2 = 0;
2006
2007 switch (iocb->aio_lio_opcode) {
2008 case IOCB_CMD_PREAD:
2009 return aio_read(&req->rw, iocb, false, compat);
2010 case IOCB_CMD_PWRITE:
2011 return aio_write(&req->rw, iocb, false, compat);
2012 case IOCB_CMD_PREADV:
2013 return aio_read(&req->rw, iocb, true, compat);
2014 case IOCB_CMD_PWRITEV:
2015 return aio_write(&req->rw, iocb, true, compat);
2016 case IOCB_CMD_FSYNC:
2017 return aio_fsync(&req->fsync, iocb, false);
2018 case IOCB_CMD_FDSYNC:
2019 return aio_fsync(&req->fsync, iocb, true);
2020 case IOCB_CMD_POLL:
2021 return aio_poll(req, iocb);
2022 default:
2023 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
2024 return -EINVAL;
2025 }
2026}
2027
2028static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
2029 bool compat)
2030{
2031 struct aio_kiocb *req;
2032 struct iocb iocb;
2033 int err;
2034
2035 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
2036 return -EFAULT;
2037
2038 /* enforce forwards compatibility on users */
2039 if (unlikely(iocb.aio_reserved2)) {
2040 pr_debug("EINVAL: reserve field set\n");
2041 return -EINVAL;
2042 }
2043
2044 /* prevent overflows */
2045 if (unlikely(
2046 (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
2047 (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
2048 ((ssize_t)iocb.aio_nbytes < 0)
2049 )) {
2050 pr_debug("EINVAL: overflow check\n");
2051 return -EINVAL;
2052 }
2053
2054 req = aio_get_req(ctx);
2055 if (unlikely(!req))
2056 return -EAGAIN;
2057
2058 err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
2059
2060 /* Done with the synchronous reference */
2061 iocb_put(req);
2062
2063 /*
2064 * If err is 0, we'd either done aio_complete() ourselves or have
2065 * arranged for that to be done asynchronously. Anything non-zero
2066 * means that we need to destroy req ourselves.
2067 */
2068 if (unlikely(err)) {
2069 iocb_destroy(req);
2070 put_reqs_available(ctx, 1);
2071 }
2072 return err;
2073}
2074
2075/* sys_io_submit:
2076 * Queue the nr iocbs pointed to by iocbpp for processing. Returns
2077 * the number of iocbs queued. May return -EINVAL if the aio_context
2078 * specified by ctx_id is invalid, if nr is < 0, if the iocb at
2079 * *iocbpp[0] is not properly initialized, if the operation specified
2080 * is invalid for the file descriptor in the iocb. May fail with
2081 * -EFAULT if any of the data structures point to invalid data. May
2082 * fail with -EBADF if the file descriptor specified in the first
2083 * iocb is invalid. May fail with -EAGAIN if insufficient resources
2084 * are available to queue any iocbs. Will return 0 if nr is 0. Will
2085 * fail with -ENOSYS if not implemented.
2086 */
2087SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
2088 struct iocb __user * __user *, iocbpp)
2089{
2090 struct kioctx *ctx;
2091 long ret = 0;
2092 int i = 0;
2093 struct blk_plug plug;
2094
2095 if (unlikely(nr < 0))
2096 return -EINVAL;
2097
2098 ctx = lookup_ioctx(ctx_id);
2099 if (unlikely(!ctx)) {
2100 pr_debug("EINVAL: invalid context id\n");
2101 return -EINVAL;
2102 }
2103
2104 if (nr > ctx->nr_events)
2105 nr = ctx->nr_events;
2106
2107 if (nr > AIO_PLUG_THRESHOLD)
2108 blk_start_plug(&plug);
2109 for (i = 0; i < nr; i++) {
2110 struct iocb __user *user_iocb;
2111
2112 if (unlikely(get_user(user_iocb, iocbpp + i))) {
2113 ret = -EFAULT;
2114 break;
2115 }
2116
2117 ret = io_submit_one(ctx, user_iocb, false);
2118 if (ret)
2119 break;
2120 }
2121 if (nr > AIO_PLUG_THRESHOLD)
2122 blk_finish_plug(&plug);
2123
2124 percpu_ref_put(&ctx->users);
2125 return i ? i : ret;
2126}
2127
2128#ifdef CONFIG_COMPAT
2129COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
2130 int, nr, compat_uptr_t __user *, iocbpp)
2131{
2132 struct kioctx *ctx;
2133 long ret = 0;
2134 int i = 0;
2135 struct blk_plug plug;
2136
2137 if (unlikely(nr < 0))
2138 return -EINVAL;
2139
2140 ctx = lookup_ioctx(ctx_id);
2141 if (unlikely(!ctx)) {
2142 pr_debug("EINVAL: invalid context id\n");
2143 return -EINVAL;
2144 }
2145
2146 if (nr > ctx->nr_events)
2147 nr = ctx->nr_events;
2148
2149 if (nr > AIO_PLUG_THRESHOLD)
2150 blk_start_plug(&plug);
2151 for (i = 0; i < nr; i++) {
2152 compat_uptr_t user_iocb;
2153
2154 if (unlikely(get_user(user_iocb, iocbpp + i))) {
2155 ret = -EFAULT;
2156 break;
2157 }
2158
2159 ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
2160 if (ret)
2161 break;
2162 }
2163 if (nr > AIO_PLUG_THRESHOLD)
2164 blk_finish_plug(&plug);
2165
2166 percpu_ref_put(&ctx->users);
2167 return i ? i : ret;
2168}
2169#endif
2170
2171/* sys_io_cancel:
2172 * Attempts to cancel an iocb previously passed to io_submit. If
2173 * the operation is successfully cancelled, the resulting event is
2174 * copied into the memory pointed to by result without being placed
2175 * into the completion queue and 0 is returned. May fail with
2176 * -EFAULT if any of the data structures pointed to are invalid.
2177 * May fail with -EINVAL if aio_context specified by ctx_id is
2178 * invalid. May fail with -EAGAIN if the iocb specified was not
2179 * cancelled. Will fail with -ENOSYS if not implemented.
2180 */
2181SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2182 struct io_event __user *, result)
2183{
2184 struct kioctx *ctx;
2185 struct aio_kiocb *kiocb;
2186 int ret = -EINVAL;
2187 u32 key;
2188 u64 obj = (u64)(unsigned long)iocb;
2189
2190 if (unlikely(get_user(key, &iocb->aio_key)))
2191 return -EFAULT;
2192 if (unlikely(key != KIOCB_KEY))
2193 return -EINVAL;
2194
2195 ctx = lookup_ioctx(ctx_id);
2196 if (unlikely(!ctx))
2197 return -EINVAL;
2198
2199 spin_lock_irq(&ctx->ctx_lock);
2200 /* TODO: use a hash or array, this sucks. */
2201 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
2202 if (kiocb->ki_res.obj == obj) {
2203 ret = kiocb->ki_cancel(&kiocb->rw);
2204 list_del_init(&kiocb->ki_list);
2205 break;
2206 }
2207 }
2208 spin_unlock_irq(&ctx->ctx_lock);
2209
2210 if (!ret) {
2211 /*
2212 * The result argument is no longer used - the io_event is
2213 * always delivered via the ring buffer. -EINPROGRESS indicates
2214 * cancellation is progress:
2215 */
2216 ret = -EINPROGRESS;
2217 }
2218
2219 percpu_ref_put(&ctx->users);
2220
2221 return ret;
2222}
2223
2224static long do_io_getevents(aio_context_t ctx_id,
2225 long min_nr,
2226 long nr,
2227 struct io_event __user *events,
2228 struct timespec64 *ts)
2229{
2230 ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
2231 struct kioctx *ioctx = lookup_ioctx(ctx_id);
2232 long ret = -EINVAL;
2233
2234 if (likely(ioctx)) {
2235 if (likely(min_nr <= nr && min_nr >= 0))
2236 ret = read_events(ioctx, min_nr, nr, events, until);
2237 percpu_ref_put(&ioctx->users);
2238 }
2239
2240 return ret;
2241}
2242
2243/* io_getevents:
2244 * Attempts to read at least min_nr events and up to nr events from
2245 * the completion queue for the aio_context specified by ctx_id. If
2246 * it succeeds, the number of read events is returned. May fail with
2247 * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
2248 * out of range, if timeout is out of range. May fail with -EFAULT
2249 * if any of the memory specified is invalid. May return 0 or
2250 * < min_nr if the timeout specified by timeout has elapsed
2251 * before sufficient events are available, where timeout == NULL
2252 * specifies an infinite timeout. Note that the timeout pointed to by
2253 * timeout is relative. Will fail with -ENOSYS if not implemented.
2254 */
2255#ifdef CONFIG_64BIT
2256
2257SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
2258 long, min_nr,
2259 long, nr,
2260 struct io_event __user *, events,
2261 struct __kernel_timespec __user *, timeout)
2262{
2263 struct timespec64 ts;
2264 int ret;
2265
2266 if (timeout && unlikely(get_timespec64(&ts, timeout)))
2267 return -EFAULT;
2268
2269 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2270 if (!ret && signal_pending(current))
2271 ret = -EINTR;
2272 return ret;
2273}
2274
2275#endif
2276
2277struct __aio_sigset {
2278 const sigset_t __user *sigmask;
2279 size_t sigsetsize;
2280};
2281
2282SYSCALL_DEFINE6(io_pgetevents,
2283 aio_context_t, ctx_id,
2284 long, min_nr,
2285 long, nr,
2286 struct io_event __user *, events,
2287 struct __kernel_timespec __user *, timeout,
2288 const struct __aio_sigset __user *, usig)
2289{
2290 struct __aio_sigset ksig = { NULL, };
2291 struct timespec64 ts;
2292 bool interrupted;
2293 int ret;
2294
2295 if (timeout && unlikely(get_timespec64(&ts, timeout)))
2296 return -EFAULT;
2297
2298 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2299 return -EFAULT;
2300
2301 ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2302 if (ret)
2303 return ret;
2304
2305 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2306
2307 interrupted = signal_pending(current);
2308 restore_saved_sigmask_unless(interrupted);
2309 if (interrupted && !ret)
2310 ret = -ERESTARTNOHAND;
2311
2312 return ret;
2313}
2314
2315#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
2316
2317SYSCALL_DEFINE6(io_pgetevents_time32,
2318 aio_context_t, ctx_id,
2319 long, min_nr,
2320 long, nr,
2321 struct io_event __user *, events,
2322 struct old_timespec32 __user *, timeout,
2323 const struct __aio_sigset __user *, usig)
2324{
2325 struct __aio_sigset ksig = { NULL, };
2326 struct timespec64 ts;
2327 bool interrupted;
2328 int ret;
2329
2330 if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
2331 return -EFAULT;
2332
2333 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2334 return -EFAULT;
2335
2336
2337 ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2338 if (ret)
2339 return ret;
2340
2341 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2342
2343 interrupted = signal_pending(current);
2344 restore_saved_sigmask_unless(interrupted);
2345 if (interrupted && !ret)
2346 ret = -ERESTARTNOHAND;
2347
2348 return ret;
2349}
2350
2351#endif
2352
2353#if defined(CONFIG_COMPAT_32BIT_TIME)
2354
2355SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
2356 __s32, min_nr,
2357 __s32, nr,
2358 struct io_event __user *, events,
2359 struct old_timespec32 __user *, timeout)
2360{
2361 struct timespec64 t;
2362 int ret;
2363
2364 if (timeout && get_old_timespec32(&t, timeout))
2365 return -EFAULT;
2366
2367 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2368 if (!ret && signal_pending(current))
2369 ret = -EINTR;
2370 return ret;
2371}
2372
2373#endif
2374
2375#ifdef CONFIG_COMPAT
2376
2377struct __compat_aio_sigset {
2378 compat_uptr_t sigmask;
2379 compat_size_t sigsetsize;
2380};
2381
2382#if defined(CONFIG_COMPAT_32BIT_TIME)
2383
2384COMPAT_SYSCALL_DEFINE6(io_pgetevents,
2385 compat_aio_context_t, ctx_id,
2386 compat_long_t, min_nr,
2387 compat_long_t, nr,
2388 struct io_event __user *, events,
2389 struct old_timespec32 __user *, timeout,
2390 const struct __compat_aio_sigset __user *, usig)
2391{
2392 struct __compat_aio_sigset ksig = { 0, };
2393 struct timespec64 t;
2394 bool interrupted;
2395 int ret;
2396
2397 if (timeout && get_old_timespec32(&t, timeout))
2398 return -EFAULT;
2399
2400 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2401 return -EFAULT;
2402
2403 ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2404 if (ret)
2405 return ret;
2406
2407 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2408
2409 interrupted = signal_pending(current);
2410 restore_saved_sigmask_unless(interrupted);
2411 if (interrupted && !ret)
2412 ret = -ERESTARTNOHAND;
2413
2414 return ret;
2415}
2416
2417#endif
2418
2419COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
2420 compat_aio_context_t, ctx_id,
2421 compat_long_t, min_nr,
2422 compat_long_t, nr,
2423 struct io_event __user *, events,
2424 struct __kernel_timespec __user *, timeout,
2425 const struct __compat_aio_sigset __user *, usig)
2426{
2427 struct __compat_aio_sigset ksig = { 0, };
2428 struct timespec64 t;
2429 bool interrupted;
2430 int ret;
2431
2432 if (timeout && get_timespec64(&t, timeout))
2433 return -EFAULT;
2434
2435 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2436 return -EFAULT;
2437
2438 ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2439 if (ret)
2440 return ret;
2441
2442 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2443
2444 interrupted = signal_pending(current);
2445 restore_saved_sigmask_unless(interrupted);
2446 if (interrupted && !ret)
2447 ret = -ERESTARTNOHAND;
2448
2449 return ret;
2450}
2451#endif